| body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring |
|---|---|---|---|---|---|---|---|
def plug_vifs(self, instance, network_info):
'Plug VIFs into networks.'
for vif in network_info:
self.vif_driver.plug(instance, vif)
| 6,357,100,223,343,190,000
|
Plug VIFs into networks.
|
nova/virt/libvirt/driver.py
|
plug_vifs
|
srajag/nova
|
python
|
def plug_vifs(self, instance, network_info):
for vif in network_info:
self.vif_driver.plug(instance, vif)
|
def _unplug_vifs(self, instance, network_info, ignore_errors):
'Unplug VIFs from networks.'
for vif in network_info:
try:
self.vif_driver.unplug(instance, vif)
except exception.NovaException:
if (not ignore_errors):
raise
| -7,604,990,297,459,410,000
|
Unplug VIFs from networks.
|
nova/virt/libvirt/driver.py
|
_unplug_vifs
|
srajag/nova
|
python
|
def _unplug_vifs(self, instance, network_info, ignore_errors):
for vif in network_info:
try:
self.vif_driver.unplug(instance, vif)
except exception.NovaException:
if (not ignore_errors):
raise
|
def _cleanup_lvm(self, instance):
'Delete all LVM disks for given instance object.'
disks = self._lvm_disks(instance)
if disks:
lvm.remove_volumes(disks)
| 6,992,421,263,557,911,000
|
Delete all LVM disks for given instance object.
|
nova/virt/libvirt/driver.py
|
_cleanup_lvm
|
srajag/nova
|
python
|
def _cleanup_lvm(self, instance):
disks = self._lvm_disks(instance)
if disks:
lvm.remove_volumes(disks)
|
def _lvm_disks(self, instance):
'Returns all LVM disks for given instance object.'
if CONF.libvirt.images_volume_group:
vg = os.path.join('/dev', CONF.libvirt.images_volume_group)
if (not os.path.exists(vg)):
return []
pattern = ('%s_' % instance['uuid'])
def belongs_to_instance_legacy(disk):
pattern = ('%s_' % instance['name'])
if disk.startswith(pattern):
if (CONF.instance_name_template == 'instance-%08x'):
return True
else:
LOG.warn(_LW('Volume %(disk)s possibly unsafe to remove, please clean up manually'), {'disk': disk})
return False
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = lvm.list_volumes(vg)
disk_names = filter(belongs_to_instance, logical_volumes)
disk_names.extend(filter(belongs_to_instance_legacy, logical_volumes))
disks = map(fullpath, disk_names)
return disks
return []
| 8,662,966,914,996,749,000
|
Returns all LVM disks for given instance object.
|
nova/virt/libvirt/driver.py
|
_lvm_disks
|
srajag/nova
|
python
|
def _lvm_disks(self, instance):
if CONF.libvirt.images_volume_group:
vg = os.path.join('/dev', CONF.libvirt.images_volume_group)
if (not os.path.exists(vg)):
return []
pattern = ('%s_' % instance['uuid'])
def belongs_to_instance_legacy(disk):
pattern = ('%s_' % instance['name'])
if disk.startswith(pattern):
if (CONF.instance_name_template == 'instance-%08x'):
return True
else:
LOG.warn(_LW('Volume %(disk)s possibly unsafe to remove, please clean up manually'), {'disk': disk})
return False
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = lvm.list_volumes(vg)
disk_names = filter(belongs_to_instance, logical_volumes)
disk_names.extend(filter(belongs_to_instance_legacy, logical_volumes))
disks = map(fullpath, disk_names)
return disks
return []
|
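A portability note on `_lvm_disks` above: this is Python 2-era code, where `filter()` and `map()` return lists, so `disk_names.extend(...)` works. Under Python 3, `filter()` returns an iterator with no `.extend()` method. A minimal Python 3-safe sketch of the same matching logic (`lvm_disks_py3` is a hypothetical standalone helper; it omits the `instance_name_template` safety check and warning from the original):

```python
import os

def lvm_disks_py3(vg, logical_volumes, uuid_prefix, legacy_prefix):
    """Python 3-safe sketch of the matching logic in _lvm_disks.

    vg: volume group path, e.g. '/dev/nova-vg'
    logical_volumes: names as returned by lvm.list_volumes(vg)
    uuid_prefix / legacy_prefix: the "<uuid>_" and "<name>_" patterns
    """
    # list comprehensions replace the Py2-only filter()/map() idioms
    disk_names = [d for d in logical_volumes if d.startswith(uuid_prefix)]
    disk_names.extend(d for d in logical_volumes if d.startswith(legacy_prefix))
    return [os.path.join(vg, name) for name in disk_names]

# usage sketch with made-up names
assert lvm_disks_py3('/dev/vg', ['abc_disk', 'instance-1_disk'],
                     'abc_', 'instance-1_') == \
    ['/dev/vg/abc_disk', '/dev/vg/instance-1_disk']
```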
def _swap_volume(self, domain, disk_path, new_path, resize_to):
'Swap existing disk with a new block device.'
xml = domain.XMLDesc((libvirt.VIR_DOMAIN_XML_INACTIVE | libvirt.VIR_DOMAIN_XML_SECURE))
try:
domain.blockJobAbort(disk_path, 0)
except Exception:
pass
try:
if domain.isPersistent():
domain.undefine()
domain.blockRebase(disk_path, new_path, 0, (libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY | libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT))
while self._wait_for_block_job(domain, disk_path):
time.sleep(0.5)
domain.blockJobAbort(disk_path, libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)
if resize_to:
while self._wait_for_block_job(domain, disk_path, wait_for_job_clean=True):
time.sleep(0.5)
domain.blockResize(disk_path, ((resize_to * units.Gi) / units.Ki))
finally:
self._conn.defineXML(xml)
| 1,421,798,263,718,253,300
|
Swap existing disk with a new block device.
|
nova/virt/libvirt/driver.py
|
_swap_volume
|
srajag/nova
|
python
|
def _swap_volume(self, domain, disk_path, new_path, resize_to):
xml = domain.XMLDesc((libvirt.VIR_DOMAIN_XML_INACTIVE | libvirt.VIR_DOMAIN_XML_SECURE))
try:
domain.blockJobAbort(disk_path, 0)
except Exception:
pass
try:
if domain.isPersistent():
domain.undefine()
domain.blockRebase(disk_path, new_path, 0, (libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY | libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT))
while self._wait_for_block_job(domain, disk_path):
time.sleep(0.5)
domain.blockJobAbort(disk_path, libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)
if resize_to:
while self._wait_for_block_job(domain, disk_path, wait_for_job_clean=True):
time.sleep(0.5)
domain.blockResize(disk_path, ((resize_to * units.Gi) / units.Ki))
finally:
self._conn.defineXML(xml)
|
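Two details in `_swap_volume` worth spelling out: libvirt's `blockResize` interprets its size argument in KiB by default, and `resize_to` arrives in GiB, so `(resize_to * units.Gi) / units.Ki` converts GiB to KiB (under Python 2 this is integer division; on Python 3 it would yield a float). A quick check of the arithmetic, assuming oslo-style constants:

```python
Gi = 1024 ** 3   # bytes per GiB, as in oslo.utils units.Gi
Ki = 1024        # bytes per KiB, as in oslo.utils units.Ki

resize_to = 20                  # GiB requested
kib = (resize_to * Gi) // Ki    # the value handed to blockResize()
assert kib == 20 * 1024 * 1024  # 20,971,520 KiB == 20 GiB
```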
@staticmethod
def _get_disk_xml(xml, device):
'Returns the xml for the disk mounted at device.'
try:
doc = etree.fromstring(xml)
except Exception:
return None
ret = doc.findall('./devices/disk')
for node in ret:
for child in node.getchildren():
if (child.tag == 'target'):
if (child.get('dev') == device):
return etree.tostring(node)
| 8,606,600,322,234,641,000
|
Returns the xml for the disk mounted at device.
|
nova/virt/libvirt/driver.py
|
_get_disk_xml
|
srajag/nova
|
python
|
@staticmethod
def _get_disk_xml(xml, device):
try:
doc = etree.fromstring(xml)
except Exception:
return None
ret = doc.findall('./devices/disk')
for node in ret:
for child in node.getchildren():
if (child.tag == 'target'):
if (child.get('dev') == device):
return etree.tostring(node)
|
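`Element.getchildren()` used in `_get_disk_xml` above was deprecated for a long time and removed outright in Python 3.9. A sketch of the same lookup written against current ElementTree (it also narrows the broad `except Exception` to `ParseError`; both changes are modernizations, not the driver's actual code):

```python
from xml.etree import ElementTree as etree

def get_disk_xml(xml, device):
    """Return the serialized <disk> element whose <target dev=...> matches."""
    try:
        doc = etree.fromstring(xml)
    except etree.ParseError:
        return None
    for node in doc.findall('./devices/disk'):
        target = node.find('target')
        if target is not None and target.get('dev') == device:
            return etree.tostring(node)
    return None

xml = "<domain><devices><disk><target dev='vda'/></disk></devices></domain>"
assert b'vda' in get_disk_xml(xml, 'vda')
assert get_disk_xml(xml, 'vdb') is None
```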
def snapshot(self, context, instance, image_id, update_task_state):
'Create snapshot from a running VM instance.\n\n This command only works with qemu 0.14+\n '
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
base_image_ref = instance['image_ref']
base = compute_utils.get_image_metadata(context, self._image_api, base_image_ref, instance)
snapshot = self._image_api.get(context, image_id)
disk_path = libvirt_utils.find_disk(virt_dom)
source_format = libvirt_utils.get_disk_type(disk_path)
image_format = (CONF.libvirt.snapshot_image_format or source_format)
if ((image_format == 'lvm') or (image_format == 'rbd')):
image_format = 'raw'
metadata = self._create_snapshot_metadata(base, instance, image_format, snapshot['name'])
snapshot_name = uuid.uuid4().hex
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if (self._has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION, MIN_QEMU_LIVESNAPSHOT_VERSION, REQ_HYPERVISOR_LIVESNAPSHOT) and (not (source_format == 'lvm')) and (not (source_format == 'rbd'))):
live_snapshot = True
try:
virt_dom.blockJobAbort(disk_path, 0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if (error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED):
live_snapshot = False
else:
pass
else:
live_snapshot = False
if (state == power_state.SHUTDOWN):
live_snapshot = False
if ((CONF.libvirt.virt_type != 'lxc') and (not live_snapshot)):
if ((state == power_state.RUNNING) or (state == power_state.PAUSED)):
self._detach_pci_devices(virt_dom, pci_manager.get_instance_pci_devs(instance))
virt_dom.managedSave(0)
snapshot_backend = self.image_backend.snapshot(disk_path, image_type=source_format)
if live_snapshot:
LOG.info(_LI('Beginning live snapshot process'), instance=instance)
else:
LOG.info(_LI('Beginning cold snapshot process'), instance=instance)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
snapshot_directory = CONF.libvirt.snapshots_directory
fileutils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
out_path = os.path.join(tmpdir, snapshot_name)
if live_snapshot:
os.chmod(tmpdir, 449)
self._live_snapshot(virt_dom, disk_path, out_path, image_format)
else:
snapshot_backend.snapshot_extract(out_path, image_format)
finally:
new_dom = None
if ((CONF.libvirt.virt_type != 'lxc') and (not live_snapshot)):
if (state == power_state.RUNNING):
new_dom = self._create_domain(domain=virt_dom)
elif (state == power_state.PAUSED):
new_dom = self._create_domain(domain=virt_dom, launch_flags=libvirt.VIR_DOMAIN_START_PAUSED)
if (new_dom is not None):
self._attach_pci_devices(new_dom, pci_manager.get_instance_pci_devs(instance))
LOG.info(_LI('Snapshot extracted, beginning image upload'), instance=instance)
update_task_state(task_state=task_states.IMAGE_UPLOADING, expected_state=task_states.IMAGE_PENDING_UPLOAD)
with libvirt_utils.file_open(out_path) as image_file:
self._image_api.update(context, image_id, metadata, image_file)
LOG.info(_LI('Snapshot image upload complete'), instance=instance)
| -1,372,859,029,786,314,000
|
Create snapshot from a running VM instance.
This command only works with qemu 0.14+
|
nova/virt/libvirt/driver.py
|
snapshot
|
srajag/nova
|
python
|
def snapshot(self, context, instance, image_id, update_task_state):
'Create snapshot from a running VM instance.\n\n This command only works with qemu 0.14+\n '
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
base_image_ref = instance['image_ref']
base = compute_utils.get_image_metadata(context, self._image_api, base_image_ref, instance)
snapshot = self._image_api.get(context, image_id)
disk_path = libvirt_utils.find_disk(virt_dom)
source_format = libvirt_utils.get_disk_type(disk_path)
image_format = (CONF.libvirt.snapshot_image_format or source_format)
if ((image_format == 'lvm') or (image_format == 'rbd')):
image_format = 'raw'
metadata = self._create_snapshot_metadata(base, instance, image_format, snapshot['name'])
snapshot_name = uuid.uuid4().hex
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if (self._has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION, MIN_QEMU_LIVESNAPSHOT_VERSION, REQ_HYPERVISOR_LIVESNAPSHOT) and (not (source_format == 'lvm')) and (not (source_format == 'rbd'))):
live_snapshot = True
try:
virt_dom.blockJobAbort(disk_path, 0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if (error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED):
live_snapshot = False
else:
pass
else:
live_snapshot = False
if (state == power_state.SHUTDOWN):
live_snapshot = False
if ((CONF.libvirt.virt_type != 'lxc') and (not live_snapshot)):
if ((state == power_state.RUNNING) or (state == power_state.PAUSED)):
self._detach_pci_devices(virt_dom, pci_manager.get_instance_pci_devs(instance))
virt_dom.managedSave(0)
snapshot_backend = self.image_backend.snapshot(disk_path, image_type=source_format)
if live_snapshot:
LOG.info(_LI('Beginning live snapshot process'), instance=instance)
else:
LOG.info(_LI('Beginning cold snapshot process'), instance=instance)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
snapshot_directory = CONF.libvirt.snapshots_directory
fileutils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
out_path = os.path.join(tmpdir, snapshot_name)
if live_snapshot:
os.chmod(tmpdir, 449)
self._live_snapshot(virt_dom, disk_path, out_path, image_format)
else:
snapshot_backend.snapshot_extract(out_path, image_format)
finally:
new_dom = None
if ((CONF.libvirt.virt_type != 'lxc') and (not live_snapshot)):
if (state == power_state.RUNNING):
new_dom = self._create_domain(domain=virt_dom)
elif (state == power_state.PAUSED):
new_dom = self._create_domain(domain=virt_dom, launch_flags=libvirt.VIR_DOMAIN_START_PAUSED)
if (new_dom is not None):
self._attach_pci_devices(new_dom, pci_manager.get_instance_pci_devs(instance))
LOG.info(_LI('Snapshot extracted, beginning image upload'), instance=instance)
update_task_state(task_state=task_states.IMAGE_UPLOADING, expected_state=task_states.IMAGE_PENDING_UPLOAD)
with libvirt_utils.file_open(out_path) as image_file:
self._image_api.update(context, image_id, metadata, image_file)
LOG.info(_LI('Snapshot image upload complete'), instance=instance)
|
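One opaque constant in `snapshot` above deserves decoding: `os.chmod(tmpdir, 449)` is the decimal rendering of octal `0o701` (rwx for the owner, traverse-only for others), which lets the qemu process, typically running as a different unprivileged user, reach the snapshot delta inside the temporary directory:

```python
import stat

assert 449 == 0o701                          # same mode, two notations
assert 0o701 & stat.S_IRWXU == stat.S_IRWXU  # owner: read/write/execute
assert 0o701 & stat.S_IXOTH                  # others: execute (traverse) only
```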
@staticmethod
def _wait_for_block_job(domain, disk_path, abort_on_error=False, wait_for_job_clean=False):
'Wait for libvirt block job to complete.\n\n Libvirt may return either cur==end or an empty dict when\n the job is complete, depending on whether the job has been\n cleaned up by libvirt yet, or not.\n\n :returns: True if still in progress\n False if completed\n '
status = domain.blockJobInfo(disk_path, 0)
if ((status == (- 1)) and abort_on_error):
msg = _('libvirt error while requesting blockjob info.')
raise exception.NovaException(msg)
try:
cur = status.get('cur', 0)
end = status.get('end', 0)
except Exception:
return False
if wait_for_job_clean:
job_ended = (not status)
else:
job_ended = (cur == end)
return (not job_ended)
| 1,692,038,503,507,733,000
|
Wait for libvirt block job to complete.
Libvirt may return either cur==end or an empty dict when
the job is complete, depending on whether the job has been
cleaned up by libvirt yet, or not.
:returns: True if still in progress
False if completed
|
nova/virt/libvirt/driver.py
|
_wait_for_block_job
|
srajag/nova
|
python
|
@staticmethod
def _wait_for_block_job(domain, disk_path, abort_on_error=False, wait_for_job_clean=False):
'Wait for libvirt block job to complete.\n\n Libvirt may return either cur==end or an empty dict when\n the job is complete, depending on whether the job has been\n cleaned up by libvirt yet, or not.\n\n :returns: True if still in progress\n False if completed\n '
status = domain.blockJobInfo(disk_path, 0)
if ((status == (- 1)) and abort_on_error):
msg = _('libvirt error while requesting blockjob info.')
raise exception.NovaException(msg)
try:
cur = status.get('cur', 0)
end = status.get('end', 0)
except Exception:
return False
if wait_for_job_clean:
job_ended = (not status)
else:
job_ended = (cur == end)
return (not job_ended)
|
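Every caller drives `_wait_for_block_job` with the same poll-and-sleep loop. A self-contained sketch of that pattern, with a timeout added purely for illustration (the real driver loops without one):

```python
import time

def poll_block_job(domain, disk_path, wait_for_block_job,
                   interval=0.5, timeout=300):
    """Poll until the block job on disk_path finishes.

    wait_for_block_job: a predicate like _wait_for_block_job above,
    returning True while the job is still in progress. The timeout
    is an illustrative addition, not part of the original code.
    """
    deadline = time.monotonic() + timeout
    while wait_for_block_job(domain, disk_path):
        if time.monotonic() > deadline:
            raise RuntimeError('block job on %s timed out' % disk_path)
        time.sleep(interval)
```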
def _live_snapshot(self, domain, disk_path, out_path, image_format):
'Snapshot an instance without downtime.'
xml = domain.XMLDesc((libvirt.VIR_DOMAIN_XML_INACTIVE | libvirt.VIR_DOMAIN_XML_SECURE))
try:
domain.blockJobAbort(disk_path, 0)
except Exception:
pass
src_disk_size = libvirt_utils.get_disk_size(disk_path)
src_back_path = libvirt_utils.get_disk_backing_file(disk_path, basename=False)
disk_delta = (out_path + '.delta')
libvirt_utils.create_cow_image(src_back_path, disk_delta, src_disk_size)
try:
if domain.isPersistent():
domain.undefine()
domain.blockRebase(disk_path, disk_delta, 0, ((libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY | libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT) | libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW))
while self._wait_for_block_job(domain, disk_path):
time.sleep(0.5)
domain.blockJobAbort(disk_path, 0)
libvirt_utils.chown(disk_delta, os.getuid())
finally:
self._conn.defineXML(xml)
libvirt_utils.extract_snapshot(disk_delta, 'qcow2', out_path, image_format)
| 3,208,218,799,842,274,300
|
Snapshot an instance without downtime.
|
nova/virt/libvirt/driver.py
|
_live_snapshot
|
srajag/nova
|
python
|
def _live_snapshot(self, domain, disk_path, out_path, image_format):
xml = domain.XMLDesc((libvirt.VIR_DOMAIN_XML_INACTIVE | libvirt.VIR_DOMAIN_XML_SECURE))
try:
domain.blockJobAbort(disk_path, 0)
except Exception:
pass
src_disk_size = libvirt_utils.get_disk_size(disk_path)
src_back_path = libvirt_utils.get_disk_backing_file(disk_path, basename=False)
disk_delta = (out_path + '.delta')
libvirt_utils.create_cow_image(src_back_path, disk_delta, src_disk_size)
try:
if domain.isPersistent():
domain.undefine()
domain.blockRebase(disk_path, disk_delta, 0, ((libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY | libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT) | libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW))
while self._wait_for_block_job(domain, disk_path):
time.sleep(0.5)
domain.blockJobAbort(disk_path, 0)
libvirt_utils.chown(disk_delta, os.getuid())
finally:
self._conn.defineXML(xml)
libvirt_utils.extract_snapshot(disk_delta, 'qcow2', out_path, image_format)
|
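The final `extract_snapshot` call converts the shallow qcow2 delta into the requested upload format. Conceptually it boils down to a `qemu-img convert`; a rough sketch of that last step (an illustration of the idea, not the exact invocation nova's `libvirt_utils` builds):

```python
import subprocess

def extract_snapshot_sketch(disk_delta, out_path, image_format):
    # qemu-img convert -f <source_fmt> -O <dest_fmt> <in> <out>
    subprocess.check_call(['qemu-img', 'convert',
                           '-f', 'qcow2',
                           '-O', image_format,
                           disk_delta, out_path])
```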
def _volume_snapshot_update_status(self, context, snapshot_id, status):
'Send a snapshot status update to Cinder.\n\n This method captures and logs exceptions that occur\n since callers cannot do anything useful with these exceptions.\n\n Operations on the Cinder side waiting for this will time out if\n a failure occurs sending the update.\n\n :param context: security context\n :param snapshot_id: id of snapshot being updated\n :param status: new status value\n\n '
try:
self._volume_api.update_snapshot_status(context, snapshot_id, status)
except Exception:
LOG.exception(_LE('Failed to send updated snapshot status to volume service.'))
| 8,066,054,957,183,952,000
|
Send a snapshot status update to Cinder.
This method captures and logs exceptions that occur
since callers cannot do anything useful with these exceptions.
Operations on the Cinder side waiting for this will time out if
a failure occurs sending the update.
:param context: security context
:param snapshot_id: id of snapshot being updated
:param status: new status value
|
nova/virt/libvirt/driver.py
|
_volume_snapshot_update_status
|
srajag/nova
|
python
|
def _volume_snapshot_update_status(self, context, snapshot_id, status):
'Send a snapshot status update to Cinder.\n\n This method captures and logs exceptions that occur\n since callers cannot do anything useful with these exceptions.\n\n Operations on the Cinder side waiting for this will time out if\n a failure occurs sending the update.\n\n :param context: security context\n :param snapshot_id: id of snapshot being updated\n :param status: new status value\n\n '
try:
self._volume_api.update_snapshot_status(context, snapshot_id, status)
except Exception:
LOG.exception(_LE('Failed to send updated snapshot status to volume service.'))
|
def _volume_snapshot_create(self, context, instance, domain, volume_id, snapshot_id, new_file):
'Perform volume snapshot.\n\n :param domain: VM that volume is attached to\n :param volume_id: volume UUID to snapshot\n :param snapshot_id: UUID of snapshot being created\n :param new_file: relative path to new qcow2 file present on share\n\n '
xml = domain.XMLDesc(0)
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
disks_to_snap = []
network_disks_to_snap = []
disks_to_skip = []
for guest_disk in device_info.devices:
if (guest_disk.root_name != 'disk'):
continue
if (guest_disk.target_dev is None):
continue
if ((guest_disk.serial is None) or (guest_disk.serial != volume_id)):
disks_to_skip.append(guest_disk.target_dev)
continue
disk_info = {'dev': guest_disk.target_dev, 'serial': guest_disk.serial, 'current_file': guest_disk.source_path, 'source_protocol': guest_disk.source_protocol, 'source_name': guest_disk.source_name, 'source_hosts': guest_disk.source_hosts, 'source_ports': guest_disk.source_ports}
if (disk_info['current_file'] is not None):
current_file = disk_info['current_file']
new_file_path = os.path.join(os.path.dirname(current_file), new_file)
disks_to_snap.append((current_file, new_file_path))
elif (disk_info['source_protocol'] in ('gluster', 'netfs')):
network_disks_to_snap.append((disk_info, new_file))
if ((not disks_to_snap) and (not network_disks_to_snap)):
msg = _('Found no disk to snapshot.')
raise exception.NovaException(msg)
snapshot = vconfig.LibvirtConfigGuestSnapshot()
for (current_name, new_filename) in disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = current_name
snap_disk.source_path = new_filename
snap_disk.source_type = 'file'
snap_disk.snapshot = 'external'
snap_disk.driver_name = 'qcow2'
snapshot.add_disk(snap_disk)
for (disk_info, new_filename) in network_disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = disk_info['dev']
snap_disk.source_type = 'network'
snap_disk.source_protocol = disk_info['source_protocol']
snap_disk.snapshot = 'external'
snap_disk.source_path = new_filename
old_dir = disk_info['source_name'].split('/')[0]
snap_disk.source_name = ('%s/%s' % (old_dir, new_filename))
snap_disk.source_hosts = disk_info['source_hosts']
snap_disk.source_ports = disk_info['source_ports']
snapshot.add_disk(snap_disk)
for dev in disks_to_skip:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = dev
snap_disk.snapshot = 'no'
snapshot.add_disk(snap_disk)
snapshot_xml = snapshot.to_xml()
LOG.debug('snap xml: %s', snapshot_xml)
snap_flags = ((libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA) | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
QUIESCE = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE
try:
domain.snapshotCreateXML(snapshot_xml, (snap_flags | QUIESCE))
return
except libvirt.libvirtError:
LOG.exception(_LE('Unable to create quiesced VM snapshot, attempting again with quiescing disabled.'))
try:
domain.snapshotCreateXML(snapshot_xml, snap_flags)
except libvirt.libvirtError:
LOG.exception(_LE('Unable to create VM snapshot, failing volume_snapshot operation.'))
raise
| -5,064,008,524,797,116,000
|
Perform volume snapshot.
:param domain: VM that volume is attached to
:param volume_id: volume UUID to snapshot
:param snapshot_id: UUID of snapshot being created
:param new_file: relative path to new qcow2 file present on share
|
nova/virt/libvirt/driver.py
|
_volume_snapshot_create
|
srajag/nova
|
python
|
def _volume_snapshot_create(self, context, instance, domain, volume_id, snapshot_id, new_file):
'Perform volume snapshot.\n\n :param domain: VM that volume is attached to\n :param volume_id: volume UUID to snapshot\n :param snapshot_id: UUID of snapshot being created\n :param new_file: relative path to new qcow2 file present on share\n\n '
xml = domain.XMLDesc(0)
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
disks_to_snap = []
network_disks_to_snap = []
disks_to_skip = []
for guest_disk in device_info.devices:
if (guest_disk.root_name != 'disk'):
continue
if (guest_disk.target_dev is None):
continue
if ((guest_disk.serial is None) or (guest_disk.serial != volume_id)):
disks_to_skip.append(guest_disk.target_dev)
continue
disk_info = {'dev': guest_disk.target_dev, 'serial': guest_disk.serial, 'current_file': guest_disk.source_path, 'source_protocol': guest_disk.source_protocol, 'source_name': guest_disk.source_name, 'source_hosts': guest_disk.source_hosts, 'source_ports': guest_disk.source_ports}
if (disk_info['current_file'] is not None):
current_file = disk_info['current_file']
new_file_path = os.path.join(os.path.dirname(current_file), new_file)
disks_to_snap.append((current_file, new_file_path))
elif (disk_info['source_protocol'] in ('gluster', 'netfs')):
network_disks_to_snap.append((disk_info, new_file))
if ((not disks_to_snap) and (not network_disks_to_snap)):
msg = _('Found no disk to snapshot.')
raise exception.NovaException(msg)
snapshot = vconfig.LibvirtConfigGuestSnapshot()
for (current_name, new_filename) in disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = current_name
snap_disk.source_path = new_filename
snap_disk.source_type = 'file'
snap_disk.snapshot = 'external'
snap_disk.driver_name = 'qcow2'
snapshot.add_disk(snap_disk)
for (disk_info, new_filename) in network_disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = disk_info['dev']
snap_disk.source_type = 'network'
snap_disk.source_protocol = disk_info['source_protocol']
snap_disk.snapshot = 'external'
snap_disk.source_path = new_filename
old_dir = disk_info['source_name'].split('/')[0]
snap_disk.source_name = ('%s/%s' % (old_dir, new_filename))
snap_disk.source_hosts = disk_info['source_hosts']
snap_disk.source_ports = disk_info['source_ports']
snapshot.add_disk(snap_disk)
for dev in disks_to_skip:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = dev
snap_disk.snapshot = 'no'
snapshot.add_disk(snap_disk)
snapshot_xml = snapshot.to_xml()
LOG.debug('snap xml: %s', snapshot_xml)
snap_flags = ((libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA) | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
QUIESCE = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE
try:
domain.snapshotCreateXML(snapshot_xml, (snap_flags | QUIESCE))
return
except libvirt.libvirtError:
LOG.exception(_LE('Unable to create quiesced VM snapshot, attempting again with quiescing disabled.'))
try:
domain.snapshotCreateXML(snapshot_xml, snap_flags)
except libvirt.libvirtError:
LOG.exception(_LE('Unable to create VM snapshot, failing volume_snapshot operation.'))
raise
|
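For orientation, the disk-only external snapshot document that `_volume_snapshot_create` assembles ends up shaped roughly like the XML below: one `snapshot='external'` entry pointing at the new overlay for the target volume, plus an explicit `snapshot='no'` entry for every other attached disk (device names and paths here are made up):

```python
# Illustrative shape of the generated <domainsnapshot> XML
snapshot_xml = """
<domainsnapshot>
  <disks>
    <disk name='vdb' snapshot='external'>
      <source file='/mnt/share/volume-snap-1234.qcow2'/>
    </disk>
    <disk name='vda' snapshot='no'/>
  </disks>
</domainsnapshot>
"""
```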
def volume_snapshot_create(self, context, instance, volume_id, create_info):
"Create snapshots of a Cinder volume via libvirt.\n\n :param instance: VM instance object reference\n :param volume_id: id of volume being snapshotted\n :param create_info: dict of information used to create snapshots\n - snapshot_id : ID of snapshot\n - type : qcow2 / <other>\n - new_file : qcow2 file created by Cinder which\n becomes the VM's active image after\n the snapshot is complete\n "
LOG.debug('volume_snapshot_create: create_info: %(c_info)s', {'c_info': create_info}, instance=instance)
try:
virt_dom = self._lookup_by_name(instance.name)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
if (create_info['type'] != 'qcow2'):
raise exception.NovaException((_('Unknown type: %s') % create_info['type']))
snapshot_id = create_info.get('snapshot_id', None)
if (snapshot_id is None):
raise exception.NovaException(_('snapshot_id required in create_info'))
try:
self._volume_snapshot_create(context, instance, virt_dom, volume_id, snapshot_id, create_info['new_file'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Error occurred during volume_snapshot_create, sending error status to Cinder.'))
self._volume_snapshot_update_status(context, snapshot_id, 'error')
self._volume_snapshot_update_status(context, snapshot_id, 'creating')
def _wait_for_snapshot():
snapshot = self._volume_api.get_snapshot(context, snapshot_id)
if (snapshot.get('status') != 'creating'):
self._volume_refresh_connection_info(context, instance, volume_id)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_snapshot)
timer.start(interval=0.5).wait()
| -7,727,998,743,351,326,000
|
Create snapshots of a Cinder volume via libvirt.
:param instance: VM instance object reference
:param volume_id: id of volume being snapshotted
:param create_info: dict of information used to create snapshots
- snapshot_id : ID of snapshot
- type : qcow2 / <other>
- new_file : qcow2 file created by Cinder which
becomes the VM's active image after
the snapshot is complete
|
nova/virt/libvirt/driver.py
|
volume_snapshot_create
|
srajag/nova
|
python
|
def volume_snapshot_create(self, context, instance, volume_id, create_info):
"Create snapshots of a Cinder volume via libvirt.\n\n :param instance: VM instance object reference\n :param volume_id: id of volume being snapshotted\n :param create_info: dict of information used to create snapshots\n - snapshot_id : ID of snapshot\n - type : qcow2 / <other>\n - new_file : qcow2 file created by Cinder which\n becomes the VM's active image after\n the snapshot is complete\n "
LOG.debug('volume_snapshot_create: create_info: %(c_info)s', {'c_info': create_info}, instance=instance)
try:
virt_dom = self._lookup_by_name(instance.name)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
if (create_info['type'] != 'qcow2'):
raise exception.NovaException((_('Unknown type: %s') % create_info['type']))
snapshot_id = create_info.get('snapshot_id', None)
if (snapshot_id is None):
raise exception.NovaException(_('snapshot_id required in create_info'))
try:
self._volume_snapshot_create(context, instance, virt_dom, volume_id, snapshot_id, create_info['new_file'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Error occurred during volume_snapshot_create, sending error status to Cinder.'))
self._volume_snapshot_update_status(context, snapshot_id, 'error')
self._volume_snapshot_update_status(context, snapshot_id, 'creating')
def _wait_for_snapshot():
snapshot = self._volume_api.get_snapshot(context, snapshot_id)
if (snapshot.get('status') != 'creating'):
self._volume_refresh_connection_info(context, instance, volume_id)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_snapshot)
timer.start(interval=0.5).wait()
|
def _volume_snapshot_delete(self, context, instance, volume_id, snapshot_id, delete_info=None):
"Note:\n if file being merged into == active image:\n do a blockRebase (pull) operation\n else:\n do a blockCommit operation\n Files must be adjacent in snap chain.\n\n :param instance: instance object reference\n :param volume_id: volume UUID\n :param snapshot_id: snapshot UUID (unused currently)\n :param delete_info: {\n 'type': 'qcow2',\n 'file_to_merge': 'a.img',\n 'merge_target_file': 'b.img' or None (if merging file_to_merge into\n active image)\n }\n\n\n Libvirt blockjob handling required for this method is broken\n in versions of libvirt that do not contain:\n http://libvirt.org/git/?p=libvirt.git;h=0f9e67bfad (1.1.1)\n (Patch is pending in 1.0.5-maint branch as well, but we cannot detect\n libvirt 1.0.5.5 vs. 1.0.5.6 here.)\n "
if (not self._has_min_version(MIN_LIBVIRT_BLOCKJOBINFO_VERSION)):
ver = '.'.join([str(x) for x in MIN_LIBVIRT_BLOCKJOBINFO_VERSION])
msg = (_("Libvirt '%s' or later is required for online deletion of volume snapshots.") % ver)
raise exception.Invalid(msg)
LOG.debug('volume_snapshot_delete: delete_info: %s', delete_info)
if (delete_info['type'] != 'qcow2'):
msg = (_('Unknown delete_info type %s') % delete_info['type'])
raise exception.NovaException(msg)
try:
virt_dom = self._lookup_by_name(instance.name)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
my_dev = None
active_disk = None
xml = virt_dom.XMLDesc(0)
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
active_disk_object = None
for guest_disk in device_info.devices:
if (guest_disk.root_name != 'disk'):
continue
if ((guest_disk.target_dev is None) or (guest_disk.serial is None)):
continue
if (guest_disk.serial == volume_id):
my_dev = guest_disk.target_dev
active_disk = guest_disk.source_path
active_protocol = guest_disk.source_protocol
active_disk_object = guest_disk
break
if ((my_dev is None) or ((active_disk is None) and (active_protocol is None))):
msg = (_('Disk with id: %s not found attached to instance.') % volume_id)
LOG.debug('Domain XML: %s', xml)
raise exception.NovaException(msg)
LOG.debug('found device at %s', my_dev)
def _get_snap_dev(filename, backing_store):
if (filename is None):
msg = _('filename cannot be None')
raise exception.NovaException(msg)
LOG.debug(('XML: %s' % xml))
LOG.debug(('active disk object: %s' % active_disk_object))
filename_to_merge = filename
matched_name = None
b = backing_store
index = None
current_filename = active_disk_object.source_name.split('/')[1]
if (current_filename == filename_to_merge):
return (my_dev + '[0]')
while (b is not None):
source_filename = b.source_name.split('/')[1]
if (source_filename == filename_to_merge):
LOG.debug(('found match: %s' % b.source_name))
matched_name = b.source_name
index = b.index
break
b = b.backing_store
if (matched_name is None):
msg = (_('no match found for %s') % filename_to_merge)
raise exception.NovaException(msg)
LOG.debug(('index of match (%s) is %s' % (b.source_name, index)))
my_snap_dev = ('%s[%s]' % (my_dev, index))
return my_snap_dev
if (delete_info['merge_target_file'] is None):
rebase_disk = my_dev
rebase_flags = 0
rebase_base = delete_info['file_to_merge']
if (active_protocol is not None):
rebase_base = _get_snap_dev(delete_info['file_to_merge'], active_disk_object.backing_store)
rebase_bw = 0
LOG.debug('disk: %(disk)s, base: %(base)s, bw: %(bw)s, flags: %(flags)s', {'disk': rebase_disk, 'base': rebase_base, 'bw': rebase_bw, 'flags': rebase_flags})
result = virt_dom.blockRebase(rebase_disk, rebase_base, rebase_bw, rebase_flags)
if (result == 0):
LOG.debug('blockRebase started successfully')
while self._wait_for_block_job(virt_dom, my_dev, abort_on_error=True):
LOG.debug('waiting for blockRebase job completion')
time.sleep(0.5)
else:
my_snap_base = None
my_snap_top = None
commit_disk = my_dev
commit_flags = 0
if (active_protocol is not None):
my_snap_base = _get_snap_dev(delete_info['merge_target_file'], active_disk_object.backing_store)
my_snap_top = _get_snap_dev(delete_info['file_to_merge'], active_disk_object.backing_store)
try:
commit_flags |= libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
except AttributeError:
ver = '.'.join([str(x) for x in MIN_LIBVIRT_BLOCKCOMMIT_RELATIVE_VERSION])
msg = (_("Relative blockcommit support was not detected. Libvirt '%s' or later is required for online deletion of network storage-backed volume snapshots.") % ver)
raise exception.Invalid(msg)
commit_base = (my_snap_base or delete_info['merge_target_file'])
commit_top = (my_snap_top or delete_info['file_to_merge'])
bandwidth = 0
LOG.debug(('will call blockCommit with commit_disk=%(commit_disk)s commit_base=%(commit_base)s commit_top=%(commit_top)s ' % {'commit_disk': commit_disk, 'commit_base': commit_base, 'commit_top': commit_top}))
result = virt_dom.blockCommit(commit_disk, commit_base, commit_top, bandwidth, commit_flags)
if (result == 0):
LOG.debug('blockCommit started successfully')
while self._wait_for_block_job(virt_dom, my_dev, abort_on_error=True):
LOG.debug('waiting for blockCommit job completion')
time.sleep(0.5)
| -8,897,284,408,874,923,000
|
Note:
if file being merged into == active image:
do a blockRebase (pull) operation
else:
do a blockCommit operation
Files must be adjacent in snap chain.
:param instance: instance object reference
:param volume_id: volume UUID
:param snapshot_id: snapshot UUID (unused currently)
:param delete_info: {
'type': 'qcow2',
'file_to_merge': 'a.img',
'merge_target_file': 'b.img' or None (if merging file_to_merge into
active image)
}
Libvirt blockjob handling required for this method is broken
in versions of libvirt that do not contain:
http://libvirt.org/git/?p=libvirt.git;h=0f9e67bfad (1.1.1)
(Patch is pending in 1.0.5-maint branch as well, but we cannot detect
libvirt 1.0.5.5 vs. 1.0.5.6 here.)
|
nova/virt/libvirt/driver.py
|
_volume_snapshot_delete
|
srajag/nova
|
python
|
def _volume_snapshot_delete(self, context, instance, volume_id, snapshot_id, delete_info=None):
"Note:\n if file being merged into == active image:\n do a blockRebase (pull) operation\n else:\n do a blockCommit operation\n Files must be adjacent in snap chain.\n\n :param instance: instance object reference\n :param volume_id: volume UUID\n :param snapshot_id: snapshot UUID (unused currently)\n :param delete_info: {\n 'type': 'qcow2',\n 'file_to_merge': 'a.img',\n 'merge_target_file': 'b.img' or None (if merging file_to_merge into\n active image)\n }\n\n\n Libvirt blockjob handling required for this method is broken\n in versions of libvirt that do not contain:\n http://libvirt.org/git/?p=libvirt.git;h=0f9e67bfad (1.1.1)\n (Patch is pending in 1.0.5-maint branch as well, but we cannot detect\n libvirt 1.0.5.5 vs. 1.0.5.6 here.)\n "
if (not self._has_min_version(MIN_LIBVIRT_BLOCKJOBINFO_VERSION)):
ver = '.'.join([str(x) for x in MIN_LIBVIRT_BLOCKJOBINFO_VERSION])
msg = (_("Libvirt '%s' or later is required for online deletion of volume snapshots.") % ver)
raise exception.Invalid(msg)
LOG.debug('volume_snapshot_delete: delete_info: %s', delete_info)
if (delete_info['type'] != 'qcow2'):
msg = (_('Unknown delete_info type %s') % delete_info['type'])
raise exception.NovaException(msg)
try:
virt_dom = self._lookup_by_name(instance.name)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
my_dev = None
active_disk = None
xml = virt_dom.XMLDesc(0)
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
active_disk_object = None
for guest_disk in device_info.devices:
if (guest_disk.root_name != 'disk'):
continue
if ((guest_disk.target_dev is None) or (guest_disk.serial is None)):
continue
if (guest_disk.serial == volume_id):
my_dev = guest_disk.target_dev
active_disk = guest_disk.source_path
active_protocol = guest_disk.source_protocol
active_disk_object = guest_disk
break
if ((my_dev is None) or ((active_disk is None) and (active_protocol is None))):
msg = (_('Disk with id: %s not found attached to instance.') % volume_id)
LOG.debug('Domain XML: %s', xml)
raise exception.NovaException(msg)
LOG.debug('found device at %s', my_dev)
def _get_snap_dev(filename, backing_store):
if (filename is None):
msg = _('filename cannot be None')
raise exception.NovaException(msg)
LOG.debug(('XML: %s' % xml))
LOG.debug(('active disk object: %s' % active_disk_object))
filename_to_merge = filename
matched_name = None
b = backing_store
index = None
current_filename = active_disk_object.source_name.split('/')[1]
if (current_filename == filename_to_merge):
return (my_dev + '[0]')
while (b is not None):
source_filename = b.source_name.split('/')[1]
if (source_filename == filename_to_merge):
LOG.debug(('found match: %s' % b.source_name))
matched_name = b.source_name
index = b.index
break
b = b.backing_store
if (matched_name is None):
msg = (_('no match found for %s') % filename_to_merge)
raise exception.NovaException(msg)
LOG.debug(('index of match (%s) is %s' % (b.source_name, index)))
my_snap_dev = ('%s[%s]' % (my_dev, index))
return my_snap_dev
if (delete_info['merge_target_file'] is None):
rebase_disk = my_dev
rebase_flags = 0
rebase_base = delete_info['file_to_merge']
if (active_protocol is not None):
rebase_base = _get_snap_dev(delete_info['file_to_merge'], active_disk_object.backing_store)
rebase_bw = 0
LOG.debug('disk: %(disk)s, base: %(base)s, bw: %(bw)s, flags: %(flags)s', {'disk': rebase_disk, 'base': rebase_base, 'bw': rebase_bw, 'flags': rebase_flags})
result = virt_dom.blockRebase(rebase_disk, rebase_base, rebase_bw, rebase_flags)
if (result == 0):
LOG.debug('blockRebase started successfully')
while self._wait_for_block_job(virt_dom, my_dev, abort_on_error=True):
LOG.debug('waiting for blockRebase job completion')
time.sleep(0.5)
else:
my_snap_base = None
my_snap_top = None
commit_disk = my_dev
commit_flags = 0
if (active_protocol is not None):
my_snap_base = _get_snap_dev(delete_info['merge_target_file'], active_disk_object.backing_store)
my_snap_top = _get_snap_dev(delete_info['file_to_merge'], active_disk_object.backing_store)
try:
commit_flags |= libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
except AttributeError:
ver = '.'.join([str(x) for x in MIN_LIBVIRT_BLOCKCOMMIT_RELATIVE_VERSION])
msg = (_("Relative blockcommit support was not detected. Libvirt '%s' or later is required for online deletion of network storage-backed volume snapshots.") % ver)
raise exception.Invalid(msg)
commit_base = (my_snap_base or delete_info['merge_target_file'])
commit_top = (my_snap_top or delete_info['file_to_merge'])
bandwidth = 0
LOG.debug(('will call blockCommit with commit_disk=%(commit_disk)s commit_base=%(commit_base)s commit_top=%(commit_top)s ' % {'commit_disk': commit_disk, 'commit_base': commit_base, 'commit_top': commit_top}))
result = virt_dom.blockCommit(commit_disk, commit_base, commit_top, bandwidth, commit_flags)
if (result == 0):
LOG.debug('blockCommit started successfully')
while self._wait_for_block_job(virt_dom, my_dev, abort_on_error=True):
LOG.debug('waiting for blockCommit job completion')
time.sleep(0.5)
|
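The `_get_snap_dev` helper above produces libvirt's indexed-device notation: `vda[0]` names the active (top) image and `vda[2]` names the backing-chain element carrying `index='2'`, which is how blockRebase/blockCommit targets are addressed for network-backed disks that have no plain file path. A tiny sketch of the naming scheme:

```python
def snap_dev(dev, index):
    # libvirt indexed-device notation, e.g. 'vda[2]' addresses the
    # backing-chain entry whose index attribute is 2
    return '%s[%s]' % (dev, index)

assert snap_dev('vda', 0) == 'vda[0]'  # the active image
assert snap_dev('vda', 2) == 'vda[2]'  # two steps down the chain
```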
def reboot(self, context, instance, network_info, reboot_type, block_device_info=None, bad_volumes_callback=None):
'Reboot a virtual machine, given an instance reference.'
if (reboot_type == 'SOFT'):
try:
soft_reboot_success = self._soft_reboot(instance)
except libvirt.libvirtError as e:
LOG.debug('Instance soft reboot failed: %s', e)
soft_reboot_success = False
if soft_reboot_success:
LOG.info(_LI('Instance soft rebooted successfully.'), instance=instance)
return
else:
LOG.warn(_LW('Failed to soft reboot instance. Trying hard reboot.'), instance=instance)
return self._hard_reboot(context, instance, network_info, block_device_info)
| 3,548,329,928,541,850,600
|
Reboot a virtual machine, given an instance reference.
|
nova/virt/libvirt/driver.py
|
reboot
|
srajag/nova
|
python
|
def reboot(self, context, instance, network_info, reboot_type, block_device_info=None, bad_volumes_callback=None):
if (reboot_type == 'SOFT'):
try:
soft_reboot_success = self._soft_reboot(instance)
except libvirt.libvirtError as e:
LOG.debug('Instance soft reboot failed: %s', e)
soft_reboot_success = False
if soft_reboot_success:
LOG.info(_LI('Instance soft rebooted successfully.'), instance=instance)
return
else:
LOG.warn(_LW('Failed to soft reboot instance. Trying hard reboot.'), instance=instance)
return self._hard_reboot(context, instance, network_info, block_device_info)
|
def _soft_reboot(self, instance):
'Attempt to shutdown and restart the instance gracefully.\n\n We use shutdown and create here so we can return if the guest\n responded and actually rebooted. Note that this method only\n succeeds if the guest responds to acpi. Therefore we return\n success or failure so we can fall back to a hard reboot if\n necessary.\n\n :returns: True if the reboot succeeded\n '
dom = self._lookup_by_name(instance['name'])
state = LIBVIRT_POWER_STATE[dom.info()[0]]
old_domid = dom.ID()
if (state == power_state.RUNNING):
dom.shutdown()
self._prepare_pci_devices_for_use(pci_manager.get_instance_pci_devs(instance))
for x in xrange(CONF.libvirt.wait_soft_reboot_seconds):
dom = self._lookup_by_name(instance['name'])
state = LIBVIRT_POWER_STATE[dom.info()[0]]
new_domid = dom.ID()
if (old_domid != new_domid):
if (state in [power_state.SHUTDOWN, power_state.CRASHED]):
LOG.info(_LI('Instance shutdown successfully.'), instance=instance)
self._create_domain(domain=dom)
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_running, instance)
timer.start(interval=0.5).wait()
return True
else:
LOG.info(_LI('Instance may have been rebooted during soft reboot, so return now.'), instance=instance)
return True
greenthread.sleep(1)
return False
| -4,453,278,297,860,923,400
|
Attempt to shutdown and restart the instance gracefully.
We use shutdown and create here so we can return if the guest
responded and actually rebooted. Note that this method only
succeeds if the guest responds to acpi. Therefore we return
success or failure so we can fall back to a hard reboot if
necessary.
:returns: True if the reboot succeeded
|
nova/virt/libvirt/driver.py
|
_soft_reboot
|
srajag/nova
|
python
|
def _soft_reboot(self, instance):
'Attempt to shutdown and restart the instance gracefully.\n\n We use shutdown and create here so we can return if the guest\n responded and actually rebooted. Note that this method only\n succeeds if the guest responds to acpi. Therefore we return\n success or failure so we can fall back to a hard reboot if\n necessary.\n\n :returns: True if the reboot succeeded\n '
dom = self._lookup_by_name(instance['name'])
state = LIBVIRT_POWER_STATE[dom.info()[0]]
old_domid = dom.ID()
if (state == power_state.RUNNING):
dom.shutdown()
self._prepare_pci_devices_for_use(pci_manager.get_instance_pci_devs(instance))
for x in xrange(CONF.libvirt.wait_soft_reboot_seconds):
dom = self._lookup_by_name(instance['name'])
state = LIBVIRT_POWER_STATE[dom.info()[0]]
new_domid = dom.ID()
if (old_domid != new_domid):
if (state in [power_state.SHUTDOWN, power_state.CRASHED]):
LOG.info(_LI('Instance shutdown successfully.'), instance=instance)
self._create_domain(domain=dom)
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_running, instance)
timer.start(interval=0.5).wait()
return True
else:
LOG.info(_LI('Instance may have been rebooted during soft reboot, so return now.'), instance=instance)
return True
greenthread.sleep(1)
return False
|
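The detection in `_soft_reboot` leans on libvirt domain IDs: a running domain has a positive, ephemeral ID that changes on every start, and a shut-off persistent domain reports -1, so a changed ID means the guest left its original incarnation after `shutdown()` was issued. A condensed illustration with hypothetical values:

```python
old_domid = 7     # ID while the guest was running
new_domid = -1    # ID once the guest honors ACPI shutdown and goes down
# the trigger for re-creating (or declaring rebooted) the domain:
assert old_domid != new_domid
```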
def _hard_reboot(self, context, instance, network_info, block_device_info=None):
'Reboot a virtual machine, given an instance reference.\n\n Performs a Libvirt reset (if supported) on the domain.\n\n If Libvirt reset is unavailable this method actually destroys and\n re-creates the domain to ensure the reboot happens, as the guest\n OS cannot ignore this action.\n\n If xml is set, it uses the passed in xml in place of the xml from the\n existing domain.\n '
self._destroy(instance)
system_meta = utils.instance_sys_meta(instance)
image_meta = utils.get_image_from_system_metadata(system_meta)
if (not image_meta):
image_ref = instance.get('image_ref')
image_meta = compute_utils.get_image_metadata(context, self._image_api, image_ref, instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, block_device_info, image_meta)
xml = self._get_guest_xml(context, instance, network_info, disk_info, image_meta=image_meta, block_device_info=block_device_info, write_to_disk=True)
disk_info_json = self._get_instance_disk_info(instance['name'], xml, block_device_info)
instance_dir = libvirt_utils.get_instance_path(instance)
self._create_images_and_backing(context, instance, instance_dir, disk_info_json)
self._create_domain_and_network(context, xml, instance, network_info, block_device_info, reboot=True, vifs_already_plugged=True)
self._prepare_pci_devices_for_use(pci_manager.get_instance_pci_devs(instance))
def _wait_for_reboot():
'Called at an interval until the VM is running again.'
state = self.get_info(instance)['state']
if (state == power_state.RUNNING):
LOG.info(_LI('Instance rebooted successfully.'), instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
| 2,541,477,673,550,523,000
|
Reboot a virtual machine, given an instance reference.
Performs a Libvirt reset (if supported) on the domain.
If Libvirt reset is unavailable this method actually destroys and
re-creates the domain to ensure the reboot happens, as the guest
OS cannot ignore this action.
If xml is set, it uses the passed in xml in place of the xml from the
existing domain.
|
nova/virt/libvirt/driver.py
|
_hard_reboot
|
srajag/nova
|
python
|
def _hard_reboot(self, context, instance, network_info, block_device_info=None):
'Reboot a virtual machine, given an instance reference.\n\n Performs a Libvirt reset (if supported) on the domain.\n\n If Libvirt reset is unavailable this method actually destroys and\n re-creates the domain to ensure the reboot happens, as the guest\n OS cannot ignore this action.\n\n If xml is set, it uses the passed in xml in place of the xml from the\n existing domain.\n '
self._destroy(instance)
system_meta = utils.instance_sys_meta(instance)
image_meta = utils.get_image_from_system_metadata(system_meta)
if (not image_meta):
image_ref = instance.get('image_ref')
image_meta = compute_utils.get_image_metadata(context, self._image_api, image_ref, instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, block_device_info, image_meta)
xml = self._get_guest_xml(context, instance, network_info, disk_info, image_meta=image_meta, block_device_info=block_device_info, write_to_disk=True)
disk_info_json = self._get_instance_disk_info(instance['name'], xml, block_device_info)
instance_dir = libvirt_utils.get_instance_path(instance)
self._create_images_and_backing(context, instance, instance_dir, disk_info_json)
self._create_domain_and_network(context, xml, instance, network_info, block_device_info, reboot=True, vifs_already_plugged=True)
self._prepare_pci_devices_for_use(pci_manager.get_instance_pci_devs(instance))
def _wait_for_reboot():
'Called at an interval until the VM is running again.'
state = self.get_info(instance)['state']
if (state == power_state.RUNNING):
LOG.info(_LI('Instance rebooted successfully.'), instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
|
def pause(self, instance):
'Pause VM instance.'
dom = self._lookup_by_name(instance['name'])
dom.suspend()
| -1,065,690,872,297,552,300
|
Pause VM instance.
|
nova/virt/libvirt/driver.py
|
pause
|
srajag/nova
|
python
|
def pause(self, instance):
dom = self._lookup_by_name(instance['name'])
dom.suspend()
|
def unpause(self, instance):
'Unpause paused VM instance.'
dom = self._lookup_by_name(instance['name'])
dom.resume()
| 1,876,379,601,638,167,000
|
Unpause paused VM instance.
|
nova/virt/libvirt/driver.py
|
unpause
|
srajag/nova
|
python
|
def unpause(self, instance):
dom = self._lookup_by_name(instance['name'])
dom.resume()
|
def power_off(self, instance):
'Power off the specified instance.'
self._destroy(instance)
| 5,946,449,247,883,757,000
|
Power off the specified instance.
|
nova/virt/libvirt/driver.py
|
power_off
|
srajag/nova
|
python
|
def power_off(self, instance):
self._destroy(instance)
|
def power_on(self, context, instance, network_info, block_device_info=None):
'Power on the specified instance.'
self._hard_reboot(context, instance, network_info, block_device_info)
| -6,413,646,608,397,868,000
|
Power on the specified instance.
|
nova/virt/libvirt/driver.py
|
power_on
|
srajag/nova
|
python
|
def power_on(self, context, instance, network_info, block_device_info=None):
self._hard_reboot(context, instance, network_info, block_device_info)
|
def suspend(self, instance):
'Suspend the specified instance.'
dom = self._lookup_by_name(instance['name'])
self._detach_pci_devices(dom, pci_manager.get_instance_pci_devs(instance))
dom.managedSave(0)
| -4,455,299,906,648,574,500
|
Suspend the specified instance.
|
nova/virt/libvirt/driver.py
|
suspend
|
srajag/nova
|
python
|
def suspend(self, instance):
dom = self._lookup_by_name(instance['name'])
self._detach_pci_devices(dom, pci_manager.get_instance_pci_devs(instance))
dom.managedSave(0)
|
def resume(self, context, instance, network_info, block_device_info=None):
'resume the specified instance.'
xml = self._get_existing_domain_xml(instance, network_info, block_device_info)
dom = self._create_domain_and_network(context, xml, instance, network_info, block_device_info=block_device_info, vifs_already_plugged=True)
self._attach_pci_devices(dom, pci_manager.get_instance_pci_devs(instance))
| 6,518,967,721,012,617,000
|
resume the specified instance.
|
nova/virt/libvirt/driver.py
|
resume
|
srajag/nova
|
python
|
def resume(self, context, instance, network_info, block_device_info=None):
xml = self._get_existing_domain_xml(instance, network_info, block_device_info)
dom = self._create_domain_and_network(context, xml, instance, network_info, block_device_info=block_device_info, vifs_already_plugged=True)
self._attach_pci_devices(dom, pci_manager.get_instance_pci_devs(instance))
|
def resume_state_on_host_boot(self, context, instance, network_info, block_device_info=None):
'resume guest state when a host is booted.'
try:
domain = self._lookup_by_name(instance['name'])
state = LIBVIRT_POWER_STATE[domain.info()[0]]
ignored_states = (power_state.RUNNING, power_state.SUSPENDED, power_state.NOSTATE, power_state.PAUSED)
if (state in ignored_states):
return
except exception.NovaException:
pass
self._hard_reboot(context, instance, network_info, block_device_info)
| 1,156,686,891,336,810,000
|
resume guest state when a host is booted.
|
nova/virt/libvirt/driver.py
|
resume_state_on_host_boot
|
srajag/nova
|
python
|
def resume_state_on_host_boot(self, context, instance, network_info, block_device_info=None):
try:
domain = self._lookup_by_name(instance['name'])
state = LIBVIRT_POWER_STATE[domain.info()[0]]
ignored_states = (power_state.RUNNING, power_state.SUSPENDED, power_state.NOSTATE, power_state.PAUSED)
if (state in ignored_states):
return
except exception.NovaException:
pass
self._hard_reboot(context, instance, network_info, block_device_info)
|
def rescue(self, context, instance, network_info, image_meta, rescue_password):
'Loads a VM using rescue images.\n\n A rescue is normally performed when something goes wrong with the\n primary images and data needs to be corrected/recovered. Rescuing\n should not edit or over-ride the original image, only allow for\n data recovery.\n\n '
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_existing_domain_xml(instance, network_info)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
if (image_meta is not None):
rescue_image_id = image_meta.get('id')
else:
rescue_image_id = None
rescue_images = {'image_id': (rescue_image_id or CONF.libvirt.rescue_image_id or instance.image_ref), 'kernel_id': (CONF.libvirt.rescue_kernel_id or instance.kernel_id), 'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or instance.ramdisk_id)}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, None, image_meta, rescue=True)
self._create_image(context, instance, disk_info['mapping'], '.rescue', rescue_images, network_info=network_info, admin_pass=rescue_password)
xml = self._get_guest_xml(context, instance, network_info, disk_info, image_meta, rescue=rescue_images, write_to_disk=True)
self._destroy(instance)
self._create_domain(xml)
| -1,001,450,618,260,863,000
|
Loads a VM using rescue images.
A rescue is normally performed when something goes wrong with the
primary images and data needs to be corrected/recovered. Rescuing
should not edit or over-ride the original image, only allow for
data recovery.
|
nova/virt/libvirt/driver.py
|
rescue
|
srajag/nova
|
python
|
def rescue(self, context, instance, network_info, image_meta, rescue_password):
'Loads a VM using rescue images.\n\n A rescue is normally performed when something goes wrong with the\n primary images and data needs to be corrected/recovered. Rescuing\n should not edit or over-ride the original image, only allow for\n data recovery.\n\n '
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_existing_domain_xml(instance, network_info)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
if (image_meta is not None):
rescue_image_id = image_meta.get('id')
else:
rescue_image_id = None
rescue_images = {'image_id': (rescue_image_id or CONF.libvirt.rescue_image_id or instance.image_ref), 'kernel_id': (CONF.libvirt.rescue_kernel_id or instance.kernel_id), 'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or instance.ramdisk_id)}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, None, image_meta, rescue=True)
self._create_image(context, instance, disk_info['mapping'], '.rescue', rescue_images, network_info=network_info, admin_pass=rescue_password)
xml = self._get_guest_xml(context, instance, network_info, disk_info, image_meta, rescue=rescue_images, write_to_disk=True)
self._destroy(instance)
self._create_domain(xml)
|
def unrescue(self, instance, network_info):
'Reboot the VM which is being rescued back into primary images.\n '
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
virt_dom = self._lookup_by_name(instance.name)
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
rescue_files = os.path.join(instance_dir, '*.rescue')
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
| 1,013,918,113,273,256,200
|
Reboot the VM which is being rescued back into primary images.
|
nova/virt/libvirt/driver.py
|
unrescue
|
srajag/nova
|
python
|
def unrescue(self, instance, network_info):
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
virt_dom = self._lookup_by_name(instance.name)
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
rescue_files = os.path.join(instance_dir, '*.rescue')
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
|
@staticmethod
def _create_local(target, local_size, unit='G', fs_format=None, label=None):
'Create a blank image of specified size.'
libvirt_utils.create_image('raw', target, ('%d%c' % (local_size, unit)))
| -9,215,295,529,502,526,000
|
Create a blank image of specified size.
|
nova/virt/libvirt/driver.py
|
_create_local
|
srajag/nova
|
python
|
@staticmethod
def _create_local(target, local_size, unit='G', fs_format=None, label=None):
libvirt_utils.create_image('raw', target, ('%d%c' % (local_size, unit)))
|
@staticmethod
def _create_swap(target, swap_mb, max_size=None):
'Create a swap file of specified size.'
libvirt_utils.create_image('raw', target, ('%dM' % swap_mb))
utils.mkfs('swap', target)
| 4,355,613,304,691,187,000
|
Create a swap file of specified size.
|
nova/virt/libvirt/driver.py
|
_create_swap
|
srajag/nova
|
python
|
@staticmethod
def _create_swap(target, swap_mb, max_size=None):
libvirt_utils.create_image('raw', target, ('%dM' % swap_mb))
utils.mkfs('swap', target)
|
@staticmethod
def _is_booted_from_volume(instance, disk_mapping):
'Determines whether the VM is booting from volume\n\n Determines whether the disk mapping indicates that the VM\n is booting from a volume.\n '
return ((not bool(instance.get('image_ref'))) or ('disk' not in disk_mapping))
| -1,955,707,853,542,445,300
|
Determines whether the VM is booting from a volume
Determines whether the disk mapping indicates that the VM
is booting from a volume.
|
nova/virt/libvirt/driver.py
|
_is_booted_from_volume
|
srajag/nova
|
python
|
@staticmethod
def _is_booted_from_volume(instance, disk_mapping):
'Determines whether the VM is booting from volume\n\n Determines whether the disk mapping indicates that the VM\n is booting from a volume.\n '
return ((not bool(instance.get('image_ref'))) or ('disk' not in disk_mapping))
|
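The boot-from-volume check is pure boolean logic over the instance and the disk mapping, so it can be exercised in isolation. A self-contained sketch with made-up sample data showing both ways it triggers:

def is_booted_from_volume(instance, disk_mapping):
    # True when the instance carries no image reference, or when the
    # mapping has no local 'disk' entry for a root disk.
    return (not bool(instance.get('image_ref'))) or ('disk' not in disk_mapping)

assert is_booted_from_volume({'image_ref': ''}, {'disk': {}})        # no image ref
assert is_booted_from_volume({'image_ref': 'img'}, {'root': {}})     # no local disk
assert not is_booted_from_volume({'image_ref': 'img'}, {'disk': {}}) # local boot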
def _inject_data(self, instance, network_info, admin_pass, files, suffix):
'Injects data in a disk image\n\n Helper used for injecting data in a disk image file system.\n\n Keyword arguments:\n instance -- a dict that refers instance specifications\n network_info -- a dict that refers network speficications\n admin_pass -- a string used to set an admin password\n files -- a list of files needs to be injected\n suffix -- a string used as an image name suffix\n '
target_partition = None
if (not instance['kernel_id']):
target_partition = CONF.libvirt.inject_partition
if (target_partition == 0):
target_partition = None
if (CONF.libvirt.virt_type == 'lxc'):
target_partition = None
if (CONF.libvirt.inject_key and instance.get('key_data')):
key = str(instance['key_data'])
else:
key = None
if (not CONF.libvirt.inject_password):
admin_pass = None
net = netutils.get_injected_network_template(network_info, libvirt_virt_type=CONF.libvirt.virt_type)
metadata = instance.get('metadata')
image_type = CONF.libvirt.images_type
if any((key, net, metadata, admin_pass, files)):
injection_image = self.image_backend.image(instance, ('disk' + suffix), image_type)
img_id = instance['image_ref']
if (not injection_image.check_image_exists()):
LOG.warn(_LW('Image %s not found on disk storage. Continuing without injecting data'), injection_image.path, instance=instance)
return
try:
disk.inject_data(injection_image.path, key, net, metadata, admin_pass, files, partition=target_partition, use_cow=CONF.use_cow_images, mandatory=('files',))
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error injecting data into image %(img_id)s (%(e)s)'), {'img_id': img_id, 'e': e}, instance=instance)
| 7,476,893,220,072,604,000
|
Injects data in a disk image
Helper used for injecting data in a disk image file system.
Keyword arguments:
instance -- a dict that refers to instance specifications
network_info -- a dict that refers to network specifications
admin_pass -- a string used to set an admin password
files -- a list of files that need to be injected
suffix -- a string used as an image name suffix
|
nova/virt/libvirt/driver.py
|
_inject_data
|
srajag/nova
|
python
|
def _inject_data(self, instance, network_info, admin_pass, files, suffix):
'Injects data in a disk image\n\n Helper used for injecting data in a disk image file system.\n\n Keyword arguments:\n instance -- a dict that refers instance specifications\n network_info -- a dict that refers network speficications\n admin_pass -- a string used to set an admin password\n files -- a list of files needs to be injected\n suffix -- a string used as an image name suffix\n '
target_partition = None
if (not instance['kernel_id']):
target_partition = CONF.libvirt.inject_partition
if (target_partition == 0):
target_partition = None
if (CONF.libvirt.virt_type == 'lxc'):
target_partition = None
if (CONF.libvirt.inject_key and instance.get('key_data')):
key = str(instance['key_data'])
else:
key = None
if (not CONF.libvirt.inject_password):
admin_pass = None
net = netutils.get_injected_network_template(network_info, libvirt_virt_type=CONF.libvirt.virt_type)
metadata = instance.get('metadata')
image_type = CONF.libvirt.images_type
if any((key, net, metadata, admin_pass, files)):
injection_image = self.image_backend.image(instance, ('disk' + suffix), image_type)
img_id = instance['image_ref']
if (not injection_image.check_image_exists()):
LOG.warn(_LW('Image %s not found on disk storage. Continuing without injecting data'), injection_image.path, instance=instance)
return
try:
disk.inject_data(injection_image.path, key, net, metadata, admin_pass, files, partition=target_partition, use_cow=CONF.use_cow_images, mandatory=('files',))
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error injecting data into image %(img_id)s (%(e)s)'), {'img_id': img_id, 'e': e}, instance=instance)
|
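The target-partition selection at the top of _inject_data folds three rules into a few branches. A hedged sketch of just that decision, with plain arguments standing in for the instance field and config option:

def pick_target_partition(kernel_id, configured_partition, virt_type):
    # Only whole-disk images (no separate kernel) get a target partition;
    # a configured value of 0 and the lxc case both disable injection
    # into a specific partition.
    target = None
    if not kernel_id:
        target = configured_partition
        if target == 0:
            target = None
    if virt_type == 'lxc':
        target = None
    return target

assert pick_target_partition(None, 1, 'kvm') == 1
assert pick_target_partition(None, 0, 'kvm') is None
assert pick_target_partition(None, 1, 'lxc') is None
assert pick_target_partition('some-kernel', 1, 'kvm') is None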
def _set_host_enabled(self, enabled, disable_reason=DISABLE_REASON_UNDEFINED):
"Enables / Disables the compute service on this host.\n\n This doesn't override non-automatic disablement with an automatic\n setting; thereby permitting operators to keep otherwise\n healthy hosts out of rotation.\n "
status_name = {True: 'disabled', False: 'enabled'}
disable_service = (not enabled)
ctx = nova_context.get_admin_context()
try:
service = objects.Service.get_by_compute_host(ctx, CONF.host)
if (service.disabled != disable_service):
if ((not service.disabled) or (service.disabled_reason and service.disabled_reason.startswith(DISABLE_PREFIX))):
service.disabled = disable_service
service.disabled_reason = ((DISABLE_PREFIX + disable_reason) if disable_service else DISABLE_REASON_UNDEFINED)
service.save()
LOG.debug('Updating compute service status to %s', status_name[disable_service])
else:
LOG.debug('Not overriding manual compute service status with: %s', status_name[disable_service])
except exception.ComputeHostNotFound:
LOG.warn(_LW('Cannot update service status on host: %s, since it is not registered.'), CONF.host)
except Exception:
LOG.warn(_LW('Cannot update service status on host: %s, due to an unexpected exception.'), CONF.host, exc_info=True)
| -4,019,735,877,820,524,500
|
Enables / Disables the compute service on this host.
This doesn't override non-automatic disablement with an automatic
setting, thereby permitting operators to keep otherwise
healthy hosts out of rotation.
|
nova/virt/libvirt/driver.py
|
_set_host_enabled
|
srajag/nova
|
python
|
def _set_host_enabled(self, enabled, disable_reason=DISABLE_REASON_UNDEFINED):
"Enables / Disables the compute service on this host.\n\n This doesn't override non-automatic disablement with an automatic\n setting; thereby permitting operators to keep otherwise\n healthy hosts out of rotation.\n "
status_name = {True: 'disabled', False: 'enabled'}
disable_service = (not enabled)
ctx = nova_context.get_admin_context()
try:
service = objects.Service.get_by_compute_host(ctx, CONF.host)
if (service.disabled != disable_service):
if ((not service.disabled) or (service.disabled_reason and service.disabled_reason.startswith(DISABLE_PREFIX))):
service.disabled = disable_service
service.disabled_reason = ((DISABLE_PREFIX + disable_reason) if disable_service else DISABLE_REASON_UNDEFINED)
service.save()
LOG.debug('Updating compute service status to %s', status_name[disable_service])
else:
LOG.debug('Not overriding manual compute service status with: %s', status_name[disable_service])
except exception.ComputeHostNotFound:
LOG.warn(_LW('Cannot update service status on host: %s, since it is not registered.'), CONF.host)
except Exception:
LOG.warn(_LW('Cannot update service status on host: %s, due to an unexpected exception.'), CONF.host, exc_info=True)
|
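The guard in _set_host_enabled that stops automatic toggling from clobbering a manual disable can be read on its own. A sketch of that decision, assuming DISABLE_PREFIX is a marker string prepended to automatic disable reasons (the value below is a stand-in):

DISABLE_PREFIX = 'AUTO: '  # assumed marker; the real constant lives in the driver

def may_update(service_disabled, disabled_reason, want_disable):
    # Only flip the flag when it actually changes, and only touch a
    # disabled service if it was disabled automatically; manual
    # disables (no prefix in the reason) are left alone.
    if service_disabled == want_disable:
        return False
    return (not service_disabled) or bool(
        disabled_reason and disabled_reason.startswith(DISABLE_PREFIX))

assert may_update(False, None, True)               # enabled -> auto-disable: allowed
assert may_update(True, 'AUTO: hv down', False)    # auto-disabled -> enable: allowed
assert not may_update(True, 'maintenance', False)  # manual disable is preserved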
def _get_host_capabilities(self):
'Returns an instance of config.LibvirtConfigCaps representing\n the capabilities of the host.\n '
if (not self._caps):
xmlstr = self._conn.getCapabilities()
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
if hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'):
try:
features = self._conn.baselineCPU([self._caps.host.cpu.to_xml()], libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
if (features and (features != (- 1))):
cpu = vconfig.LibvirtConfigCPU()
cpu.parse_str(features)
self._caps.host.cpu.features = cpu.features
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if (error_code == libvirt.VIR_ERR_NO_SUPPORT):
LOG.warn(_LW('URI %(uri)s does not support full set of host capabilities: %(error)s'), {'uri': self.uri(), 'error': ex})
else:
raise
return self._caps
| -7,659,002,948,757,558,000
|
Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host.
|
nova/virt/libvirt/driver.py
|
_get_host_capabilities
|
srajag/nova
|
python
|
def _get_host_capabilities(self):
'Returns an instance of config.LibvirtConfigCaps representing\n the capabilities of the host.\n '
if (not self._caps):
xmlstr = self._conn.getCapabilities()
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
if hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'):
try:
features = self._conn.baselineCPU([self._caps.host.cpu.to_xml()], libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
if (features and (features != (- 1))):
cpu = vconfig.LibvirtConfigCPU()
cpu.parse_str(features)
self._caps.host.cpu.features = cpu.features
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if (error_code == libvirt.VIR_ERR_NO_SUPPORT):
LOG.warn(_LW('URI %(uri)s does not support full set of host capabilities: %(error)s'), {'uri': self.uri(), 'error': ex})
else:
raise
return self._caps
|
def _get_host_uuid(self):
'Returns a UUID representing the host.'
caps = self._get_host_capabilities()
return caps.host.uuid
| -8,148,530,765,576,642,000
|
Returns a UUID representing the host.
|
nova/virt/libvirt/driver.py
|
_get_host_uuid
|
srajag/nova
|
python
|
def _get_host_uuid(self):
caps = self._get_host_capabilities()
return caps.host.uuid
|
def _get_guest_config_meta(self, context, instance, flavor):
'Get metadata config for guest.'
meta = vconfig.LibvirtConfigGuestMetaNovaInstance()
meta.package = version.version_string_with_package()
meta.name = instance['display_name']
meta.creationTime = time.time()
if (instance['image_ref'] not in ('', None)):
meta.roottype = 'image'
meta.rootid = instance['image_ref']
if (context is not None):
ometa = vconfig.LibvirtConfigGuestMetaNovaOwner()
ometa.userid = context.user_id
ometa.username = context.user_name
ometa.projectid = context.project_id
ometa.projectname = context.project_name
meta.owner = ometa
fmeta = vconfig.LibvirtConfigGuestMetaNovaFlavor()
fmeta.name = flavor.name
fmeta.memory = flavor.memory_mb
fmeta.vcpus = flavor.vcpus
fmeta.ephemeral = flavor.ephemeral_gb
fmeta.disk = flavor.root_gb
fmeta.swap = flavor.swap
meta.flavor = fmeta
return meta
| 8,349,429,409,947,018,000
|
Get metadata config for guest.
|
nova/virt/libvirt/driver.py
|
_get_guest_config_meta
|
srajag/nova
|
python
|
def _get_guest_config_meta(self, context, instance, flavor):
meta = vconfig.LibvirtConfigGuestMetaNovaInstance()
meta.package = version.version_string_with_package()
meta.name = instance['display_name']
meta.creationTime = time.time()
if (instance['image_ref'] not in ('', None)):
meta.roottype = 'image'
meta.rootid = instance['image_ref']
if (context is not None):
ometa = vconfig.LibvirtConfigGuestMetaNovaOwner()
ometa.userid = context.user_id
ometa.username = context.user_name
ometa.projectid = context.project_id
ometa.projectname = context.project_name
meta.owner = ometa
fmeta = vconfig.LibvirtConfigGuestMetaNovaFlavor()
fmeta.name = flavor.name
fmeta.memory = flavor.memory_mb
fmeta.vcpus = flavor.vcpus
fmeta.ephemeral = flavor.ephemeral_gb
fmeta.disk = flavor.root_gb
fmeta.swap = flavor.swap
meta.flavor = fmeta
return meta
|
def _get_guest_config(self, instance, network_info, image_meta, disk_info, rescue=None, block_device_info=None, context=None):
"Get config data for parameters.\n\n :param rescue: optional dictionary that should contain the key\n 'ramdisk_id' if a ramdisk is needed for the rescue image and\n 'kernel_id' if a kernel is needed for the rescue image.\n "
flavor = objects.Flavor.get_by_id(nova_context.get_admin_context(read_deleted='yes'), instance['instance_type_id'])
inst_path = libvirt_utils.get_instance_path(instance)
disk_mapping = disk_info['mapping']
img_meta_prop = (image_meta.get('properties', {}) if image_meta else {})
CONSOLE = 'console=tty0 console=ttyS0'
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = CONF.libvirt.virt_type
guest.name = instance['name']
guest.uuid = instance['uuid']
guest.memory = (flavor.memory_mb * units.Ki)
guest.vcpus = flavor.vcpus
guest.cpuset = hardware.get_vcpu_pin_set()
guest.metadata.append(self._get_guest_config_meta(context, instance, flavor))
cputuning = ['shares', 'period', 'quota']
for name in cputuning:
key = ('quota:cpu_' + name)
if (key in flavor.extra_specs):
if (guest.cputune is None):
guest.cputune = vconfig.LibvirtConfigGuestCPUTune()
setattr(guest.cputune, name, int(flavor.extra_specs[key]))
guest.cpu = self._get_guest_cpu_config(flavor, image_meta)
if ('root' in disk_mapping):
root_device_name = block_device.prepend_dev(disk_mapping['root']['dev'])
else:
root_device_name = None
if root_device_name:
instance.root_device_name = root_device_name
instance.save()
guest.os_type = vm_mode.get_from_instance(instance)
if (guest.os_type is None):
if (CONF.libvirt.virt_type == 'lxc'):
guest.os_type = vm_mode.EXE
elif (CONF.libvirt.virt_type == 'uml'):
guest.os_type = vm_mode.UML
elif (CONF.libvirt.virt_type == 'xen'):
guest.os_type = vm_mode.XEN
else:
guest.os_type = vm_mode.HVM
if ((CONF.libvirt.virt_type == 'xen') and (guest.os_type == vm_mode.HVM)):
guest.os_loader = CONF.libvirt.xen_hvmloader_path
if (CONF.libvirt.virt_type in ('kvm', 'qemu')):
caps = self._get_host_capabilities()
if (caps.host.cpu.arch in ('i686', 'x86_64')):
guest.sysinfo = self._get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
if ((image_meta is not None) and image_meta.get('properties') and (image_meta['properties'].get('hw_machine_type') is not None)):
guest.os_mach_type = image_meta['properties']['hw_machine_type']
else:
if (caps.host.cpu.arch == 'armv7l'):
guest.os_mach_type = 'vexpress-a15'
if (caps.host.cpu.arch == 'aarch64'):
guest.os_mach_type = 'virt'
if (CONF.libvirt.virt_type == 'lxc'):
guest.os_init_path = '/sbin/init'
guest.os_cmdline = CONSOLE
elif (CONF.libvirt.virt_type == 'uml'):
guest.os_kernel = '/usr/bin/linux'
guest.os_root = root_device_name
elif rescue:
if rescue.get('kernel_id'):
guest.os_kernel = os.path.join(inst_path, 'kernel.rescue')
if (CONF.libvirt.virt_type == 'xen'):
guest.os_cmdline = ('ro root=%s' % root_device_name)
else:
guest.os_cmdline = ('root=%s %s' % (root_device_name, CONSOLE))
if (CONF.libvirt.virt_type == 'qemu'):
guest.os_cmdline += ' no_timer_check'
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, 'ramdisk.rescue')
elif instance['kernel_id']:
guest.os_kernel = os.path.join(inst_path, 'kernel')
if (CONF.libvirt.virt_type == 'xen'):
guest.os_cmdline = ('ro root=%s' % root_device_name)
else:
guest.os_cmdline = ('root=%s %s' % (root_device_name, CONSOLE))
if (CONF.libvirt.virt_type == 'qemu'):
guest.os_cmdline += ' no_timer_check'
if instance['ramdisk_id']:
guest.os_initrd = os.path.join(inst_path, 'ramdisk')
if image_meta:
img_props = image_meta.get('properties', {})
if img_props.get('os_command_line'):
guest.os_cmdline = img_props.get('os_command_line')
else:
guest.os_boot_dev = blockinfo.get_boot_order(disk_info)
if ((CONF.libvirt.virt_type != 'lxc') and (CONF.libvirt.virt_type != 'uml')):
guest.acpi = True
guest.apic = True
clk = vconfig.LibvirtConfigGuestClock()
if (instance['os_type'] == 'windows'):
LOG.info(_LI('Configuring timezone for windows instance to localtime'), instance=instance)
clk.offset = 'localtime'
else:
clk.offset = 'utc'
guest.set_clock(clk)
if (CONF.libvirt.virt_type == 'kvm'):
tmpit = vconfig.LibvirtConfigGuestTimer()
tmpit.name = 'pit'
tmpit.tickpolicy = 'delay'
tmrtc = vconfig.LibvirtConfigGuestTimer()
tmrtc.name = 'rtc'
tmrtc.tickpolicy = 'catchup'
clk.add_timer(tmpit)
clk.add_timer(tmrtc)
arch = libvirt_utils.get_arch(image_meta)
if (arch in ('i686', 'x86_64')):
tmhpet = vconfig.LibvirtConfigGuestTimer()
tmhpet.name = 'hpet'
tmhpet.present = False
clk.add_timer(tmhpet)
for config in self._get_guest_storage_config(instance, image_meta, disk_info, rescue, block_device_info, flavor):
guest.add_device(config)
for vif in network_info:
config = self.vif_driver.get_config(instance, vif, image_meta, flavor, CONF.libvirt.virt_type)
guest.add_device(config)
if ((CONF.libvirt.virt_type == 'qemu') or (CONF.libvirt.virt_type == 'kvm')):
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = 'file'
consolelog.source_path = self._get_console_log_path(instance)
guest.add_device(consolelog)
consolepty = vconfig.LibvirtConfigGuestSerial()
else:
consolepty = vconfig.LibvirtConfigGuestConsole()
consolepty.type = 'pty'
guest.add_device(consolepty)
need_usb_tablet = False
if CONF.vnc_enabled:
need_usb_tablet = CONF.libvirt.use_usb_tablet
elif (CONF.spice.enabled and (not CONF.spice.agent_enabled)):
need_usb_tablet = CONF.libvirt.use_usb_tablet
if (need_usb_tablet and (guest.os_type == vm_mode.HVM)):
tablet = vconfig.LibvirtConfigGuestInput()
tablet.type = 'tablet'
tablet.bus = 'usb'
guest.add_device(tablet)
if (CONF.spice.enabled and CONF.spice.agent_enabled and (CONF.libvirt.virt_type not in ('lxc', 'uml', 'xen'))):
channel = vconfig.LibvirtConfigGuestChannel()
channel.target_name = 'com.redhat.spice.0'
guest.add_device(channel)
add_video_driver = False
if (CONF.vnc_enabled and (CONF.libvirt.virt_type not in ('lxc', 'uml'))):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = 'vnc'
graphics.keymap = CONF.vnc_keymap
graphics.listen = CONF.vncserver_listen
guest.add_device(graphics)
add_video_driver = True
if (CONF.spice.enabled and (CONF.libvirt.virt_type not in ('lxc', 'uml', 'xen'))):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = 'spice'
graphics.keymap = CONF.spice.keymap
graphics.listen = CONF.spice.server_listen
guest.add_device(graphics)
add_video_driver = True
if add_video_driver:
VALID_VIDEO_DEVICES = ('vga', 'cirrus', 'vmvga', 'xen', 'qxl')
video = vconfig.LibvirtConfigGuestVideo()
arch = libvirt_utils.get_arch(image_meta)
if (guest.os_type == vm_mode.XEN):
video.type = 'xen'
elif (arch in ('ppc', 'ppc64')):
video.type = 'vga'
elif CONF.spice.enabled:
video.type = 'qxl'
if img_meta_prop.get('hw_video_model'):
video.type = img_meta_prop.get('hw_video_model')
if (video.type not in VALID_VIDEO_DEVICES):
raise exception.InvalidVideoMode(model=video.type)
video_ram = int(img_meta_prop.get('hw_video_ram', 0))
max_vram = int(flavor.extra_specs.get('hw_video:ram_max_mb', 0))
if (video_ram > max_vram):
raise exception.RequestedVRamTooHigh(req_vram=video_ram, max_vram=max_vram)
if (max_vram and video_ram):
video.vram = video_ram
guest.add_device(video)
if (CONF.libvirt.virt_type in ('qemu', 'kvm')):
qga_enabled = False
hw_qga = img_meta_prop.get('hw_qemu_guest_agent', 'no')
if (hw_qga.lower() == 'yes'):
LOG.debug('Qemu guest agent is enabled through image metadata', instance=instance)
qga_enabled = True
if qga_enabled:
qga = vconfig.LibvirtConfigGuestChannel()
qga.type = 'unix'
qga.target_name = 'org.qemu.guest_agent.0'
qga.source_path = ('/var/lib/libvirt/qemu/%s.%s.sock' % ('org.qemu.guest_agent.0', instance['name']))
guest.add_device(qga)
if ((img_meta_prop.get('hw_rng_model') == 'virtio') and (flavor.extra_specs.get('hw_rng:allowed', '').lower() == 'true')):
rng_device = vconfig.LibvirtConfigGuestRng()
rate_bytes = flavor.extra_specs.get('hw_rng:rate_bytes', 0)
period = flavor.extra_specs.get('hw_rng:rate_period', 0)
if rate_bytes:
rng_device.rate_bytes = int(rate_bytes)
rng_device.rate_period = int(period)
if (CONF.libvirt.rng_dev_path and (not os.path.exists(CONF.libvirt.rng_dev_path))):
raise exception.RngDeviceNotExist(path=CONF.libvirt.rng_dev_path)
rng_device.backend = CONF.libvirt.rng_dev_path
guest.add_device(rng_device)
if (CONF.libvirt.virt_type in ('xen', 'qemu', 'kvm')):
for pci_dev in pci_manager.get_instance_pci_devs(instance):
guest.add_device(self._get_guest_pci_device(pci_dev))
elif (len(pci_manager.get_instance_pci_devs(instance)) > 0):
raise exception.PciDeviceUnsupportedHypervisor(type=CONF.libvirt.virt_type)
watchdog_action = flavor.extra_specs.get('hw_watchdog_action', 'disabled')
if ((image_meta is not None) and image_meta.get('properties', {}).get('hw_watchdog_action')):
watchdog_action = image_meta['properties']['hw_watchdog_action']
if (watchdog_action != 'disabled'):
if watchdog_actions.is_valid_watchdog_action(watchdog_action):
bark = vconfig.LibvirtConfigGuestWatchdog()
bark.action = watchdog_action
guest.add_device(bark)
else:
raise exception.InvalidWatchdogAction(action=watchdog_action)
return guest
| 1,053,936,662,492,052,500
|
Get config data for parameters.
:param rescue: optional dictionary that should contain the key
'ramdisk_id' if a ramdisk is needed for the rescue image and
'kernel_id' if a kernel is needed for the rescue image.
|
nova/virt/libvirt/driver.py
|
_get_guest_config
|
srajag/nova
|
python
|
def _get_guest_config(self, instance, network_info, image_meta, disk_info, rescue=None, block_device_info=None, context=None):
"Get config data for parameters.\n\n :param rescue: optional dictionary that should contain the key\n 'ramdisk_id' if a ramdisk is needed for the rescue image and\n 'kernel_id' if a kernel is needed for the rescue image.\n "
flavor = objects.Flavor.get_by_id(nova_context.get_admin_context(read_deleted='yes'), instance['instance_type_id'])
inst_path = libvirt_utils.get_instance_path(instance)
disk_mapping = disk_info['mapping']
img_meta_prop = (image_meta.get('properties', {}) if image_meta else {})
CONSOLE = 'console=tty0 console=ttyS0'
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = CONF.libvirt.virt_type
guest.name = instance['name']
guest.uuid = instance['uuid']
guest.memory = (flavor.memory_mb * units.Ki)
guest.vcpus = flavor.vcpus
guest.cpuset = hardware.get_vcpu_pin_set()
guest.metadata.append(self._get_guest_config_meta(context, instance, flavor))
cputuning = ['shares', 'period', 'quota']
for name in cputuning:
key = ('quota:cpu_' + name)
if (key in flavor.extra_specs):
if (guest.cputune is None):
guest.cputune = vconfig.LibvirtConfigGuestCPUTune()
setattr(guest.cputune, name, int(flavor.extra_specs[key]))
guest.cpu = self._get_guest_cpu_config(flavor, image_meta)
if ('root' in disk_mapping):
root_device_name = block_device.prepend_dev(disk_mapping['root']['dev'])
else:
root_device_name = None
if root_device_name:
instance.root_device_name = root_device_name
instance.save()
guest.os_type = vm_mode.get_from_instance(instance)
if (guest.os_type is None):
if (CONF.libvirt.virt_type == 'lxc'):
guest.os_type = vm_mode.EXE
elif (CONF.libvirt.virt_type == 'uml'):
guest.os_type = vm_mode.UML
elif (CONF.libvirt.virt_type == 'xen'):
guest.os_type = vm_mode.XEN
else:
guest.os_type = vm_mode.HVM
if ((CONF.libvirt.virt_type == 'xen') and (guest.os_type == vm_mode.HVM)):
guest.os_loader = CONF.libvirt.xen_hvmloader_path
if (CONF.libvirt.virt_type in ('kvm', 'qemu')):
caps = self._get_host_capabilities()
if (caps.host.cpu.arch in ('i686', 'x86_64')):
guest.sysinfo = self._get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
if ((image_meta is not None) and image_meta.get('properties') and (image_meta['properties'].get('hw_machine_type') is not None)):
guest.os_mach_type = image_meta['properties']['hw_machine_type']
else:
if (caps.host.cpu.arch == 'armv7l'):
guest.os_mach_type = 'vexpress-a15'
if (caps.host.cpu.arch == 'aarch64'):
guest.os_mach_type = 'virt'
if (CONF.libvirt.virt_type == 'lxc'):
guest.os_init_path = '/sbin/init'
guest.os_cmdline = CONSOLE
elif (CONF.libvirt.virt_type == 'uml'):
guest.os_kernel = '/usr/bin/linux'
guest.os_root = root_device_name
elif rescue:
if rescue.get('kernel_id'):
guest.os_kernel = os.path.join(inst_path, 'kernel.rescue')
if (CONF.libvirt.virt_type == 'xen'):
guest.os_cmdline = ('ro root=%s' % root_device_name)
else:
guest.os_cmdline = ('root=%s %s' % (root_device_name, CONSOLE))
if (CONF.libvirt.virt_type == 'qemu'):
guest.os_cmdline += ' no_timer_check'
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, 'ramdisk.rescue')
elif instance['kernel_id']:
guest.os_kernel = os.path.join(inst_path, 'kernel')
if (CONF.libvirt.virt_type == 'xen'):
guest.os_cmdline = ('ro root=%s' % root_device_name)
else:
guest.os_cmdline = ('root=%s %s' % (root_device_name, CONSOLE))
if (CONF.libvirt.virt_type == 'qemu'):
guest.os_cmdline += ' no_timer_check'
if instance['ramdisk_id']:
guest.os_initrd = os.path.join(inst_path, 'ramdisk')
if image_meta:
img_props = image_meta.get('properties', {})
if img_props.get('os_command_line'):
guest.os_cmdline = img_props.get('os_command_line')
else:
guest.os_boot_dev = blockinfo.get_boot_order(disk_info)
if ((CONF.libvirt.virt_type != 'lxc') and (CONF.libvirt.virt_type != 'uml')):
guest.acpi = True
guest.apic = True
clk = vconfig.LibvirtConfigGuestClock()
if (instance['os_type'] == 'windows'):
LOG.info(_LI('Configuring timezone for windows instance to localtime'), instance=instance)
clk.offset = 'localtime'
else:
clk.offset = 'utc'
guest.set_clock(clk)
if (CONF.libvirt.virt_type == 'kvm'):
tmpit = vconfig.LibvirtConfigGuestTimer()
tmpit.name = 'pit'
tmpit.tickpolicy = 'delay'
tmrtc = vconfig.LibvirtConfigGuestTimer()
tmrtc.name = 'rtc'
tmrtc.tickpolicy = 'catchup'
clk.add_timer(tmpit)
clk.add_timer(tmrtc)
arch = libvirt_utils.get_arch(image_meta)
if (arch in ('i686', 'x86_64')):
tmhpet = vconfig.LibvirtConfigGuestTimer()
tmhpet.name = 'hpet'
tmhpet.present = False
clk.add_timer(tmhpet)
for config in self._get_guest_storage_config(instance, image_meta, disk_info, rescue, block_device_info, flavor):
guest.add_device(config)
for vif in network_info:
config = self.vif_driver.get_config(instance, vif, image_meta, flavor, CONF.libvirt.virt_type)
guest.add_device(config)
if ((CONF.libvirt.virt_type == 'qemu') or (CONF.libvirt.virt_type == 'kvm')):
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = 'file'
consolelog.source_path = self._get_console_log_path(instance)
guest.add_device(consolelog)
consolepty = vconfig.LibvirtConfigGuestSerial()
else:
consolepty = vconfig.LibvirtConfigGuestConsole()
consolepty.type = 'pty'
guest.add_device(consolepty)
need_usb_tablet = False
if CONF.vnc_enabled:
need_usb_tablet = CONF.libvirt.use_usb_tablet
elif (CONF.spice.enabled and (not CONF.spice.agent_enabled)):
need_usb_tablet = CONF.libvirt.use_usb_tablet
if (need_usb_tablet and (guest.os_type == vm_mode.HVM)):
tablet = vconfig.LibvirtConfigGuestInput()
tablet.type = 'tablet'
tablet.bus = 'usb'
guest.add_device(tablet)
if (CONF.spice.enabled and CONF.spice.agent_enabled and (CONF.libvirt.virt_type not in ('lxc', 'uml', 'xen'))):
channel = vconfig.LibvirtConfigGuestChannel()
channel.target_name = 'com.redhat.spice.0'
guest.add_device(channel)
add_video_driver = False
if (CONF.vnc_enabled and (CONF.libvirt.virt_type not in ('lxc', 'uml'))):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = 'vnc'
graphics.keymap = CONF.vnc_keymap
graphics.listen = CONF.vncserver_listen
guest.add_device(graphics)
add_video_driver = True
if (CONF.spice.enabled and (CONF.libvirt.virt_type not in ('lxc', 'uml', 'xen'))):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = 'spice'
graphics.keymap = CONF.spice.keymap
graphics.listen = CONF.spice.server_listen
guest.add_device(graphics)
add_video_driver = True
if add_video_driver:
VALID_VIDEO_DEVICES = ('vga', 'cirrus', 'vmvga', 'xen', 'qxl')
video = vconfig.LibvirtConfigGuestVideo()
arch = libvirt_utils.get_arch(image_meta)
if (guest.os_type == vm_mode.XEN):
video.type = 'xen'
elif (arch in ('ppc', 'ppc64')):
video.type = 'vga'
elif CONF.spice.enabled:
video.type = 'qxl'
if img_meta_prop.get('hw_video_model'):
video.type = img_meta_prop.get('hw_video_model')
if (video.type not in VALID_VIDEO_DEVICES):
raise exception.InvalidVideoMode(model=video.type)
video_ram = int(img_meta_prop.get('hw_video_ram', 0))
max_vram = int(flavor.extra_specs.get('hw_video:ram_max_mb', 0))
if (video_ram > max_vram):
raise exception.RequestedVRamTooHigh(req_vram=video_ram, max_vram=max_vram)
if (max_vram and video_ram):
video.vram = video_ram
guest.add_device(video)
if (CONF.libvirt.virt_type in ('qemu', 'kvm')):
qga_enabled = False
hw_qga = img_meta_prop.get('hw_qemu_guest_agent', 'no')
if (hw_qga.lower() == 'yes'):
LOG.debug('Qemu guest agent is enabled through image metadata', instance=instance)
qga_enabled = True
if qga_enabled:
qga = vconfig.LibvirtConfigGuestChannel()
qga.type = 'unix'
qga.target_name = 'org.qemu.guest_agent.0'
qga.source_path = ('/var/lib/libvirt/qemu/%s.%s.sock' % ('org.qemu.guest_agent.0', instance['name']))
guest.add_device(qga)
if ((img_meta_prop.get('hw_rng_model') == 'virtio') and (flavor.extra_specs.get('hw_rng:allowed', '').lower() == 'true')):
rng_device = vconfig.LibvirtConfigGuestRng()
rate_bytes = flavor.extra_specs.get('hw_rng:rate_bytes', 0)
period = flavor.extra_specs.get('hw_rng:rate_period', 0)
if rate_bytes:
rng_device.rate_bytes = int(rate_bytes)
rng_device.rate_period = int(period)
if (CONF.libvirt.rng_dev_path and (not os.path.exists(CONF.libvirt.rng_dev_path))):
raise exception.RngDeviceNotExist(path=CONF.libvirt.rng_dev_path)
rng_device.backend = CONF.libvirt.rng_dev_path
guest.add_device(rng_device)
if (CONF.libvirt.virt_type in ('xen', 'qemu', 'kvm')):
for pci_dev in pci_manager.get_instance_pci_devs(instance):
guest.add_device(self._get_guest_pci_device(pci_dev))
elif (len(pci_manager.get_instance_pci_devs(instance)) > 0):
raise exception.PciDeviceUnsupportedHypervisor(type=CONF.libvirt.virt_type)
watchdog_action = flavor.extra_specs.get('hw_watchdog_action', 'disabled')
if ((image_meta is not None) and image_meta.get('properties', {}).get('hw_watchdog_action')):
watchdog_action = image_meta['properties']['hw_watchdog_action']
if (watchdog_action != 'disabled'):
if watchdog_actions.is_valid_watchdog_action(watchdog_action):
bark = vconfig.LibvirtConfigGuestWatchdog()
bark.action = watchdog_action
guest.add_device(bark)
else:
raise exception.InvalidWatchdogAction(action=watchdog_action)
return guest
|
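The quota:cpu_* handling near the top of _get_guest_config is a small reusable pattern: probe a fixed key set and create the tuning object lazily, only when a key is present. A standalone sketch with a plain class standing in for vconfig.LibvirtConfigGuestCPUTune:

class CPUTune(object):
    # stand-in for the libvirt config class
    shares = period = quota = None

def build_cputune(extra_specs):
    cputune = None
    for name in ('shares', 'period', 'quota'):
        key = 'quota:cpu_' + name
        if key in extra_specs:
            if cputune is None:
                cputune = CPUTune()
            setattr(cputune, name, int(extra_specs[key]))
    return cputune

tune = build_cputune({'quota:cpu_shares': '512'})
assert tune.shares == 512 and tune.quota is None
assert build_cputune({}) is None  # nothing requested -> no cputune element at all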
def _lookup_by_id(self, instance_id):
'Retrieve libvirt domain object given an instance id.\n\n All libvirt error handling should be handled in this method and\n relevant nova exceptions should be raised in response.\n\n '
try:
return self._conn.lookupByID(instance_id)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if (error_code == libvirt.VIR_ERR_NO_DOMAIN):
raise exception.InstanceNotFound(instance_id=instance_id)
msg = (_('Error from libvirt while looking up %(instance_id)s: [Error Code %(error_code)s] %(ex)s') % {'instance_id': instance_id, 'error_code': error_code, 'ex': ex})
raise exception.NovaException(msg)
| 7,288,528,978,716,452,000
|
Retrieve libvirt domain object given an instance id.
All libvirt errors should be handled in this method, and
relevant nova exceptions should be raised in response.
|
nova/virt/libvirt/driver.py
|
_lookup_by_id
|
srajag/nova
|
python
|
def _lookup_by_id(self, instance_id):
'Retrieve libvirt domain object given an instance id.\n\n All libvirt error handling should be handled in this method and\n relevant nova exceptions should be raised in response.\n\n '
try:
return self._conn.lookupByID(instance_id)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if (error_code == libvirt.VIR_ERR_NO_DOMAIN):
raise exception.InstanceNotFound(instance_id=instance_id)
msg = (_('Error from libvirt while looking up %(instance_id)s: [Error Code %(error_code)s] %(ex)s') % {'instance_id': instance_id, 'error_code': error_code, 'ex': ex})
raise exception.NovaException(msg)
|
def _lookup_by_name(self, instance_name):
'Retrieve libvirt domain object given an instance name.\n\n All libvirt error handling should be handled in this method and\n relevant nova exceptions should be raised in response.\n\n '
try:
return self._conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if (error_code == libvirt.VIR_ERR_NO_DOMAIN):
raise exception.InstanceNotFound(instance_id=instance_name)
msg = (_('Error from libvirt while looking up %(instance_name)s: [Error Code %(error_code)s] %(ex)s') % {'instance_name': instance_name, 'error_code': error_code, 'ex': ex})
raise exception.NovaException(msg)
| -8,928,353,716,722,507,000
|
Retrieve libvirt domain object given an instance name.
All libvirt errors should be handled in this method, and
relevant nova exceptions should be raised in response.
|
nova/virt/libvirt/driver.py
|
_lookup_by_name
|
srajag/nova
|
python
|
def _lookup_by_name(self, instance_name):
'Retrieve libvirt domain object given an instance name.\n\n All libvirt error handling should be handled in this method and\n relevant nova exceptions should be raised in response.\n\n '
try:
return self._conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if (error_code == libvirt.VIR_ERR_NO_DOMAIN):
raise exception.InstanceNotFound(instance_id=instance_name)
msg = (_('Error from libvirt while looking up %(instance_name)s: [Error Code %(error_code)s] %(ex)s') % {'instance_name': instance_name, 'error_code': error_code, 'ex': ex})
raise exception.NovaException(msg)
|
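Both lookup helpers above follow one error-translation pattern: VIR_ERR_NO_DOMAIN becomes an InstanceNotFound, everything else is wrapped with context and re-raised. A sketch with plain exceptions standing in for the nova ones (the constant's value here is made up):

class InstanceNotFound(Exception):
    pass

VIR_ERR_NO_DOMAIN = 42  # illustrative value only

def translate_lookup_error(error_code, name, ex):
    if error_code == VIR_ERR_NO_DOMAIN:
        raise InstanceNotFound(name)
    raise RuntimeError('Error from libvirt while looking up %s: '
                       '[Error Code %s] %s' % (name, error_code, ex))

try:
    translate_lookup_error(VIR_ERR_NO_DOMAIN, 'instance-0001', 'gone')
except InstanceNotFound:
    pass  # not-found is the one error callers are expected to catch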
def get_info(self, instance):
'Retrieve information from libvirt for a specific instance name.\n\n If a libvirt error is encountered during lookup, we might raise a\n NotFound exception or Error exception depending on how severe the\n libvirt error is.\n\n '
virt_dom = self._lookup_by_name(instance['name'])
dom_info = virt_dom.info()
return {'state': LIBVIRT_POWER_STATE[dom_info[0]], 'max_mem': dom_info[1], 'mem': dom_info[2], 'num_cpu': dom_info[3], 'cpu_time': dom_info[4], 'id': virt_dom.ID()}
| -168,255,364,237,726,800
|
Retrieve information from libvirt for a specific instance name.
If a libvirt error is encountered during lookup, we might raise a
NotFound exception or Error exception depending on how severe the
libvirt error is.
|
nova/virt/libvirt/driver.py
|
get_info
|
srajag/nova
|
python
|
def get_info(self, instance):
'Retrieve information from libvirt for a specific instance name.\n\n If a libvirt error is encountered during lookup, we might raise a\n NotFound exception or Error exception depending on how severe the\n libvirt error is.\n\n '
virt_dom = self._lookup_by_name(instance['name'])
dom_info = virt_dom.info()
return {'state': LIBVIRT_POWER_STATE[dom_info[0]], 'max_mem': dom_info[1], 'mem': dom_info[2], 'num_cpu': dom_info[3], 'cpu_time': dom_info[4], 'id': virt_dom.ID()}
|
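The dict built by get_info is a relabelling of libvirt's positional virDomainGetInfo tuple. A sketch that makes the positions explicit, with a trimmed power-state map and made-up sample values:

LIBVIRT_POWER_STATE = {1: 'running'}  # trimmed stand-in for the real map

def info_tuple_to_dict(dom_info, dom_id):
    # dom_info is (state, maxMem KiB, memory KiB, nrVirtCpu, cpuTime ns)
    return {'state': LIBVIRT_POWER_STATE[dom_info[0]],
            'max_mem': dom_info[1],
            'mem': dom_info[2],
            'num_cpu': dom_info[3],
            'cpu_time': dom_info[4],
            'id': dom_id}

sample = info_tuple_to_dict((1, 2097152, 1048576, 2, 123456789), 42)
assert sample['state'] == 'running' and sample['num_cpu'] == 2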
def _create_domain(self, xml=None, domain=None, instance=None, launch_flags=0, power_on=True):
'Create a domain.\n\n Either domain or xml must be passed in. If both are passed, then\n the domain definition is overwritten from the xml.\n '
err = None
if (instance and (CONF.libvirt.virt_type == 'lxc')):
self._create_domain_setup_lxc(instance)
try:
if xml:
err = (_LE('Error defining a domain with XML: %s') % xml)
domain = self._conn.defineXML(xml)
if power_on:
err = (_LE('Error launching a defined domain with XML: %s') % domain.XMLDesc(0))
domain.createWithFlags(launch_flags)
if (not utils.is_neutron()):
err = (_LE('Error enabling hairpin mode with XML: %s') % domain.XMLDesc(0))
self._enable_hairpin(domain.XMLDesc(0))
except Exception:
with excutils.save_and_reraise_exception():
if err:
LOG.error(err)
finally:
if (instance and (CONF.libvirt.virt_type == 'lxc')):
self._create_domain_cleanup_lxc(instance)
return domain
| -3,845,477,721,825,623,000
|
Create a domain.
Either domain or xml must be passed in. If both are passed, then
the domain definition is overwritten from the xml.
|
nova/virt/libvirt/driver.py
|
_create_domain
|
srajag/nova
|
python
|
def _create_domain(self, xml=None, domain=None, instance=None, launch_flags=0, power_on=True):
'Create a domain.\n\n Either domain or xml must be passed in. If both are passed, then\n the domain definition is overwritten from the xml.\n '
err = None
if (instance and (CONF.libvirt.virt_type == 'lxc')):
self._create_domain_setup_lxc(instance)
try:
if xml:
err = (_LE('Error defining a domain with XML: %s') % xml)
domain = self._conn.defineXML(xml)
if power_on:
err = (_LE('Error launching a defined domain with XML: %s') % domain.XMLDesc(0))
domain.createWithFlags(launch_flags)
if (not utils.is_neutron()):
err = (_LE('Error enabling hairpin mode with XML: %s') % domain.XMLDesc(0))
self._enable_hairpin(domain.XMLDesc(0))
except Exception:
with excutils.save_and_reraise_exception():
if err:
LOG.error(err)
finally:
if (instance and (CONF.libvirt.virt_type == 'lxc')):
self._create_domain_cleanup_lxc(instance)
return domain
|
def _create_domain_and_network(self, context, xml, instance, network_info, block_device_info=None, power_on=True, reboot=False, vifs_already_plugged=False):
'Do required network setup and create domain.'
block_device_mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_info = blockinfo.get_info_from_bdm(CONF.libvirt.virt_type, vol)
conf = self._connect_volume(connection_info, disk_info)
if ('data' in connection_info):
connection_info['data']['device_path'] = conf.source_path
vol['connection_info'] = connection_info
vol.save(context)
if ((not reboot) and ('data' in connection_info) and ('volume_id' in connection_info['data'])):
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(context, self._volume_api, volume_id, connection_info)
if encryption:
encryptor = self._get_volume_encryptor(connection_info, encryption)
encryptor.attach_volume(context, **encryption)
timeout = CONF.vif_plugging_timeout
if (self._conn_supports_start_paused and utils.is_neutron() and (not vifs_already_plugged) and power_on and timeout):
events = self._get_neutron_events(network_info)
else:
events = []
launch_flags = ((events and libvirt.VIR_DOMAIN_START_PAUSED) or 0)
domain = None
try:
with self.virtapi.wait_for_instance_event(instance, events, deadline=timeout, error_callback=self._neutron_failed_callback):
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
domain = self._create_domain(xml, instance=instance, launch_flags=launch_flags, power_on=power_on)
self.firewall_driver.apply_instance_filter(instance, network_info)
except exception.VirtualInterfaceCreateException:
with excutils.save_and_reraise_exception():
if domain:
domain.destroy()
self.cleanup(context, instance, network_info=network_info, block_device_info=block_device_info)
except eventlet.timeout.Timeout:
LOG.warn(_LW('Timeout waiting for vif plugging callback for instance %(uuid)s'), {'uuid': instance['uuid']})
if CONF.vif_plugging_is_fatal:
if domain:
domain.destroy()
self.cleanup(context, instance, network_info=network_info, block_device_info=block_device_info)
raise exception.VirtualInterfaceCreateException()
if (launch_flags & libvirt.VIR_DOMAIN_START_PAUSED):
domain.resume()
return domain
| -3,292,688,747,660,380,000
|
Do required network setup and create domain.
|
nova/virt/libvirt/driver.py
|
_create_domain_and_network
|
srajag/nova
|
python
|
def _create_domain_and_network(self, context, xml, instance, network_info, block_device_info=None, power_on=True, reboot=False, vifs_already_plugged=False):
block_device_mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_info = blockinfo.get_info_from_bdm(CONF.libvirt.virt_type, vol)
conf = self._connect_volume(connection_info, disk_info)
if ('data' in connection_info):
connection_info['data']['device_path'] = conf.source_path
vol['connection_info'] = connection_info
vol.save(context)
if ((not reboot) and ('data' in connection_info) and ('volume_id' in connection_info['data'])):
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(context, self._volume_api, volume_id, connection_info)
if encryption:
encryptor = self._get_volume_encryptor(connection_info, encryption)
encryptor.attach_volume(context, **encryption)
timeout = CONF.vif_plugging_timeout
if (self._conn_supports_start_paused and utils.is_neutron() and (not vifs_already_plugged) and power_on and timeout):
events = self._get_neutron_events(network_info)
else:
events = []
launch_flags = ((events and libvirt.VIR_DOMAIN_START_PAUSED) or 0)
domain = None
try:
with self.virtapi.wait_for_instance_event(instance, events, deadline=timeout, error_callback=self._neutron_failed_callback):
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
domain = self._create_domain(xml, instance=instance, launch_flags=launch_flags, power_on=power_on)
self.firewall_driver.apply_instance_filter(instance, network_info)
except exception.VirtualInterfaceCreateException:
with excutils.save_and_reraise_exception():
if domain:
domain.destroy()
self.cleanup(context, instance, network_info=network_info, block_device_info=block_device_info)
except eventlet.timeout.Timeout:
LOG.warn(_LW('Timeout waiting for vif plugging callback for instance %(uuid)s'), {'uuid': instance['uuid']})
if CONF.vif_plugging_is_fatal:
if domain:
domain.destroy()
self.cleanup(context, instance, network_info=network_info, block_device_info=block_device_info)
raise exception.VirtualInterfaceCreateException()
if (launch_flags & libvirt.VIR_DOMAIN_START_PAUSED):
domain.resume()
return domain
|
def _get_all_block_devices(self):
'Return all block devices in use on this node.'
devices = []
for dom in self._list_instance_domains():
try:
doc = etree.fromstring(dom.XMLDesc(0))
except libvirt.libvirtError as e:
LOG.warn((_LW("couldn't obtain the XML from domain: %(uuid)s, exception: %(ex)s") % {'uuid': dom.UUIDString(), 'ex': e}))
continue
except Exception:
continue
ret = doc.findall('./devices/disk')
for node in ret:
if (node.get('type') != 'block'):
continue
for child in node.getchildren():
if (child.tag == 'source'):
devices.append(child.get('dev'))
return devices
| -7,571,256,240,773,195,000
|
Return all block devices in use on this node.
|
nova/virt/libvirt/driver.py
|
_get_all_block_devices
|
srajag/nova
|
python
|
def _get_all_block_devices(self):
devices = []
for dom in self._list_instance_domains():
try:
doc = etree.fromstring(dom.XMLDesc(0))
except libvirt.libvirtError as e:
LOG.warn((_LW("couldn't obtain the XML from domain: %(uuid)s, exception: %(ex)s") % {'uuid': dom.UUIDString(), 'ex': e}))
continue
except Exception:
continue
ret = doc.findall('./devices/disk')
for node in ret:
if (node.get('type') != 'block'):
continue
for child in node.getchildren():
if (child.tag == 'source'):
devices.append(child.get('dev'))
return devices
|
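The XML walk in _get_all_block_devices reduces to two steps over the domain document: select the disk nodes, keep only block-typed ones, and read each source's dev attribute. A self-contained sketch against a hand-written snippet (the stdlib parser is enough for the sketch):

import xml.etree.ElementTree as etree

SAMPLE = """<domain>
  <devices>
    <disk type='block'><source dev='/dev/sdb'/></disk>
    <disk type='file'><source file='/var/lib/img.qcow2'/></disk>
  </devices>
</domain>"""

def block_device_sources(xml):
    doc = etree.fromstring(xml)
    devices = []
    for node in doc.findall('./devices/disk'):
        if node.get('type') != 'block':
            continue  # file-backed disks are not host block devices
        for child in list(node):
            if child.tag == 'source':
                devices.append(child.get('dev'))
    return devices

assert block_device_sources(SAMPLE) == ['/dev/sdb']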
def _get_interfaces(self, xml):
'Note that this function takes a domain xml.\n\n Returns a list of all network interfaces for this instance.\n '
doc = None
try:
doc = etree.fromstring(xml)
except Exception:
return []
interfaces = []
ret = doc.findall('./devices/interface')
for node in ret:
devdst = None
for child in list(node):
if (child.tag == 'target'):
devdst = child.attrib['dev']
if (devdst is None):
continue
interfaces.append(devdst)
return interfaces
| 1,829,918,215,923,531,000
|
Note that this function takes a domain xml.
Returns a list of all network interfaces for this instance.
|
nova/virt/libvirt/driver.py
|
_get_interfaces
|
srajag/nova
|
python
|
def _get_interfaces(self, xml):
'Note that this function takes a domain xml.\n\n Returns a list of all network interfaces for this instance.\n '
doc = None
try:
doc = etree.fromstring(xml)
except Exception:
return []
interfaces = []
ret = doc.findall('./devices/interface')
for node in ret:
devdst = None
for child in list(node):
if (child.tag == 'target'):
devdst = child.attrib['dev']
if (devdst is None):
continue
interfaces.append(devdst)
return interfaces
|
def _get_vcpu_total(self):
'Get available vcpu number of physical computer.\n\n :returns: the number of cpu core instances can be used.\n\n '
if (self._vcpu_total != 0):
return self._vcpu_total
try:
total_pcpus = self._conn.getInfo()[2]
except libvirt.libvirtError:
LOG.warn(_LW('Cannot get the number of cpus, because this function is not implemented for this platform.'))
return 0
if (CONF.vcpu_pin_set is None):
self._vcpu_total = total_pcpus
return self._vcpu_total
available_ids = hardware.get_vcpu_pin_set()
if (available_ids[(- 1)] >= total_pcpus):
raise exception.Invalid(_('Invalid vcpu_pin_set config, out of hypervisor cpu range.'))
self._vcpu_total = len(available_ids)
return self._vcpu_total
| 4,877,681,997,652,236,000
|
Get the number of available vcpus on the physical host.
:returns: the number of cpu cores that instances can use.
|
nova/virt/libvirt/driver.py
|
_get_vcpu_total
|
srajag/nova
|
python
|
def _get_vcpu_total(self):
'Get available vcpu number of physical computer.\n\n :returns: the number of cpu core instances can be used.\n\n '
if (self._vcpu_total != 0):
return self._vcpu_total
try:
total_pcpus = self._conn.getInfo()[2]
except libvirt.libvirtError:
LOG.warn(_LW('Cannot get the number of cpus, because this function is not implemented for this platform.'))
return 0
if (CONF.vcpu_pin_set is None):
self._vcpu_total = total_pcpus
return self._vcpu_total
available_ids = hardware.get_vcpu_pin_set()
if (available_ids[(- 1)] >= total_pcpus):
raise exception.Invalid(_('Invalid vcpu_pin_set config, out of hypervisor cpu range.'))
self._vcpu_total = len(available_ids)
return self._vcpu_total
|
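The vcpu_pin_set bounds check works because the pin set is a sorted list of zero-based cpu ids, so comparing only its last element against the host cpu count is sufficient. A sketch of that validation (the sortedness of the pin set is an assumption carried over from hardware.get_vcpu_pin_set):

def usable_vcpus(total_pcpus, pin_set):
    # pin_set is assumed sorted; the largest id must be strictly
    # below the host cpu count, since ids are zero-based.
    if pin_set is None:
        return total_pcpus
    if pin_set[-1] >= total_pcpus:
        raise ValueError('vcpu_pin_set is out of the hypervisor cpu range')
    return len(pin_set)

assert usable_vcpus(8, None) == 8
assert usable_vcpus(8, [0, 2, 4]) == 3
try:
    usable_vcpus(4, [0, 5])
except ValueError:
    pass  # expected: cpu id 5 does not exist on a 4-cpu host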
def _get_memory_mb_total(self):
'Get the total memory size(MB) of physical computer.\n\n :returns: the total amount of memory(MB).\n\n '
return self._conn.getInfo()[1]
| -6,204,214,114,700,270,000
|
Get the total memory size (MB) of the physical host.
:returns: the total amount of memory(MB).
|
nova/virt/libvirt/driver.py
|
_get_memory_mb_total
|
srajag/nova
|
python
|
def _get_memory_mb_total(self):
'Get the total memory size(MB) of physical computer.\n\n :returns: the total amount of memory(MB).\n\n '
return self._conn.getInfo()[1]
|
@staticmethod
def _get_local_gb_info():
'Get local storage info of the compute node in GB.\n\n :returns: A dict containing:\n :total: How big the overall usable filesystem is (in gigabytes)\n :free: How much space is free (in gigabytes)\n :used: How much space is used (in gigabytes)\n '
if (CONF.libvirt.images_type == 'lvm'):
info = lvm.get_volume_group_info(CONF.libvirt.images_volume_group)
elif (CONF.libvirt.images_type == 'rbd'):
info = LibvirtDriver._get_rbd_driver().get_pool_info()
else:
info = libvirt_utils.get_fs_info(CONF.instances_path)
for (k, v) in info.iteritems():
info[k] = (v / units.Gi)
return info
| 6,157,551,686,383,488,000
|
Get local storage info of the compute node in GB.
:returns: A dict containing:
:total: How big the overall usable filesystem is (in gigabytes)
:free: How much space is free (in gigabytes)
:used: How much space is used (in gigabytes)
|
nova/virt/libvirt/driver.py
|
_get_local_gb_info
|
srajag/nova
|
python
|
@staticmethod
def _get_local_gb_info():
'Get local storage info of the compute node in GB.\n\n :returns: A dict containing:\n :total: How big the overall usable filesystem is (in gigabytes)\n :free: How much space is free (in gigabytes)\n :used: How much space is used (in gigabytes)\n '
if (CONF.libvirt.images_type == 'lvm'):
info = lvm.get_volume_group_info(CONF.libvirt.images_volume_group)
elif (CONF.libvirt.images_type == 'rbd'):
info = LibvirtDriver._get_rbd_driver().get_pool_info()
else:
info = libvirt_utils.get_fs_info(CONF.instances_path)
for (k, v) in info.iteritems():
info[k] = (v / units.Gi)
return info
|
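The final loop in _get_local_gb_info just rescales every byte count to gigabytes in place. A tiny sketch of the same conversion over a plain dict (units.Gi is 1 GiB, i.e. 1024**3):

def to_gb(info_bytes):
    GiB = 1024 ** 3  # units.Gi in the driver
    return dict((k, v // GiB) for k, v in info_bytes.items())

assert to_gb({'total': 2 * 1024 ** 3, 'free': 1024 ** 3}) == {'total': 2, 'free': 1}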
def _get_vcpu_used(self):
'Get vcpu usage number of physical computer.\n\n :returns: The total number of vcpu(s) that are currently being used.\n\n '
total = 0
if (CONF.libvirt.virt_type == 'lxc'):
return (total + 1)
for dom in self._list_instance_domains():
try:
vcpus = dom.vcpus()
except libvirt.libvirtError as e:
LOG.warn((_LW("couldn't obtain the vpu count from domain id: %(uuid)s, exception: %(ex)s") % {'uuid': dom.UUIDString(), 'ex': e}))
else:
if ((vcpus is not None) and (len(vcpus) > 1)):
total += len(vcpus[1])
greenthread.sleep(0)
return total
| -3,860,857,789,327,115,300
|
Get the vcpu usage of the physical host.
:returns: The total number of vcpu(s) that are currently being used.
|
nova/virt/libvirt/driver.py
|
_get_vcpu_used
|
srajag/nova
|
python
|
def _get_vcpu_used(self):
'Get vcpu usage number of physical computer.\n\n :returns: The total number of vcpu(s) that are currently being used.\n\n '
total = 0
if (CONF.libvirt.virt_type == 'lxc'):
return (total + 1)
for dom in self._list_instance_domains():
try:
vcpus = dom.vcpus()
except libvirt.libvirtError as e:
LOG.warn((_LW("couldn't obtain the vpu count from domain id: %(uuid)s, exception: %(ex)s") % {'uuid': dom.UUIDString(), 'ex': e}))
else:
if ((vcpus is not None) and (len(vcpus) > 1)):
total += len(vcpus[1])
greenthread.sleep(0)
return total
|
def _get_memory_mb_used(self):
'Get the used memory size(MB) of physical computer.\n\n :returns: the total usage of memory(MB).\n\n '
if (sys.platform.upper() not in ['LINUX2', 'LINUX3']):
return 0
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
if (CONF.libvirt.virt_type == 'xen'):
used = 0
for dom in self._list_instance_domains(only_guests=False):
try:
dom_mem = int(dom.info()[2])
except libvirt.libvirtError as e:
LOG.warn((_LW("couldn't obtain the memory from domain: %(uuid)s, exception: %(ex)s") % {'uuid': dom.UUIDString(), 'ex': e}))
continue
if (dom.ID() != 0):
used += dom_mem
else:
used += (dom_mem - ((int(m[(idx1 + 1)]) + int(m[(idx2 + 1)])) + int(m[(idx3 + 1)])))
return (used / units.Ki)
else:
avail = ((int(m[(idx1 + 1)]) + int(m[(idx2 + 1)])) + int(m[(idx3 + 1)]))
return (self._get_memory_mb_total() - (avail / units.Ki))
| -1,535,954,299,046,830,000
|
Get the used memory size (MB) of the physical host.
:returns: the total usage of memory(MB).
|
nova/virt/libvirt/driver.py
|
_get_memory_mb_used
|
srajag/nova
|
python
|
def _get_memory_mb_used(self):
'Get the used memory size(MB) of physical computer.\n\n :returns: the total usage of memory(MB).\n\n '
if (sys.platform.upper() not in ['LINUX2', 'LINUX3']):
return 0
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
if (CONF.libvirt.virt_type == 'xen'):
used = 0
for dom in self._list_instance_domains(only_guests=False):
try:
dom_mem = int(dom.info()[2])
except libvirt.libvirtError as e:
LOG.warn((_LW("couldn't obtain the memory from domain: %(uuid)s, exception: %(ex)s") % {'uuid': dom.UUIDString(), 'ex': e}))
continue
if (dom.ID() != 0):
used += dom_mem
else:
used += (dom_mem - ((int(m[(idx1 + 1)]) + int(m[(idx2 + 1)])) + int(m[(idx3 + 1)])))
return (used / units.Ki)
else:
avail = ((int(m[(idx1 + 1)]) + int(m[(idx2 + 1)])) + int(m[(idx3 + 1)]))
return (self._get_memory_mb_total() - (avail / units.Ki))
|
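The /proc/meminfo parsing above leans on the file's whitespace layout: splitting the whole file yields each label ('MemFree:', 'Buffers:', 'Cached:') immediately followed by its kB value. A standalone sketch over a canned snippet:

MEMINFO = ("MemTotal: 16303424 kB\n"
           "MemFree: 812344 kB\n"
           "Buffers: 104332 kB\n"
           "Cached: 4123456 kB\n")

def available_kb(meminfo_text):
    m = meminfo_text.split()
    free = int(m[m.index('MemFree:') + 1])
    buffers = int(m[m.index('Buffers:') + 1])
    cached = int(m[m.index('Cached:') + 1])
    return free + buffers + cached

assert available_kb(MEMINFO) == 812344 + 104332 + 4123456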
def _get_hypervisor_type(self):
'Get hypervisor type.\n\n :returns: hypervisor type (ex. qemu)\n\n '
return self._conn.getType()
| 5,286,859,397,592,172,000
|
Get hypervisor type.
:returns: hypervisor type (ex. qemu)
|
nova/virt/libvirt/driver.py
|
_get_hypervisor_type
|
srajag/nova
|
python
|
def _get_hypervisor_type(self):
'Get hypervisor type.\n\n :returns: hypervisor type (ex. qemu)\n\n '
return self._conn.getType()
|
def _get_hypervisor_version(self):
'Get hypervisor version.\n\n :returns: hypervisor version (ex. 12003)\n\n '
method = getattr(self._conn, 'getVersion', None)
if (method is None):
raise exception.NovaException(_('libvirt version is too old (does not support getVersion)'))
return method()
| -7,621,861,062,212,571,000
|
Get hypervisor version.
:returns: hypervisor version (ex. 12003)
|
nova/virt/libvirt/driver.py
|
_get_hypervisor_version
|
srajag/nova
|
python
|
def _get_hypervisor_version(self):
'Get hypervisor version.\n\n :returns: hypervisor version (ex. 12003)\n\n '
method = getattr(self._conn, 'getVersion', None)
if (method is None):
raise exception.NovaException(_('libvirt version is too old (does not support getVersion)'))
return method()
|
def _get_hypervisor_hostname(self):
'Returns the hostname of the hypervisor.'
hostname = self._conn.getHostname()
if (not hasattr(self, '_hypervisor_hostname')):
self._hypervisor_hostname = hostname
elif (hostname != self._hypervisor_hostname):
LOG.error(_LE('Hostname has changed from %(old)s to %(new)s. A restart is required for this to take effect.'), {'old': self._hypervisor_hostname, 'new': hostname})
return self._hypervisor_hostname
| 375,618,949,531,053,760
|
Returns the hostname of the hypervisor.
|
nova/virt/libvirt/driver.py
|
_get_hypervisor_hostname
|
srajag/nova
|
python
|
def _get_hypervisor_hostname(self):
hostname = self._conn.getHostname()
if (not hasattr(self, '_hypervisor_hostname')):
self._hypervisor_hostname = hostname
elif (hostname != self._hypervisor_hostname):
LOG.error(_LE('Hostname has changed from %(old)s to %(new)s. A restart is required for this to take effect.'), {'old': self._hypervisor_hostname, 'new': hostname})
return self._hypervisor_hostname
|
def _get_instance_capabilities(self):
'Get hypervisor instance capabilities\n\n Returns a list of tuples that describe instances the\n hypervisor is capable of hosting. Each tuple consists\n of the triplet (arch, hypervisor_type, vm_mode).\n\n :returns: List of tuples describing instance capabilities\n '
caps = self._get_host_capabilities()
instance_caps = list()
for g in caps.guests:
for dt in g.domtype:
instance_cap = (g.arch, dt, g.ostype)
instance_caps.append(instance_cap)
return instance_caps
| -8,739,905,543,423,807,000
|
Get hypervisor instance capabilities
Returns a list of tuples that describe instances the
hypervisor is capable of hosting. Each tuple consists
of the triplet (arch, hypervisor_type, vm_mode).
:returns: List of tuples describing instance capabilities
|
nova/virt/libvirt/driver.py
|
_get_instance_capabilities
|
srajag/nova
|
python
|
def _get_instance_capabilities(self):
'Get hypervisor instance capabilities\n\n Returns a list of tuples that describe instances the\n hypervisor is capable of hosting. Each tuple consists\n of the triplet (arch, hypervisor_type, vm_mode).\n\n :returns: List of tuples describing instance capabilities\n '
caps = self._get_host_capabilities()
instance_caps = list()
for g in caps.guests:
for dt in g.domtype:
instance_cap = (g.arch, dt, g.ostype)
instance_caps.append(instance_cap)
return instance_caps
|
def _get_cpu_info(self):
'Get cpuinfo information.\n\n Obtains cpu feature from virConnect.getCapabilities,\n and returns as a json string.\n\n :return: see above description\n\n '
caps = self._get_host_capabilities()
cpu_info = dict()
cpu_info['arch'] = caps.host.cpu.arch
cpu_info['model'] = caps.host.cpu.model
cpu_info['vendor'] = caps.host.cpu.vendor
topology = dict()
topology['sockets'] = caps.host.cpu.sockets
topology['cores'] = caps.host.cpu.cores
topology['threads'] = caps.host.cpu.threads
cpu_info['topology'] = topology
features = list()
for f in caps.host.cpu.features:
features.append(f.name)
cpu_info['features'] = features
return jsonutils.dumps(cpu_info)
| 3,019,756,992,942,653,000
|
Get cpuinfo information.
Obtains cpu features from virConnect.getCapabilities
and returns them as a JSON string.
:return: see above description
|
nova/virt/libvirt/driver.py
|
_get_cpu_info
|
srajag/nova
|
python
|
def _get_cpu_info(self):
'Get cpuinfo information.\n\n Obtains cpu feature from virConnect.getCapabilities,\n and returns as a json string.\n\n :return: see above description\n\n '
caps = self._get_host_capabilities()
cpu_info = dict()
cpu_info['arch'] = caps.host.cpu.arch
cpu_info['model'] = caps.host.cpu.model
cpu_info['vendor'] = caps.host.cpu.vendor
topology = dict()
topology['sockets'] = caps.host.cpu.sockets
topology['cores'] = caps.host.cpu.cores
topology['threads'] = caps.host.cpu.threads
cpu_info['topology'] = topology
features = list()
for f in caps.host.cpu.features:
features.append(f.name)
cpu_info['features'] = features
return jsonutils.dumps(cpu_info)
|
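A minimal sketch of the JSON document this produces, using the standard json module in place of oslo's jsonutils; the CPU values are made-up samples:

import json

cpu_info = {'arch': 'x86_64',
            'model': 'SandyBridge',
            'vendor': 'Intel',
            'topology': {'sockets': 1, 'cores': 4, 'threads': 2},
            'features': ['vmx', 'aes']}
serialized = json.dumps(cpu_info)
# A consumer such as _compare_cpu() round-trips it back to a dict:
assert json.loads(serialized)['topology']['cores'] == 4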
def _get_pcidev_info(self, devname):
'Returns a dict of PCI device.'
def _get_device_type(cfgdev):
"Get a PCI device's device type.\n\n An assignable PCI device can be a normal PCI device,\n a SR-IOV Physical Function (PF), or a SR-IOV Virtual\n Function (VF). Only normal PCI devices or SR-IOV VFs\n are assignable, while SR-IOV PFs are always owned by\n hypervisor.\n\n Please notice that a PCI device with SR-IOV\n capability but not enabled is reported as normal PCI device.\n "
for fun_cap in cfgdev.pci_capability.fun_capability:
if (len(fun_cap.device_addrs) != 0):
if (fun_cap.type == 'virt_functions'):
return {'dev_type': 'type-PF'}
if (fun_cap.type == 'phys_function'):
phys_address = ('%s:%s:%s.%s' % (fun_cap.device_addrs[0][0].replace('0x', ''), fun_cap.device_addrs[0][1].replace('0x', ''), fun_cap.device_addrs[0][2].replace('0x', ''), fun_cap.device_addrs[0][3].replace('0x', '')))
return {'dev_type': 'type-VF', 'phys_function': phys_address}
return {'dev_type': 'type-PCI'}
virtdev = self._conn.nodeDeviceLookupByName(devname)
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
address = ('%04x:%02x:%02x.%1x' % (cfgdev.pci_capability.domain, cfgdev.pci_capability.bus, cfgdev.pci_capability.slot, cfgdev.pci_capability.function))
device = {'dev_id': cfgdev.name, 'address': address, 'product_id': cfgdev.pci_capability.product_id[2:6], 'vendor_id': cfgdev.pci_capability.vendor_id[2:6]}
device['label'] = ('label_%(vendor_id)s_%(product_id)s' % device)
device.update(_get_device_type(cfgdev))
return device
| -7,456,508,015,902,004,000
|
Returns a dict of PCI device.
|
nova/virt/libvirt/driver.py
|
_get_pcidev_info
|
srajag/nova
|
python
|
def _get_pcidev_info(self, devname):
def _get_device_type(cfgdev):
"Get a PCI device's device type.\n\n An assignable PCI device can be a normal PCI device,\n a SR-IOV Physical Function (PF), or a SR-IOV Virtual\n Function (VF). Only normal PCI devices or SR-IOV VFs\n are assignable, while SR-IOV PFs are always owned by\n hypervisor.\n\n Please notice that a PCI device with SR-IOV\n capability but not enabled is reported as normal PCI device.\n "
for fun_cap in cfgdev.pci_capability.fun_capability:
if (len(fun_cap.device_addrs) != 0):
if (fun_cap.type == 'virt_functions'):
return {'dev_type': 'type-PF'}
if (fun_cap.type == 'phys_function'):
phys_address = ('%s:%s:%s.%s' % (fun_cap.device_addrs[0][0].replace('0x', ''), fun_cap.device_addrs[0][1].replace('0x', ''), fun_cap.device_addrs[0][2].replace('0x', ''), fun_cap.device_addrs[0][3].replace('0x', '')))
return {'dev_type': 'type-VF', 'phys_function': phys_address}
return {'dev_type': 'type-PCI'}
virtdev = self._conn.nodeDeviceLookupByName(devname)
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
address = ('%04x:%02x:%02x.%1x' % (cfgdev.pci_capability.domain, cfgdev.pci_capability.bus, cfgdev.pci_capability.slot, cfgdev.pci_capability.function))
device = {'dev_id': cfgdev.name, 'address': address, 'product_id': cfgdev.pci_capability.product_id[2:6], 'vendor_id': cfgdev.pci_capability.vendor_id[2:6]}
device['label'] = ('label_%(vendor_id)s_%(product_id)s' % device)
device.update(_get_device_type(cfgdev))
return device
|
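Worked example of the two address formats used above, with fabricated device numbers: the '%04x:%02x:%02x.%1x' template renders integer domain/bus/slot/function fields, while the VF branch strips '0x' prefixes from string quadruples:

domain, bus, slot, function = 0, 5, 0, 1
print('%04x:%02x:%02x.%1x' % (domain, bus, slot, function))  # 0000:05:00.1

vf_addr = ('0x0000', '0x05', '0x00', '0x0')                  # sample VF fields
print('%s:%s:%s.%s' % tuple(p.replace('0x', '') for p in vf_addr))  # 0000:05:00.0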
def _get_pci_passthrough_devices(self):
"Get host PCI devices information.\n\n Obtains pci devices information from libvirt, and returns\n as a JSON string.\n\n Each device information is a dictionary, with mandatory keys\n of 'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id',\n 'label' and other optional device specific information.\n\n Refer to the objects/pci_device.py for more idea of these keys.\n\n :returns: a JSON string containaing a list of the assignable PCI\n devices information\n "
if (not getattr(self, '_list_devices_supported', True)):
return jsonutils.dumps([])
try:
dev_names = (self._conn.listDevices('pci', 0) or [])
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if (error_code == libvirt.VIR_ERR_NO_SUPPORT):
self._list_devices_supported = False
LOG.warn(_LW('URI %(uri)s does not support listDevices: %(error)s'), {'uri': self.uri(), 'error': ex})
return jsonutils.dumps([])
else:
raise
pci_info = []
for name in dev_names:
pci_dev = self._get_pcidev_info(name)
if self._pci_device_assignable(pci_dev):
pci_info.append(pci_dev)
return jsonutils.dumps(pci_info)
| 5,520,751,107,575,842,000
|
Get host PCI devices information.
Obtains pci devices information from libvirt, and returns
as a JSON string.
Each device information is a dictionary, with mandatory keys
of 'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id',
'label' and other optional device specific information.
Refer to objects/pci_device.py for more details on these keys.
:returns: a JSON string containing a list of the assignable PCI
devices information
|
nova/virt/libvirt/driver.py
|
_get_pci_passthrough_devices
|
srajag/nova
|
python
|
def _get_pci_passthrough_devices(self):
"Get host PCI devices information.\n\n Obtains pci devices information from libvirt, and returns\n as a JSON string.\n\n Each device information is a dictionary, with mandatory keys\n of 'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id',\n 'label' and other optional device specific information.\n\n Refer to the objects/pci_device.py for more idea of these keys.\n\n :returns: a JSON string containaing a list of the assignable PCI\n devices information\n "
if (not getattr(self, '_list_devices_supported', True)):
return jsonutils.dumps([])
try:
dev_names = (self._conn.listDevices('pci', 0) or [])
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if (error_code == libvirt.VIR_ERR_NO_SUPPORT):
self._list_devices_supported = False
LOG.warn(_LW('URI %(uri)s does not support listDevices: %(error)s'), {'uri': self.uri(), 'error': ex})
return jsonutils.dumps([])
else:
raise
pci_info = []
for name in dev_names:
pci_dev = self._get_pcidev_info(name)
if self._pci_device_assignable(pci_dev):
pci_info.append(pci_dev)
return jsonutils.dumps(pci_info)
|
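The getattr(self, '_list_devices_supported', True) guard is a small latch: the first VIR_ERR_NO_SUPPORT failure records the flag so later periodic calls return immediately. A standalone sketch of the same pattern (RuntimeError stands in for libvirtError):

class Prober(object):
    def list_devices(self):
        if not getattr(self, '_supported', True):
            return []                          # remembered failure short-circuits
        try:
            raise RuntimeError('no support')   # stand-in for a libvirtError
        except RuntimeError:
            self._supported = False            # latch so later calls skip the probe
            return []

p = Prober()
print(p.list_devices(), p.list_devices())      # [] [] -- second call never probes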
def get_all_volume_usage(self, context, compute_host_bdms):
'Return usage info for volumes attached to vms on\n a given host.\n '
vol_usage = []
for instance_bdms in compute_host_bdms:
instance = instance_bdms['instance']
for bdm in instance_bdms['instance_bdms']:
vol_stats = []
mountpoint = bdm['device_name']
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
volume_id = bdm['volume_id']
LOG.debug('Trying to get stats for the volume %s', volume_id)
vol_stats = self.block_stats(instance['name'], mountpoint)
if vol_stats:
stats = dict(volume=volume_id, instance=instance, rd_req=vol_stats[0], rd_bytes=vol_stats[1], wr_req=vol_stats[2], wr_bytes=vol_stats[3], flush_operations=vol_stats[4])
LOG.debug('Got volume usage stats for the volume=%(volume)s, rd_req=%(rd_req)d, rd_bytes=%(rd_bytes)d, wr_req=%(wr_req)d, wr_bytes=%(wr_bytes)d', stats, instance=instance)
vol_usage.append(stats)
return vol_usage
| 7,337,256,096,335,965,000
|
Return usage info for volumes attached to vms on
a given host.
|
nova/virt/libvirt/driver.py
|
get_all_volume_usage
|
srajag/nova
|
python
|
def get_all_volume_usage(self, context, compute_host_bdms):
'Return usage info for volumes attached to vms on\n a given host.\n '
vol_usage = []
for instance_bdms in compute_host_bdms:
instance = instance_bdms['instance']
for bdm in instance_bdms['instance_bdms']:
vol_stats = []
mountpoint = bdm['device_name']
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
volume_id = bdm['volume_id']
LOG.debug('Trying to get stats for the volume %s', volume_id)
vol_stats = self.block_stats(instance['name'], mountpoint)
if vol_stats:
stats = dict(volume=volume_id, instance=instance, rd_req=vol_stats[0], rd_bytes=vol_stats[1], wr_req=vol_stats[2], wr_bytes=vol_stats[3], flush_operations=vol_stats[4])
LOG.debug('Got volume usage stats for the volume=%(volume)s, rd_req=%(rd_req)d, rd_bytes=%(rd_bytes)d, wr_req=%(wr_req)d, wr_bytes=%(wr_bytes)d', stats, instance=instance)
vol_usage.append(stats)
return vol_usage
|
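domain.blockStats() returns a five-element tuple that the method unpacks positionally; a sketch with fabricated counters and a placeholder volume id:

vol_stats = (120, 4096000, 80, 2048000, 7)   # fabricated blockStats() result
stats = dict(volume='vol-1234', rd_req=vol_stats[0], rd_bytes=vol_stats[1],
             wr_req=vol_stats[2], wr_bytes=vol_stats[3],
             flush_operations=vol_stats[4])
print(stats['rd_bytes'])                     # 4096000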
def block_stats(self, instance_name, disk_id):
'Note that this function takes an instance name.'
try:
domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk_id)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.info(_LI('Getting block stats failed, device might have been detached. Instance=%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s'), {'instance_name': instance_name, 'disk': disk_id, 'errcode': errcode, 'e': e})
except exception.InstanceNotFound:
LOG.info(_LI('Could not find domain in libvirt for instance %s. Cannot get block stats for device'), instance_name)
| -682,258,597,712,542,700
|
Note that this function takes an instance name.
|
nova/virt/libvirt/driver.py
|
block_stats
|
srajag/nova
|
python
|
def block_stats(self, instance_name, disk_id):
try:
domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk_id)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.info(_LI('Getting block stats failed, device might have been detached. Instance=%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s'), {'instance_name': instance_name, 'disk': disk_id, 'errcode': errcode, 'e': e})
except exception.InstanceNotFound:
LOG.info(_LI('Could not find domain in libvirt for instance %s. Cannot get block stats for device'), instance_name)
|
def interface_stats(self, instance_name, iface_id):
'Note that this function takes an instance name.'
domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(iface_id)
| -8,352,032,843,542,878,000
|
Note that this function takes an instance name.
|
nova/virt/libvirt/driver.py
|
interface_stats
|
srajag/nova
|
python
|
def interface_stats(self, instance_name, iface_id):
domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(iface_id)
|
def get_available_resource(self, nodename):
'Retrieve resource information.\n\n This method is called when nova-compute launches, and\n as part of a periodic task that records the results in the DB.\n\n :param nodename: will be put in PCI device\n :returns: dictionary containing resource info\n '
stats = self.get_host_stats(refresh=True)
stats['supported_instances'] = jsonutils.dumps(stats['supported_instances'])
return stats
| -6,399,908,168,104,497,000
|
Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename: will be put in PCI device
:returns: dictionary containing resource info
|
nova/virt/libvirt/driver.py
|
get_available_resource
|
srajag/nova
|
python
|
def get_available_resource(self, nodename):
'Retrieve resource information.\n\n This method is called when nova-compute launches, and\n as part of a periodic task that records the results in the DB.\n\n :param nodename: will be put in PCI device\n :returns: dictionary containing resource info\n '
stats = self.get_host_stats(refresh=True)
stats['supported_instances'] = jsonutils.dumps(stats['supported_instances'])
return stats
|
def check_can_live_migrate_destination(self, context, instance, src_compute_info, dst_compute_info, block_migration=False, disk_over_commit=False):
'Check if it is possible to execute live migration.\n\n This runs checks on the destination host, and then calls\n back to the source host to check the results.\n\n :param context: security context\n :param instance: nova.db.sqlalchemy.models.Instance\n :param block_migration: if true, prepare for block migration\n :param disk_over_commit: if true, allow disk over commit\n :returns: a dict containing:\n :filename: name of the tmpfile under CONF.instances_path\n :block_migration: whether this is block migration\n :disk_over_commit: disk-over-commit factor on dest host\n :disk_available_mb: available disk space on dest host\n '
disk_available_mb = None
if block_migration:
disk_available_gb = dst_compute_info['disk_available_least']
disk_available_mb = ((disk_available_gb * units.Ki) - CONF.reserved_host_disk_mb)
source_cpu_info = src_compute_info['cpu_info']
self._compare_cpu(source_cpu_info)
filename = self._create_shared_storage_test_file()
return {'filename': filename, 'image_type': CONF.libvirt.images_type, 'block_migration': block_migration, 'disk_over_commit': disk_over_commit, 'disk_available_mb': disk_available_mb}
| 3,491,805,781,014,162,400
|
Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing:
:filename: name of the tmpfile under CONF.instances_path
:block_migration: whether this is block migration
:disk_over_commit: disk-over-commit factor on dest host
:disk_available_mb: available disk space on dest host
|
nova/virt/libvirt/driver.py
|
check_can_live_migrate_destination
|
srajag/nova
|
python
|
def check_can_live_migrate_destination(self, context, instance, src_compute_info, dst_compute_info, block_migration=False, disk_over_commit=False):
'Check if it is possible to execute live migration.\n\n This runs checks on the destination host, and then calls\n back to the source host to check the results.\n\n :param context: security context\n :param instance: nova.db.sqlalchemy.models.Instance\n :param block_migration: if true, prepare for block migration\n :param disk_over_commit: if true, allow disk over commit\n :returns: a dict containing:\n :filename: name of the tmpfile under CONF.instances_path\n :block_migration: whether this is block migration\n :disk_over_commit: disk-over-commit factor on dest host\n :disk_available_mb: available disk space on dest host\n '
disk_available_mb = None
if block_migration:
disk_available_gb = dst_compute_info['disk_available_least']
disk_available_mb = ((disk_available_gb * units.Ki) - CONF.reserved_host_disk_mb)
source_cpu_info = src_compute_info['cpu_info']
self._compare_cpu(source_cpu_info)
filename = self._create_shared_storage_test_file()
return {'filename': filename, 'image_type': CONF.libvirt.images_type, 'block_migration': block_migration, 'disk_over_commit': disk_over_commit, 'disk_available_mb': disk_available_mb}
|
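Worked example of the disk_available_mb computation for block migration, with assumed values (units.Ki is 1024, so this converts GiB to MiB and subtracts the configured headroom):

KI = 1024                       # units.Ki
disk_available_gb = 100         # dst_compute_info['disk_available_least']
reserved_host_disk_mb = 512     # assumed CONF.reserved_host_disk_mb value
disk_available_mb = disk_available_gb * KI - reserved_host_disk_mb
print(disk_available_mb)        # 101888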
def check_can_live_migrate_destination_cleanup(self, context, dest_check_data):
'Do required cleanup on dest host after check_can_live_migrate calls\n\n :param context: security context\n '
filename = dest_check_data['filename']
self._cleanup_shared_storage_test_file(filename)
| 3,965,159,367,356,769,000
|
Do required cleanup on dest host after check_can_live_migrate calls
:param context: security context
|
nova/virt/libvirt/driver.py
|
check_can_live_migrate_destination_cleanup
|
srajag/nova
|
python
|
def check_can_live_migrate_destination_cleanup(self, context, dest_check_data):
'Do required cleanup on dest host after check_can_live_migrate calls\n\n :param context: security context\n '
filename = dest_check_data['filename']
self._cleanup_shared_storage_test_file(filename)
|
def check_can_live_migrate_source(self, context, instance, dest_check_data):
'Check if it is possible to execute live migration.\n\n This checks if the live migration can succeed, based on the\n results from check_can_live_migrate_destination.\n\n :param context: security context\n :param instance: nova.db.sqlalchemy.models.Instance\n :param dest_check_data: result of check_can_live_migrate_destination\n :returns: a dict containing migration info\n '
source = CONF.host
dest_check_data.update({'is_shared_block_storage': self._is_shared_block_storage(instance, dest_check_data)})
dest_check_data.update({'is_shared_instance_path': self._is_shared_instance_path(dest_check_data)})
if dest_check_data['block_migration']:
if (dest_check_data['is_shared_block_storage'] or dest_check_data['is_shared_instance_path']):
reason = _('Block migration can not be used with shared storage.')
raise exception.InvalidLocalStorage(reason=reason, path=source)
self._assert_dest_node_has_enough_disk(context, instance, dest_check_data['disk_available_mb'], dest_check_data['disk_over_commit'])
elif (not (dest_check_data['is_shared_block_storage'] or dest_check_data['is_shared_instance_path'])):
reason = _('Live migration can not be used without shared storage.')
raise exception.InvalidSharedStorage(reason=reason, path=source)
instance_path = libvirt_utils.get_instance_path(instance, relative=True)
dest_check_data['instance_relative_path'] = instance_path
return dest_check_data
| -5,361,551,892,567,386,000
|
Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
:returns: a dict containing migration info
|
nova/virt/libvirt/driver.py
|
check_can_live_migrate_source
|
srajag/nova
|
python
|
def check_can_live_migrate_source(self, context, instance, dest_check_data):
'Check if it is possible to execute live migration.\n\n This checks if the live migration can succeed, based on the\n results from check_can_live_migrate_destination.\n\n :param context: security context\n :param instance: nova.db.sqlalchemy.models.Instance\n :param dest_check_data: result of check_can_live_migrate_destination\n :returns: a dict containing migration info\n '
source = CONF.host
dest_check_data.update({'is_shared_block_storage': self._is_shared_block_storage(instance, dest_check_data)})
dest_check_data.update({'is_shared_instance_path': self._is_shared_instance_path(dest_check_data)})
if dest_check_data['block_migration']:
if (dest_check_data['is_shared_block_storage'] or dest_check_data['is_shared_instance_path']):
reason = _('Block migration can not be used with shared storage.')
raise exception.InvalidLocalStorage(reason=reason, path=source)
self._assert_dest_node_has_enough_disk(context, instance, dest_check_data['disk_available_mb'], dest_check_data['disk_over_commit'])
elif (not (dest_check_data['is_shared_block_storage'] or dest_check_data['is_shared_instance_path'])):
reason = _('Live migration can not be used without shared storage.')
raise exception.InvalidSharedStorage(reason=reason, path=source)
instance_path = libvirt_utils.get_instance_path(instance, relative=True)
dest_check_data['instance_relative_path'] = instance_path
return dest_check_data
|
def _is_shared_block_storage(self, instance, dest_check_data):
'Check if all block storage of an instance can be shared\n between source and destination of a live migration.\n\n Returns true if the instance is volume backed and has no local disks,\n or if the image backend is the same on source and destination and the\n backend shares block storage between compute nodes.\n '
if ((CONF.libvirt.images_type == dest_check_data.get('image_type')) and self.image_backend.backend().is_shared_block_storage()):
return True
if (dest_check_data.get('is_volume_backed') and (not bool(jsonutils.loads(self.get_instance_disk_info(instance['name']))))):
return True
return False
| -5,988,698,175,857,207,000
|
Check if all block storage of an instance can be shared
between source and destination of a live migration.
Returns true if the instance is volume backed and has no local disks,
or if the image backend is the same on source and destination and the
backend shares block storage between compute nodes.
|
nova/virt/libvirt/driver.py
|
_is_shared_block_storage
|
srajag/nova
|
python
|
def _is_shared_block_storage(self, instance, dest_check_data):
'Check if all block storage of an instance can be shared\n between source and destination of a live migration.\n\n Returns true if the instance is volume backed and has no local disks,\n or if the image backend is the same on source and destination and the\n backend shares block storage between compute nodes.\n '
if ((CONF.libvirt.images_type == dest_check_data.get('image_type')) and self.image_backend.backend().is_shared_block_storage()):
return True
if (dest_check_data.get('is_volume_backed') and (not bool(jsonutils.loads(self.get_instance_disk_info(instance['name']))))):
return True
return False
|
def _is_shared_instance_path(self, dest_check_data):
'Check if instance path is shared between source and\n destination of a live migration.\n '
return self._check_shared_storage_test_file(dest_check_data['filename'])
| 6,241,452,415,291,556,000
|
Check if instance path is shared between source and
destination of a live migration.
|
nova/virt/libvirt/driver.py
|
_is_shared_instance_path
|
srajag/nova
|
python
|
def _is_shared_instance_path(self, dest_check_data):
'Check if instance path is shared between source and\n destination of a live migration.\n '
return self._check_shared_storage_test_file(dest_check_data['filename'])
|
def _assert_dest_node_has_enough_disk(self, context, instance, available_mb, disk_over_commit):
'Checks if destination has enough disk for block migration.'
available = 0
if available_mb:
available = (available_mb * units.Mi)
ret = self.get_instance_disk_info(instance['name'])
disk_infos = jsonutils.loads(ret)
necessary = 0
if disk_over_commit:
for info in disk_infos:
necessary += int(info['disk_size'])
else:
for info in disk_infos:
necessary += int(info['virt_disk_size'])
if ((available - necessary) < 0):
reason = (_('Unable to migrate %(instance_uuid)s: Disk of instance is too large (available on destination host: %(available)s < need: %(necessary)s)') % {'instance_uuid': instance['uuid'], 'available': available, 'necessary': necessary})
raise exception.MigrationPreCheckError(reason=reason)
| -3,717,422,254,881,443,000
|
Checks if destination has enough disk for block migration.
|
nova/virt/libvirt/driver.py
|
_assert_dest_node_has_enough_disk
|
srajag/nova
|
python
|
def _assert_dest_node_has_enough_disk(self, context, instance, available_mb, disk_over_commit):
available = 0
if available_mb:
available = (available_mb * units.Mi)
ret = self.get_instance_disk_info(instance['name'])
disk_infos = jsonutils.loads(ret)
necessary = 0
if disk_over_commit:
for info in disk_infos:
necessary += int(info['disk_size'])
else:
for info in disk_infos:
necessary += int(info['virt_disk_size'])
if ((available - necessary) < 0):
reason = (_('Unable to migrate %(instance_uuid)s: Disk of instance is too large (available on destination host: %(available)s < need: %(necessary)s)') % {'instance_uuid': instance['uuid'], 'available': available, 'necessary': necessary})
raise exception.MigrationPreCheckError(reason=reason)
|
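Sketch of the sizing decision with fabricated disk_info records: with disk_over_commit the check uses the bytes actually allocated ('disk_size'), otherwise the full virtual size ('virt_disk_size'):

GI = 1024 ** 3
disk_infos = [{'disk_size': 2 * GI, 'virt_disk_size': 20 * GI}]
available = 10 * GI             # free space reported for the destination

over_commit = sum(int(i['disk_size']) for i in disk_infos)      # 2 GiB: fits
strict = sum(int(i['virt_disk_size']) for i in disk_infos)      # 20 GiB: too big
print(available - over_commit >= 0, available - strict >= 0)    # True False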
def _compare_cpu(self, cpu_info):
'Checks whether the host cpu is compatible with a cpu given by xml.\n "xml" must be a part of libvirt.openAuth(...).getCapabilities().\n Return values follow virCPUCompareResult; if the return value is\n greater than 0, live migration is possible.\n \'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult\'\n\n :param cpu_info: json string of cpu feature from _get_cpu_info()\n :returns:\n None. If the given cpu info is not compatible with this server,\n an exception is raised.\n '
if (CONF.libvirt.virt_type == 'xen'):
return 1
info = jsonutils.loads(cpu_info)
LOG.info(_LI('Instance launched has CPU info: %s'), cpu_info)
cpu = vconfig.LibvirtConfigCPU()
cpu.arch = info['arch']
cpu.model = info['model']
cpu.vendor = info['vendor']
cpu.sockets = info['topology']['sockets']
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
u = 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
try:
ret = self._conn.compareCPU(cpu.to_xml(), 0)
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
ret = unicode(e)
LOG.error(m, {'ret': ret, 'u': u})
if (ret <= 0):
LOG.error(m, {'ret': ret, 'u': u})
raise exception.InvalidCPUInfo(reason=(m % {'ret': ret, 'u': u}))
| 6,895,970,308,596,628,000
|
Checks whether the host cpu is compatible with a cpu given by xml.
"xml" must be a part of libvirt.openAuth(...).getCapabilities().
Return values follow virCPUCompareResult; if the return value is
greater than 0, live migration is possible.
'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
:param cpu_info: json string of cpu feature from _get_cpu_info()
:returns:
None. If the given cpu info is not compatible with this server,
an exception is raised.
|
nova/virt/libvirt/driver.py
|
_compare_cpu
|
srajag/nova
|
python
|
def _compare_cpu(self, cpu_info):
'Checks whether the host cpu is compatible with a cpu given by xml.\n "xml" must be a part of libvirt.openAuth(...).getCapabilities().\n Return values follow virCPUCompareResult; if the return value is\n greater than 0, live migration is possible.\n \'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult\'\n\n :param cpu_info: json string of cpu feature from _get_cpu_info()\n :returns:\n None. If the given cpu info is not compatible with this server,\n an exception is raised.\n '
if (CONF.libvirt.virt_type == 'xen'):
return 1
info = jsonutils.loads(cpu_info)
LOG.info(_LI('Instance launched has CPU info: %s'), cpu_info)
cpu = vconfig.LibvirtConfigCPU()
cpu.arch = info['arch']
cpu.model = info['model']
cpu.vendor = info['vendor']
cpu.sockets = info['topology']['sockets']
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
u = 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
try:
ret = self._conn.compareCPU(cpu.to_xml(), 0)
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
ret = unicode(e)
LOG.error(m, {'ret': ret, 'u': u})
if (ret <= 0):
LOG.error(m, {'ret': ret, 'u': u})
raise exception.InvalidCPUInfo(reason=(m % {'ret': ret, 'u': u}))
|
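compareCPU() returns one of libvirt's virCPUCompareResult values, and the method treats anything not greater than 0 as incompatible; the constants below are copied from libvirt's documented enum for illustration only:

VIR_CPU_COMPARE_ERROR = -1
VIR_CPU_COMPARE_INCOMPATIBLE = 0
VIR_CPU_COMPARE_IDENTICAL = 1
VIR_CPU_COMPARE_SUPERSET = 2

for ret in (-1, 0, 1, 2):
    print(ret, 'compatible' if ret > 0 else 'raises InvalidCPUInfo')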
def _create_shared_storage_test_file(self):
'Makes tmpfile under CONF.instances_path.'
dirpath = CONF.instances_path
(fd, tmp_file) = tempfile.mkstemp(dir=dirpath)
LOG.debug('Creating tmpfile %s to notify to other compute nodes that they should mount the same storage.', tmp_file)
os.close(fd)
return os.path.basename(tmp_file)
| 5,045,898,907,620,860,000
|
Makes tmpfile under CONF.instances_path.
|
nova/virt/libvirt/driver.py
|
_create_shared_storage_test_file
|
srajag/nova
|
python
|
def _create_shared_storage_test_file(self):
dirpath = CONF.instances_path
(fd, tmp_file) = tempfile.mkstemp(dir=dirpath)
LOG.debug('Creating tmpfile %s to notify to other compute nodes that they should mount the same storage.', tmp_file)
os.close(fd)
return os.path.basename(tmp_file)
|
def _check_shared_storage_test_file(self, filename):
'Confirms existence of the tmpfile under CONF.instances_path.\n\n Returns False if the tmpfile cannot be confirmed.\n '
tmp_file = os.path.join(CONF.instances_path, filename)
if (not os.path.exists(tmp_file)):
return False
else:
return True
| 834,513,234,792,377,700
|
Confirms existence of the tmpfile under CONF.instances_path.
Returns False if the tmpfile cannot be confirmed.
|
nova/virt/libvirt/driver.py
|
_check_shared_storage_test_file
|
srajag/nova
|
python
|
def _check_shared_storage_test_file(self, filename):
'Confirms existence of the tmpfile under CONF.instances_path.\n\n Returns False if the tmpfile cannot be confirmed.\n '
tmp_file = os.path.join(CONF.instances_path, filename)
if (not os.path.exists(tmp_file)):
return False
else:
return True
|
def _cleanup_shared_storage_test_file(self, filename):
'Removes the tmpfile under CONF.instances_path.'
tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
| 2,424,444,300,822,148,000
|
Removes the tmpfile under CONF.instances_path.
|
nova/virt/libvirt/driver.py
|
_cleanup_shared_storage_test_file
|
srajag/nova
|
python
|
def _cleanup_shared_storage_test_file(self, filename):
tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
|
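End-to-end sketch of the shared-storage handshake the three methods above implement, run against one temporary directory standing in for CONF.instances_path on both hosts (so the check necessarily succeeds):

import os
import tempfile

instances_path = tempfile.mkdtemp()                    # stand-in for CONF.instances_path

fd, tmp_file = tempfile.mkstemp(dir=instances_path)    # "destination" creates the marker
os.close(fd)
filename = os.path.basename(tmp_file)

shared = os.path.exists(os.path.join(instances_path, filename))  # "source" checks it
print(shared)                                          # True -- same dir, so "shared"

os.remove(os.path.join(instances_path, filename))      # "destination" cleans up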
def ensure_filtering_rules_for_instance(self, instance, network_info):
"Ensure that an instance's filtering rules are enabled.\n\n When migrating an instance, we need the filtering rules to\n be configured on the destination host before starting the\n migration.\n\n Also, when restarting the compute service, we need to ensure\n that filtering rules exist for all running services.\n "
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
timeout_count = range(CONF.live_migration_retry_count)
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance, network_info):
break
timeout_count.pop()
if (len(timeout_count) == 0):
msg = _('The firewall filter for %s does not exist')
raise exception.NovaException((msg % instance.name))
greenthread.sleep(1)
| -1,945,658,079,477,859,800
|
Ensure that an instance's filtering rules are enabled.
When migrating an instance, we need the filtering rules to
be configured on the destination host before starting the
migration.
Also, when restarting the compute service, we need to ensure
that filtering rules exist for all running services.
|
nova/virt/libvirt/driver.py
|
ensure_filtering_rules_for_instance
|
srajag/nova
|
python
|
def ensure_filtering_rules_for_instance(self, instance, network_info):
"Ensure that an instance's filtering rules are enabled.\n\n When migrating an instance, we need the filtering rules to\n be configured on the destination host before starting the\n migration.\n\n Also, when restarting the compute service, we need to ensure\n that filtering rules exist for all running services.\n "
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
timeout_count = range(CONF.live_migration_retry_count)
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance, network_info):
break
timeout_count.pop()
if (len(timeout_count) == 0):
msg = _('The firewall filter for %s does not exist')
raise exception.NovaException((msg % instance.name))
greenthread.sleep(1)
|
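The retry loop above uses a list as a countdown: pop() shrinks it each pass, and an empty list both terminates the while and signals timeout. A standalone sketch of the same pattern, with the filter check stubbed out and the sleep omitted:

retry_count = 3                      # stand-in for CONF.live_migration_retry_count
timeout_count = list(range(retry_count))
while timeout_count:
    if False:                        # stand-in for instance_filter_exists(...)
        break
    timeout_count.pop()
    if len(timeout_count) == 0:
        print('filter never appeared after %d checks' % retry_count)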
def live_migration(self, context, instance, dest, post_method, recover_method, block_migration=False, migrate_data=None):
'Spawning live_migration operation for distributing high-load.\n\n :param context: security context\n :param instance:\n nova.db.sqlalchemy.models.Instance object\n instance object that is migrated.\n :param dest: destination host\n :param post_method:\n post operation method.\n expected nova.compute.manager._post_live_migration.\n :param recover_method:\n recovery method when any exception occurs.\n expected nova.compute.manager._rollback_live_migration.\n :param block_migration: if true, do block migration.\n :param migrate_data: implementation specific params\n\n '
greenthread.spawn(self._live_migration, context, instance, dest, post_method, recover_method, block_migration, migrate_data)
| 7,227,832,000,531,877,000
|
Spawning live_migration operation for distributing high-load.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager._post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, do block migration.
:param migrate_data: implementation specific params
|
nova/virt/libvirt/driver.py
|
live_migration
|
srajag/nova
|
python
|
def live_migration(self, context, instance, dest, post_method, recover_method, block_migration=False, migrate_data=None):
'Spawning live_migration operation for distributing high-load.\n\n :param context: security context\n :param instance:\n nova.db.sqlalchemy.models.Instance object\n instance object that is migrated.\n :param dest: destination host\n :param post_method:\n post operation method.\n expected nova.compute.manager._post_live_migration.\n :param recover_method:\n recovery method when any exception occurs.\n expected nova.compute.manager._rollback_live_migration.\n :param block_migration: if true, do block migration.\n :param migrate_data: implementation specific params\n\n '
greenthread.spawn(self._live_migration, context, instance, dest, post_method, recover_method, block_migration, migrate_data)
|
def _live_migration(self, context, instance, dest, post_method, recover_method, block_migration=False, migrate_data=None):
'Do live migration.\n\n :param context: security context\n :param instance:\n nova.db.sqlalchemy.models.Instance object\n instance object that is migrated.\n :param dest: destination host\n :param post_method:\n post operation method.\n expected nova.compute.manager._post_live_migration.\n :param recover_method:\n recovery method when any exception occurs.\n expected nova.compute.manager._rollback_live_migration.\n :param block_migration: if true, do block migration.\n :param migrate_data: implementation specific params\n '
try:
if block_migration:
flaglist = CONF.libvirt.block_migration_flag.split(',')
else:
flaglist = CONF.libvirt.live_migration_flag.split(',')
flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
logical_sum = reduce((lambda x, y: (x | y)), flagvals)
dom = self._lookup_by_name(instance['name'])
pre_live_migrate_data = (migrate_data or {}).get('pre_live_migration_result', {})
listen_addrs = pre_live_migrate_data.get('graphics_listen_addrs')
migratable_flag = getattr(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None)
if ((migratable_flag is None) or (listen_addrs is None)):
self._check_graphics_addresses_can_live_migrate(listen_addrs)
dom.migrateToURI((CONF.libvirt.live_migration_uri % dest), logical_sum, None, CONF.libvirt.live_migration_bandwidth)
else:
old_xml_str = dom.XMLDesc(migratable_flag)
new_xml_str = self._correct_listen_addr(old_xml_str, listen_addrs)
dom.migrateToURI2((CONF.libvirt.live_migration_uri % dest), None, new_xml_str, logical_sum, None, CONF.libvirt.live_migration_bandwidth)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Live Migration failure: %s'), e, instance=instance)
recover_method(context, instance, dest, block_migration)
timer = loopingcall.FixedIntervalLoopingCall(f=None)
def wait_for_live_migration():
'waiting for live migration completion.'
try:
self.get_info(instance)['state']
except exception.InstanceNotFound:
timer.stop()
post_method(context, instance, dest, block_migration, migrate_data)
timer.f = wait_for_live_migration
timer.start(interval=0.5).wait()
| 5,170,598,127,086,826,000
|
Do live migration.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager._post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, do block migration.
:param migrate_data: implementation specific params
|
nova/virt/libvirt/driver.py
|
_live_migration
|
srajag/nova
|
python
|
def _live_migration(self, context, instance, dest, post_method, recover_method, block_migration=False, migrate_data=None):
'Do live migration.\n\n :param context: security context\n :param instance:\n nova.db.sqlalchemy.models.Instance object\n instance object that is migrated.\n :param dest: destination host\n :param post_method:\n post operation method.\n expected nova.compute.manager._post_live_migration.\n :param recover_method:\n recovery method when any exception occurs.\n expected nova.compute.manager._rollback_live_migration.\n :param block_migration: if true, do block migration.\n :param migrate_data: implementation specific params\n '
try:
if block_migration:
flaglist = CONF.libvirt.block_migration_flag.split(',')
else:
flaglist = CONF.libvirt.live_migration_flag.split(',')
flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
logical_sum = reduce((lambda x, y: (x | y)), flagvals)
dom = self._lookup_by_name(instance['name'])
pre_live_migrate_data = (migrate_data or {}).get('pre_live_migration_result', {})
listen_addrs = pre_live_migrate_data.get('graphics_listen_addrs')
migratable_flag = getattr(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None)
if ((migratable_flag is None) or (listen_addrs is None)):
self._check_graphics_addresses_can_live_migrate(listen_addrs)
dom.migrateToURI((CONF.libvirt.live_migration_uri % dest), logical_sum, None, CONF.libvirt.live_migration_bandwidth)
else:
old_xml_str = dom.XMLDesc(migratable_flag)
new_xml_str = self._correct_listen_addr(old_xml_str, listen_addrs)
dom.migrateToURI2((CONF.libvirt.live_migration_uri % dest), None, new_xml_str, logical_sum, None, CONF.libvirt.live_migration_bandwidth)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Live Migration failure: %s'), e, instance=instance)
recover_method(context, instance, dest, block_migration)
timer = loopingcall.FixedIntervalLoopingCall(f=None)
def wait_for_live_migration():
'waiting for live migration completion.'
try:
self.get_info(instance)['state']
except exception.InstanceNotFound:
timer.stop()
post_method(context, instance, dest, block_migration, migrate_data)
timer.f = wait_for_live_migration
timer.start(interval=0.5).wait()
|
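The comma-separated flag names from the config are resolved to libvirt's integer constants and OR-ed into one bitmask; a sketch using sample constant values taken from libvirt's published enum:

from functools import reduce         # a builtin on the Python 2 this code targets

FLAGS = {'VIR_MIGRATE_UNDEFINE_SOURCE': 16,
         'VIR_MIGRATE_PEER2PEER': 2,
         'VIR_MIGRATE_LIVE': 1}
flaglist = 'VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE'.split(',')
flagvals = [FLAGS[x.strip()] for x in flaglist]
print(reduce(lambda x, y: x | y, flagvals))   # 19 (the OR-ed bitmask)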
def _fetch_instance_kernel_ramdisk(self, context, instance):
'Download kernel and ramdisk for instance in instance directory.'
instance_dir = libvirt_utils.get_instance_path(instance)
if instance['kernel_id']:
libvirt_utils.fetch_image(context, os.path.join(instance_dir, 'kernel'), instance['kernel_id'], instance['user_id'], instance['project_id'])
if instance['ramdisk_id']:
libvirt_utils.fetch_image(context, os.path.join(instance_dir, 'ramdisk'), instance['ramdisk_id'], instance['user_id'], instance['project_id'])
| 6,271,825,693,192,430,000
|
Download kernel and ramdisk for instance in instance directory.
|
nova/virt/libvirt/driver.py
|
_fetch_instance_kernel_ramdisk
|
srajag/nova
|
python
|
def _fetch_instance_kernel_ramdisk(self, context, instance):
instance_dir = libvirt_utils.get_instance_path(instance)
if instance['kernel_id']:
libvirt_utils.fetch_image(context, os.path.join(instance_dir, 'kernel'), instance['kernel_id'], instance['user_id'], instance['project_id'])
if instance['ramdisk_id']:
libvirt_utils.fetch_image(context, os.path.join(instance_dir, 'ramdisk'), instance['ramdisk_id'], instance['user_id'], instance['project_id'])
|
def rollback_live_migration_at_destination(self, context, instance, network_info, block_device_info, destroy_disks=True, migrate_data=None):
'Clean up destination node after a failed live migration.'
self.destroy(context, instance, network_info, block_device_info, destroy_disks, migrate_data)
| -8,457,260,677,069,410,000
|
Clean up destination node after a failed live migration.
|
nova/virt/libvirt/driver.py
|
rollback_live_migration_at_destination
|
srajag/nova
|
python
|
def rollback_live_migration_at_destination(self, context, instance, network_info, block_device_info, destroy_disks=True, migrate_data=None):
self.destroy(context, instance, network_info, block_device_info, destroy_disks, migrate_data)
|
def pre_live_migration(self, context, instance, block_device_info, network_info, disk_info, migrate_data=None):
'Prepares for live migration.'
is_shared_block_storage = True
is_shared_instance_path = True
is_block_migration = True
instance_relative_path = None
if migrate_data:
is_shared_block_storage = migrate_data.get('is_shared_block_storage', True)
is_shared_instance_path = migrate_data.get('is_shared_instance_path', True)
is_block_migration = migrate_data.get('block_migration', True)
instance_relative_path = migrate_data.get('instance_relative_path')
if (not (is_shared_instance_path and is_shared_block_storage)):
if configdrive.required_by(instance):
raise exception.NoLiveMigrationForConfigDriveInLibVirt()
if (not is_shared_instance_path):
if instance_relative_path:
instance_dir = os.path.join(CONF.instances_path, instance_relative_path)
else:
instance_dir = libvirt_utils.get_instance_path(instance)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
if (not is_shared_block_storage):
self._create_images_and_backing(context, instance, instance_dir, disk_info)
if (not (is_block_migration or is_shared_instance_path)):
console_file = self._get_console_log_path(instance)
libvirt_utils.file_open(console_file, 'a').close()
self._fetch_instance_kernel_ramdisk(context, instance)
block_device_mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_info = blockinfo.get_info_from_bdm(CONF.libvirt.virt_type, vol)
self._connect_volume(connection_info, disk_info)
max_retry = CONF.live_migration_retry_count
for cnt in range(max_retry):
try:
self.plug_vifs(instance, network_info)
break
except processutils.ProcessExecutionError:
if (cnt == (max_retry - 1)):
raise
else:
LOG.warn(_LW('plug_vifs() failed %(cnt)d. Retry up to %(max_retry)d.'), {'cnt': cnt, 'max_retry': max_retry}, instance=instance)
greenthread.sleep(1)
res_data = {'graphics_listen_addrs': {}}
res_data['graphics_listen_addrs']['vnc'] = CONF.vncserver_listen
res_data['graphics_listen_addrs']['spice'] = CONF.spice.server_listen
return res_data
| 5,580,733,104,593,865,000
|
Prepares for live migration.
|
nova/virt/libvirt/driver.py
|
pre_live_migration
|
srajag/nova
|
python
|
def pre_live_migration(self, context, instance, block_device_info, network_info, disk_info, migrate_data=None):
is_shared_block_storage = True
is_shared_instance_path = True
is_block_migration = True
instance_relative_path = None
if migrate_data:
is_shared_block_storage = migrate_data.get('is_shared_block_storage', True)
is_shared_instance_path = migrate_data.get('is_shared_instance_path', True)
is_block_migration = migrate_data.get('block_migration', True)
instance_relative_path = migrate_data.get('instance_relative_path')
if (not (is_shared_instance_path and is_shared_block_storage)):
if configdrive.required_by(instance):
raise exception.NoLiveMigrationForConfigDriveInLibVirt()
if (not is_shared_instance_path):
if instance_relative_path:
instance_dir = os.path.join(CONF.instances_path, instance_relative_path)
else:
instance_dir = libvirt_utils.get_instance_path(instance)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
if (not is_shared_block_storage):
self._create_images_and_backing(context, instance, instance_dir, disk_info)
if (not (is_block_migration or is_shared_instance_path)):
console_file = self._get_console_log_path(instance)
libvirt_utils.file_open(console_file, 'a').close()
self._fetch_instance_kernel_ramdisk(context, instance)
block_device_mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_info = blockinfo.get_info_from_bdm(CONF.libvirt.virt_type, vol)
self._connect_volume(connection_info, disk_info)
max_retry = CONF.live_migration_retry_count
for cnt in range(max_retry):
try:
self.plug_vifs(instance, network_info)
break
except processutils.ProcessExecutionError:
if (cnt == (max_retry - 1)):
raise
else:
LOG.warn(_LW('plug_vifs() failed %(cnt)d. Retry up to %(max_retry)d.'), {'cnt': cnt, 'max_retry': max_retry}, instance=instance)
greenthread.sleep(1)
res_data = {'graphics_listen_addrs': {}}
res_data['graphics_listen_addrs']['vnc'] = CONF.vncserver_listen
res_data['graphics_listen_addrs']['spice'] = CONF.spice.server_listen
return res_data
|
def _create_images_and_backing(self, context, instance, instance_dir, disk_info_json):
':param context: security context\n :param instance:\n nova.db.sqlalchemy.models.Instance object\n instance object that is migrated.\n :param instance_dir:\n instance path to use, calculated externally to handle block\n migrating an instance with an old style instance path\n :param disk_info_json:\n json strings specified in get_instance_disk_info\n\n '
if (not disk_info_json):
disk_info = []
else:
disk_info = jsonutils.loads(disk_info_json)
for info in disk_info:
base = os.path.basename(info['path'])
instance_disk = os.path.join(instance_dir, base)
if ((not info['backing_file']) and (not os.path.exists(instance_disk))):
libvirt_utils.create_image(info['type'], instance_disk, info['virt_disk_size'])
elif info['backing_file']:
cache_name = os.path.basename(info['backing_file'])
image = self.image_backend.image(instance, instance_disk, CONF.libvirt.images_type)
if cache_name.startswith('ephemeral'):
image.cache(fetch_func=self._create_ephemeral, fs_label=cache_name, os_type=instance['os_type'], filename=cache_name, size=info['virt_disk_size'], ephemeral_size=instance['ephemeral_gb'])
elif cache_name.startswith('swap'):
inst_type = flavors.extract_flavor(instance)
swap_mb = inst_type['swap']
image.cache(fetch_func=self._create_swap, filename=('swap_%s' % swap_mb), size=(swap_mb * units.Mi), swap_mb=swap_mb)
else:
image.cache(fetch_func=libvirt_utils.fetch_image, context=context, filename=cache_name, image_id=instance['image_ref'], user_id=instance['user_id'], project_id=instance['project_id'], size=info['virt_disk_size'])
self._fetch_instance_kernel_ramdisk(context, instance)
| 7,238,436,511,716,076,000
|
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param instance_dir:
instance path to use, calculated externally to handle block
migrating an instance with an old style instance path
:param disk_info_json:
json strings specified in get_instance_disk_info
|
nova/virt/libvirt/driver.py
|
_create_images_and_backing
|
srajag/nova
|
python
|
def _create_images_and_backing(self, context, instance, instance_dir, disk_info_json):
':param context: security context\n :param instance:\n nova.db.sqlalchemy.models.Instance object\n instance object that is migrated.\n :param instance_dir:\n instance path to use, calculated externally to handle block\n migrating an instance with an old style instance path\n :param disk_info_json:\n json strings specified in get_instance_disk_info\n\n '
if (not disk_info_json):
disk_info = []
else:
disk_info = jsonutils.loads(disk_info_json)
for info in disk_info:
base = os.path.basename(info['path'])
instance_disk = os.path.join(instance_dir, base)
if ((not info['backing_file']) and (not os.path.exists(instance_disk))):
libvirt_utils.create_image(info['type'], instance_disk, info['virt_disk_size'])
elif info['backing_file']:
cache_name = os.path.basename(info['backing_file'])
image = self.image_backend.image(instance, instance_disk, CONF.libvirt.images_type)
if cache_name.startswith('ephemeral'):
image.cache(fetch_func=self._create_ephemeral, fs_label=cache_name, os_type=instance['os_type'], filename=cache_name, size=info['virt_disk_size'], ephemeral_size=instance['ephemeral_gb'])
elif cache_name.startswith('swap'):
inst_type = flavors.extract_flavor(instance)
swap_mb = inst_type['swap']
image.cache(fetch_func=self._create_swap, filename=('swap_%s' % swap_mb), size=(swap_mb * units.Mi), swap_mb=swap_mb)
else:
image.cache(fetch_func=libvirt_utils.fetch_image, context=context, filename=cache_name, image_id=instance['image_ref'], user_id=instance['user_id'], project_id=instance['project_id'], size=info['virt_disk_size'])
self._fetch_instance_kernel_ramdisk(context, instance)
|
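The backing-file dispatch keys off the cached image's basename: names beginning with 'ephemeral' or 'swap' are rebuilt locally, anything else is fetched from the image service. A sketch of the same dispatch with sample names:

def classify(cache_name):
    if cache_name.startswith('ephemeral'):
        return 'recreate ephemeral disk locally'
    elif cache_name.startswith('swap'):
        return 'recreate swap disk locally'
    return 'fetch backing image from the image service'

for name in ('ephemeral_20_default', 'swap_512', '9a7b3c'):   # sample basenames
    print(name, '->', classify(name))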
def post_live_migration_at_source(self, context, instance, network_info):
'Unplug VIFs from networks at source.\n\n :param context: security context\n :param instance: instance object reference\n :param network_info: instance network information\n '
self.unplug_vifs(instance, network_info)
| -5,631,838,636,930,248,000
|
Unplug VIFs from networks at source.
:param context: security context
:param instance: instance object reference
:param network_info: instance network information
|
nova/virt/libvirt/driver.py
|
post_live_migration_at_source
|
srajag/nova
|
python
|
def post_live_migration_at_source(self, context, instance, network_info):
'Unplug VIFs from networks at source.\n\n :param context: security context\n :param instance: instance object reference\n :param network_info: instance network information\n '
self.unplug_vifs(instance, network_info)
|
def post_live_migration_at_destination(self, context, instance, network_info, block_migration=False, block_device_info=None):
'Post operation of live migration at destination host.\n\n :param context: security context\n :param instance:\n nova.db.sqlalchemy.models.Instance object\n instance object that is migrated.\n :param network_info: instance network information\n :param block_migration: if true, post operation of block_migration.\n '
dom_list = self._conn.listDefinedDomains()
if (instance['name'] not in dom_list):
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, block_device_info)
xml = self._get_guest_xml(context, instance, network_info, disk_info, block_device_info=block_device_info, write_to_disk=True)
self._conn.defineXML(xml)
| 6,864,859,203,662,335,000
|
Post operation of live migration at destination host.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param network_info: instance network information
:param block_migration: if true, post operation of block_migration.
|
nova/virt/libvirt/driver.py
|
post_live_migration_at_destination
|
srajag/nova
|
python
|
def post_live_migration_at_destination(self, context, instance, network_info, block_migration=False, block_device_info=None):
'Post operation of live migration at destination host.\n\n :param context: security context\n :param instance:\n nova.db.sqlalchemy.models.Instance object\n instance object that is migrated.\n :param network_info: instance network information\n :param block_migration: if true, post operation of block_migration.\n '
dom_list = self._conn.listDefinedDomains()
if (instance['name'] not in dom_list):
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, block_device_info)
xml = self._get_guest_xml(context, instance, network_info, disk_info, block_device_info=block_device_info, write_to_disk=True)
self._conn.defineXML(xml)
|
def _get_disk_over_committed_size_total(self):
'Return total over committed disk size for all instances.'
disk_over_committed_size = 0
for dom in self._list_instance_domains():
try:
xml = dom.XMLDesc(0)
disk_infos = jsonutils.loads(self._get_instance_disk_info(dom.name(), xml))
for info in disk_infos:
disk_over_committed_size += int(info['over_committed_disk_size'])
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
LOG.warn((_LW('Error from libvirt while getting description of %(instance_name)s: [Error Code %(error_code)s] %(ex)s') % {'instance_name': dom.name(), 'error_code': error_code, 'ex': ex}))
except OSError as e:
if (e.errno == errno.ENOENT):
LOG.warn(_LW('Periodic task is updating the host stat, it is trying to get disk %(i_name)s, but disk file was removed by concurrent operations such as resize.'), {'i_name': dom.name()})
elif (e.errno == errno.EACCES):
LOG.warn(_LW('Periodic task is updating the host stat, it is trying to get disk %(i_name)s, but access is denied. It is most likely due to a VM that exists on the compute node but is not managed by Nova.'), {'i_name': dom.name()})
else:
raise
greenthread.sleep(0)
return disk_over_committed_size
| 3,580,466,839,921,161,000
|
Return total over committed disk size for all instances.
|
nova/virt/libvirt/driver.py
|
_get_disk_over_committed_size_total
|
srajag/nova
|
python
|
def _get_disk_over_committed_size_total(self):
disk_over_committed_size = 0
for dom in self._list_instance_domains():
try:
xml = dom.XMLDesc(0)
disk_infos = jsonutils.loads(self._get_instance_disk_info(dom.name(), xml))
for info in disk_infos:
disk_over_committed_size += int(info['over_committed_disk_size'])
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
LOG.warn((_LW('Error from libvirt while getting description of %(instance_name)s: [Error Code %(error_code)s] %(ex)s') % {'instance_name': dom.name(), 'error_code': error_code, 'ex': ex}))
except OSError as e:
if (e.errno == errno.ENOENT):
LOG.warn(_LW('Periodic task is updating the host stat, it is trying to get disk %(i_name)s, but disk file was removed by concurrent operations such as resize.'), {'i_name': dom.name()})
elif (e.errno == errno.EACCES):
LOG.warn(_LW('Periodic task is updating the host stat, it is trying to get disk %(i_name)s, but access is denied. It is most likely due to a VM that exists on the compute node but is not managed by Nova.'), {'i_name': dom.name()})
else:
raise
greenthread.sleep(0)
return disk_over_committed_size
|
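Each disk's 'over_committed_disk_size' is the gap between its virtual size and the bytes actually allocated (as computed by the companion _get_instance_disk_info()); the periodic task simply sums it. With fabricated figures:

GI = 1024 ** 3
disk_infos = [{'over_committed_disk_size': 18 * GI},
              {'over_committed_disk_size': 2 * GI}]
print(sum(int(i['over_committed_disk_size']) for i in disk_infos))  # 21474836480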
def unfilter_instance(self, instance, network_info):
'See comments of same method in firewall_driver.'
self.firewall_driver.unfilter_instance(instance, network_info=network_info)
| -4,260,456,343,576,868,000
|
See comments of same method in firewall_driver.
|
nova/virt/libvirt/driver.py
|
unfilter_instance
|
srajag/nova
|
python
|
def unfilter_instance(self, instance, network_info):
self.firewall_driver.unfilter_instance(instance, network_info=network_info)
|
def get_host_stats(self, refresh=False):
"Return the current state of the host.\n\n If 'refresh' is True, run update the stats first.\n "
return self.host_state.get_host_stats(refresh=refresh)
| -4,014,127,448,018,930,700
|
Return the current state of the host.
If 'refresh' is True, update the stats first.
|
nova/virt/libvirt/driver.py
|
get_host_stats
|
srajag/nova
|
python
|
def get_host_stats(self, refresh=False):
"Return the current state of the host.\n\n If 'refresh' is True, run update the stats first.\n "
return self.host_state.get_host_stats(refresh=refresh)
|
def get_host_cpu_stats(self):
'Return the current CPU state of the host.'
stats = self._conn.getCPUStats(libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
stats['frequency'] = self._conn.getInfo()[3]
return stats
| 3,507,474,525,421,915,600
|
Return the current CPU state of the host.
|
nova/virt/libvirt/driver.py
|
get_host_cpu_stats
|
srajag/nova
|
python
|
def get_host_cpu_stats(self):
stats = self._conn.getCPUStats(libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
stats['frequency'] = self._conn.getInfo()[3]
return stats
|
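virConnect.getInfo() returns an eight-element list whose index 3 is the CPU clock in MHz, which is what gets attached as 'frequency' above. A sketch with fabricated values:

info = ['x86_64', 16384, 8, 2600, 1, 1, 4, 2]    # model, MiB, cpus, MHz, nodes, sockets, cores, threads
stats = {'kernel': 5238000000, 'idle': 453000000000,
         'user': 2840000000, 'iowait': 62000000}  # fabricated CPU counters
stats['frequency'] = info[3]
print(stats['frequency'])                         # 2600 (MHz)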
def get_host_uptime(self, host):
'Returns the result of calling "uptime".'
(out, err) = utils.execute('env', 'LANG=C', 'uptime')
return out
| 9,065,312,545,126,296,000
|
Returns the result of calling "uptime".
|
nova/virt/libvirt/driver.py
|
get_host_uptime
|
srajag/nova
|
python
|
def get_host_uptime(self, host):
(out, err) = utils.execute('env', 'LANG=C', 'uptime')
return out
|
def manage_image_cache(self, context, all_instances):
'Manage the local cache of images.'
self.image_cache_manager.update(context, all_instances)
| 6,971,867,045,383,601,000
|
Manage the local cache of images.
|
nova/virt/libvirt/driver.py
|
manage_image_cache
|
srajag/nova
|
python
|
def manage_image_cache(self, context, all_instances):
self.image_cache_manager.update(context, all_instances)
|
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize, shared_storage=False):
'Used only for cleanup in case migrate_disk_and_power_off fails.'
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
utils.execute('mv', inst_base_resize, inst_base)
if (not shared_storage):
utils.execute('ssh', dest, 'rm', '-rf', inst_base)
except Exception:
pass
| -7,738,954,416,765,412,000
|
Used only for cleanup in case migrate_disk_and_power_off fails.
|
nova/virt/libvirt/driver.py
|
_cleanup_remote_migration
|
srajag/nova
|
python
|
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize, shared_storage=False):
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
utils.execute('mv', inst_base_resize, inst_base)
if (not shared_storage):
utils.execute('ssh', dest, 'rm', '-rf', inst_base)
except Exception:
pass
|
@staticmethod
def _disk_size_from_instance(instance, info):
'Determines the disk size from instance properties\n\n Returns the disk size by using the disk name to determine whether it\n is a root or an ephemeral disk, then by checking properties of the\n instance returns the size converted to bytes.\n\n Returns 0 if the disk name does not match (disk, disk.local).\n '
fname = os.path.basename(info['path'])
if (fname == 'disk'):
size = instance['root_gb']
elif (fname == 'disk.local'):
size = instance['ephemeral_gb']
else:
size = 0
return (size * units.Gi)
| -3,037,090,758,507,430,000
|
Determines the disk size from instance properties
Returns the disk size by using the disk name to determine whether it
is a root or an ephemeral disk, then by checking properties of the
instance returns the size converted to bytes.
Returns 0 if the disk name does not match (disk, disk.local).
|
nova/virt/libvirt/driver.py
|
_disk_size_from_instance
|
srajag/nova
|
python
|
@staticmethod
def _disk_size_from_instance(instance, info):
    'Determines the disk size from instance properties\n\n Returns the disk size by using the disk name to determine whether it\n is a root or an ephemeral disk, then by checking properties of the\n instance returns the size converted to bytes.\n\n Returns 0 if the disk name does not match (disk, disk.local).\n '
fname = os.path.basename(info['path'])
if (fname == 'disk'):
size = instance['root_gb']
elif (fname == 'disk.local'):
size = instance['ephemeral_gb']
else:
size = 0
return (size * units.Gi)
|
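A quick worked example of the size selection above, using stand-in values (the instance dict and path are illustrative, not taken from the source):

import os

Gi = 2 ** 30  # the same constant as units.Gi

instance = {'root_gb': 20, 'ephemeral_gb': 5}          # hypothetical flavor sizes
info = {'path': '/var/lib/nova/instances/uuid/disk'}   # hypothetical disk info

fname = os.path.basename(info['path'])    # 'disk' selects the root disk
size_gb = instance['root_gb'] if fname == 'disk' else 0
assert size_gb * Gi == 21474836480        # 20 GiB expressed in bytes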
@staticmethod
def _disk_raw_to_qcow2(path):
'Converts a raw disk to qcow2.'
path_qcow = (path + '_qcow')
utils.execute('qemu-img', 'convert', '-f', 'raw', '-O', 'qcow2', path, path_qcow)
utils.execute('mv', path_qcow, path)
| 5,152,713,984,113,876,000
|
Converts a raw disk to qcow2.
|
nova/virt/libvirt/driver.py
|
_disk_raw_to_qcow2
|
srajag/nova
|
python
|
@staticmethod
def _disk_raw_to_qcow2(path):
path_qcow = (path + '_qcow')
utils.execute('qemu-img', 'convert', '-f', 'raw', '-O', 'qcow2', path, path_qcow)
utils.execute('mv', path_qcow, path)
|
@staticmethod
def _disk_qcow2_to_raw(path):
'Converts a qcow2 disk to raw.'
path_raw = (path + '_raw')
utils.execute('qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw', path, path_raw)
utils.execute('mv', path_raw, path)
| -5,464,065,104,567,348,000
|
Converts a qcow2 disk to raw.
|
nova/virt/libvirt/driver.py
|
_disk_qcow2_to_raw
|
srajag/nova
|
python
|
@staticmethod
def _disk_qcow2_to_raw(path):
path_raw = (path + '_raw')
utils.execute('qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw', path, path_raw)
utils.execute('mv', path_raw, path)
|
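The two conversion helpers above are mirror images of one qemu-img pattern: convert into a sibling temp file, then replace the original. A condensed sketch covering both directions, assuming qemu-img is on PATH (subprocess and os.replace stand in for nova's utils.execute and 'mv'):

import os
import subprocess

def convert_disk(path, src_fmt, dst_fmt):
    # Write the converted image next to the original, then swap it in,
    # mirroring _disk_raw_to_qcow2 and _disk_qcow2_to_raw.
    tmp = '%s_%s' % (path, dst_fmt)
    subprocess.check_call(['qemu-img', 'convert', '-f', src_fmt,
                           '-O', dst_fmt, path, tmp])
    os.replace(tmp, path)

# convert_disk('/var/lib/nova/instances/uuid/disk', 'raw', 'qcow2')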
def _disk_resize(self, info, size):
    'Attempts to resize a disk to the given size\n\n Attempts to resize a disk by checking the capabilities and\n preparing the format, then calling disk.api.extend.\n\n Note: Currently only disk extension is supported.\n '
fmt = info['type']
pth = info['path']
if (size and (fmt == 'qcow2') and disk.can_resize_image(pth, size) and disk.is_image_partitionless(pth, use_cow=True)):
self._disk_qcow2_to_raw(pth)
fmt = 'raw'
if size:
use_cow = (fmt == 'qcow2')
disk.extend(pth, size, use_cow=use_cow)
if ((fmt == 'raw') and CONF.use_cow_images):
self._disk_raw_to_qcow2(pth)
| 4,721,868,778,200,022,000
|
Attempts to resize a disk to the given size
Attempts to resize a disk by checking the capabilities and
preparing the format, then calling disk.api.extend.
Note: Currently only disk extension is supported.
|
nova/virt/libvirt/driver.py
|
_disk_resize
|
srajag/nova
|
python
|
def _disk_resize(self, info, size):
    'Attempts to resize a disk to the given size\n\n Attempts to resize a disk by checking the capabilities and\n preparing the format, then calling disk.api.extend.\n\n Note: Currently only disk extension is supported.\n '
fmt = info['type']
pth = info['path']
if (size and (fmt == 'qcow2') and disk.can_resize_image(pth, size) and disk.is_image_partitionless(pth, use_cow=True)):
self._disk_qcow2_to_raw(pth)
fmt = 'raw'
if size:
use_cow = (fmt == 'qcow2')
disk.extend(pth, size, use_cow=use_cow)
if ((fmt == 'raw') and CONF.use_cow_images):
self._disk_raw_to_qcow2(pth)
|
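The non-obvious part of the resize flow above is the format round-trip: a partitionless qcow2 image is flattened to raw so its filesystem can be grown, extended in place, and only converted back when COW images are configured. A parameterized restatement of that decision flow (the callables are injected stand-ins for the disk API and conversion helpers, not the real nova functions):

def resize_disk(path, fmt, size, use_cow_images,
                can_resize, is_partitionless,
                qcow2_to_raw, raw_to_qcow2, extend):
    # 1. qcow2 images that are safe to grow are flattened to raw first.
    if (size and fmt == 'qcow2' and can_resize(path, size)
            and is_partitionless(path)):
        qcow2_to_raw(path)
        fmt = 'raw'
    # 2. The (possibly converted) image is extended in place.
    if size:
        extend(path, size, use_cow=(fmt == 'qcow2'))
    # 3. Raw images are converted back when COW images are configured.
    if fmt == 'raw' and use_cow_images:
        raw_to_qcow2(path)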
def _cleanup_failed_migration(self, inst_base):
"Make sure that a failed migrate doesn't prevent us from rolling\n back in a revert.\n "
try:
shutil.rmtree(inst_base)
except OSError as e:
if (e.errno != errno.ENOENT):
raise
| 7,264,325,835,884,052,000
|
Make sure that a failed migrate doesn't prevent us from rolling
back in a revert.
|
nova/virt/libvirt/driver.py
|
_cleanup_failed_migration
|
srajag/nova
|
python
|
def _cleanup_failed_migration(self, inst_base):
"Make sure that a failed migrate doesn't prevent us from rolling\n back in a revert.\n "
try:
shutil.rmtree(inst_base)
except OSError as e:
if (e.errno != errno.ENOENT):
raise
|
def confirm_migration(self, migration, instance, network_info):
'Confirms a resize, destroying the source VM.'
self._cleanup_resize(instance, network_info)
| -2,616,171,590,583,491,600
|
Confirms a resize, destroying the source VM.
|
nova/virt/libvirt/driver.py
|
confirm_migration
|
srajag/nova
|
python
|
def confirm_migration(self, migration, instance, network_info):
self._cleanup_resize(instance, network_info)
|
def get_host_stats(self, refresh=False):
"Return the current state of the host.\n\n If 'refresh' is True, run update the stats first.\n "
if (refresh or (not self._stats)):
self.update_status()
return self._stats
| 6,806,025,530,485,851,000
|
Return the current state of the host.
If 'refresh' is True, update the stats first.
|
nova/virt/libvirt/driver.py
|
get_host_stats
|
srajag/nova
|
python
|
def get_host_stats(self, refresh=False):
"Return the current state of the host.\n\n If 'refresh' is True, run update the stats first.\n "
if (refresh or (not self._stats)):
self.update_status()
return self._stats
|
def update_status(self):
'Retrieve status info from libvirt.'
def _get_disk_available_least():
        'Return total real disk available least size.\n\n The size of the available disk when the block_migration command is\n given the disk_over_commit param as FALSE.\n\n The size obtained by deducting the real instance disk size from the\n total size of the virtual disks of all instances.\n\n '
disk_free_gb = disk_info_dict['free']
disk_over_committed = self.driver._get_disk_over_committed_size_total()
available_least = ((disk_free_gb * units.Gi) - disk_over_committed)
return (available_least / units.Gi)
LOG.debug('Updating host stats')
disk_info_dict = self.driver._get_local_gb_info()
data = {}
data['supported_instances'] = self.driver._get_instance_capabilities()
data['vcpus'] = self.driver._get_vcpu_total()
data['memory_mb'] = self.driver._get_memory_mb_total()
data['local_gb'] = disk_info_dict['total']
data['vcpus_used'] = self.driver._get_vcpu_used()
data['memory_mb_used'] = self.driver._get_memory_mb_used()
data['local_gb_used'] = disk_info_dict['used']
data['hypervisor_type'] = self.driver._get_hypervisor_type()
data['hypervisor_version'] = self.driver._get_hypervisor_version()
data['hypervisor_hostname'] = self.driver._get_hypervisor_hostname()
data['cpu_info'] = self.driver._get_cpu_info()
data['disk_available_least'] = _get_disk_available_least()
data['pci_passthrough_devices'] = self.driver._get_pci_passthrough_devices()
self._stats = data
return data
| -4,228,633,569,821,032,400
|
Retrieve status info from libvirt.
|
nova/virt/libvirt/driver.py
|
update_status
|
srajag/nova
|
python
|
def update_status(self):
def _get_disk_available_least():
        'Return total real disk available least size.\n\n The size of the available disk when the block_migration command is\n given the disk_over_commit param as FALSE.\n\n The size obtained by deducting the real instance disk size from the\n total size of the virtual disks of all instances.\n\n '
disk_free_gb = disk_info_dict['free']
disk_over_committed = self.driver._get_disk_over_committed_size_total()
available_least = ((disk_free_gb * units.Gi) - disk_over_committed)
return (available_least / units.Gi)
LOG.debug('Updating host stats')
disk_info_dict = self.driver._get_local_gb_info()
data = {}
data['supported_instances'] = self.driver._get_instance_capabilities()
data['vcpus'] = self.driver._get_vcpu_total()
data['memory_mb'] = self.driver._get_memory_mb_total()
data['local_gb'] = disk_info_dict['total']
data['vcpus_used'] = self.driver._get_vcpu_used()
data['memory_mb_used'] = self.driver._get_memory_mb_used()
data['local_gb_used'] = disk_info_dict['used']
data['hypervisor_type'] = self.driver._get_hypervisor_type()
data['hypervisor_version'] = self.driver._get_hypervisor_version()
data['hypervisor_hostname'] = self.driver._get_hypervisor_hostname()
data['cpu_info'] = self.driver._get_cpu_info()
data['disk_available_least'] = _get_disk_available_least()
data['pci_passthrough_devices'] = self.driver._get_pci_passthrough_devices()
self._stats = data
return data
|
def _wait_for_destroy(expected_domid):
'Called at an interval until the VM is gone.'
try:
dom_info = self.get_info(instance)
state = dom_info['state']
new_domid = dom_info['id']
except exception.InstanceNotFound:
LOG.warning(_LW('During wait destroy, instance disappeared.'), instance=instance)
raise loopingcall.LoopingCallDone()
if (state == power_state.SHUTDOWN):
LOG.info(_LI('Instance destroyed successfully.'), instance=instance)
raise loopingcall.LoopingCallDone()
if (new_domid != expected_domid):
LOG.info(_LI('Instance may be started again.'), instance=instance)
kwargs['is_running'] = True
raise loopingcall.LoopingCallDone()
| 5,439,729,913,305,656,000
|
Called at an interval until the VM is gone.
|
nova/virt/libvirt/driver.py
|
_wait_for_destroy
|
srajag/nova
|
python
|
def _wait_for_destroy(expected_domid):
try:
dom_info = self.get_info(instance)
state = dom_info['state']
new_domid = dom_info['id']
except exception.InstanceNotFound:
LOG.warning(_LW('During wait destroy, instance disappeared.'), instance=instance)
raise loopingcall.LoopingCallDone()
if (state == power_state.SHUTDOWN):
LOG.info(_LI('Instance destroyed successfully.'), instance=instance)
raise loopingcall.LoopingCallDone()
if (new_domid != expected_domid):
LOG.info(_LI('Instance may be started again.'), instance=instance)
kwargs['is_running'] = True
raise loopingcall.LoopingCallDone()
|
def _wait_for_reboot():
'Called at an interval until the VM is running again.'
state = self.get_info(instance)['state']
if (state == power_state.RUNNING):
LOG.info(_LI('Instance rebooted successfully.'), instance=instance)
raise loopingcall.LoopingCallDone()
| -6,929,160,274,796,647,000
|
Called at an interval until the VM is running again.
|
nova/virt/libvirt/driver.py
|
_wait_for_reboot
|
srajag/nova
|
python
|
def _wait_for_reboot():
state = self.get_info(instance)['state']
if (state == power_state.RUNNING):
LOG.info(_LI('Instance rebooted successfully.'), instance=instance)
raise loopingcall.LoopingCallDone()
|
def _wait_for_boot():
'Called at an interval until the VM is running.'
state = self.get_info(instance)['state']
if (state == power_state.RUNNING):
LOG.info(_LI('Instance spawned successfully.'), instance=instance)
raise loopingcall.LoopingCallDone()
| -3,637,270,590,012,610,000
|
Called at an interval until the VM is running.
|
nova/virt/libvirt/driver.py
|
_wait_for_boot
|
srajag/nova
|
python
|
def _wait_for_boot():
state = self.get_info(instance)['state']
if (state == power_state.RUNNING):
LOG.info(_LI('Instance spawned successfully.'), instance=instance)
raise loopingcall.LoopingCallDone()
|
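The three _wait_for_* callbacks above (destroy, reboot, boot) share one contract: a looping timer invokes them repeatedly, and they stop it by raising LoopingCallDone. A minimal sketch of how such a callback is driven, using oslo's FixedIntervalLoopingCall (the 0.5-second interval is an assumption, chosen to match the conventional nova value):

from oslo_service import loopingcall

def _wait_for_state(get_state, target):
    # Returning normally lets the timer fire again; raising
    # LoopingCallDone stops it.
    if get_state() == target:
        raise loopingcall.LoopingCallDone()

timer = loopingcall.FixedIntervalLoopingCall(
    _wait_for_state, lambda: 'running', 'running')
timer.start(interval=0.5).wait()   # returns once the callback raises Done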
def _get_device_type(cfgdev):
"Get a PCI device's device type.\n\n An assignable PCI device can be a normal PCI device,\n a SR-IOV Physical Function (PF), or a SR-IOV Virtual\n Function (VF). Only normal PCI devices or SR-IOV VFs\n are assignable, while SR-IOV PFs are always owned by\n hypervisor.\n\n Please notice that a PCI device with SR-IOV\n capability but not enabled is reported as normal PCI device.\n "
for fun_cap in cfgdev.pci_capability.fun_capability:
if (len(fun_cap.device_addrs) != 0):
if (fun_cap.type == 'virt_functions'):
return {'dev_type': 'type-PF'}
if (fun_cap.type == 'phys_function'):
phys_address = ('%s:%s:%s.%s' % (fun_cap.device_addrs[0][0].replace('0x', ''), fun_cap.device_addrs[0][1].replace('0x', ''), fun_cap.device_addrs[0][2].replace('0x', ''), fun_cap.device_addrs[0][3].replace('0x', '')))
return {'dev_type': 'type-VF', 'phys_function': phys_address}
return {'dev_type': 'type-PCI'}
| 993,374,100,423,534,300
|
Get a PCI device's device type.
An assignable PCI device can be a normal PCI device,
an SR-IOV Physical Function (PF), or an SR-IOV Virtual
Function (VF). Only normal PCI devices or SR-IOV VFs
are assignable, while SR-IOV PFs are always owned by
the hypervisor.
Note that a PCI device with SR-IOV
capability that is not enabled is reported as a normal PCI device.
|
nova/virt/libvirt/driver.py
|
_get_device_type
|
srajag/nova
|
python
|
def _get_device_type(cfgdev):
"Get a PCI device's device type.\n\n An assignable PCI device can be a normal PCI device,\n a SR-IOV Physical Function (PF), or a SR-IOV Virtual\n Function (VF). Only normal PCI devices or SR-IOV VFs\n are assignable, while SR-IOV PFs are always owned by\n hypervisor.\n\n Please notice that a PCI device with SR-IOV\n capability but not enabled is reported as normal PCI device.\n "
for fun_cap in cfgdev.pci_capability.fun_capability:
if (len(fun_cap.device_addrs) != 0):
if (fun_cap.type == 'virt_functions'):
return {'dev_type': 'type-PF'}
if (fun_cap.type == 'phys_function'):
                phys_address = ('%s:%s:%s.%s' % (fun_cap.device_addrs[0][0].replace('0x', ''), fun_cap.device_addrs[0][1].replace('0x', ''), fun_cap.device_addrs[0][2].replace('0x', ''), fun_cap.device_addrs[0][3].replace('0x', '')))
return {'dev_type': 'type-VF', 'phys_function': phys_address}
return {'dev_type': 'type-PCI'}
|
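In the phys_function branch above, device_addrs holds (domain, bus, slot, function) fields as '0x'-prefixed strings that get joined into the canonical PCI address form. A small illustration with made-up values:

# Hypothetical address tuple in the shape libvirt reports.
addr = ('0x0000', '0x0a', '0x00', '0x1')

phys_address = '%s:%s:%s.%s' % tuple(a.replace('0x', '') for a in addr)
assert phys_address == '0000:0a:00.1'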
def wait_for_live_migration():
    'Waiting for live migration completion.'
try:
self.get_info(instance)['state']
except exception.InstanceNotFound:
timer.stop()
post_method(context, instance, dest, block_migration, migrate_data)
| -3,174,638,724,439,251,000
|
Waiting for live migration completion.
|
nova/virt/libvirt/driver.py
|
wait_for_live_migration
|
srajag/nova
|
python
|
def wait_for_live_migration():
try:
self.get_info(instance)['state']
except exception.InstanceNotFound:
timer.stop()
post_method(context, instance, dest, block_migration, migrate_data)
|
def get_io_devices(xml_doc):
    'Get the list of I/O devices from the XML document.'
result = {'volumes': [], 'ifaces': []}
try:
doc = etree.fromstring(xml_doc)
except Exception:
return result
blocks = [('./devices/disk', 'volumes'), ('./devices/interface', 'ifaces')]
for (block, key) in blocks:
section = doc.findall(block)
for node in section:
for child in node.getchildren():
if ((child.tag == 'target') and child.get('dev')):
result[key].append(child.get('dev'))
return result
| 6,174,880,537,626,591,000
|
Get the list of I/O devices from the XML document.
|
nova/virt/libvirt/driver.py
|
get_io_devices
|
srajag/nova
|
python
|
def get_io_devices(xml_doc):
result = {'volumes': [], 'ifaces': []}
try:
doc = etree.fromstring(xml_doc)
except Exception:
return result
blocks = [('./devices/disk', 'volumes'), ('./devices/interface', 'ifaces')]
for (block, key) in blocks:
section = doc.findall(block)
for node in section:
for child in node.getchildren():
if ((child.tag == 'target') and child.get('dev')):
result[key].append(child.get('dev'))
return result
|
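A usage sketch for the XML walk above, run against a trimmed-down libvirt domain XML (the snippet is illustrative; lxml's etree is assumed, as in the driver):

from lxml import etree

xml_doc = (
    "<domain><devices>"
    "<disk type='file'><target dev='vda' bus='virtio'/></disk>"
    "<interface type='bridge'><target dev='vnet0'/></interface>"
    "</devices></domain>")

doc = etree.fromstring(xml_doc)
volumes = [t.get('dev') for t in doc.findall('./devices/disk/target')]
ifaces = [t.get('dev') for t in doc.findall('./devices/interface/target')]
assert volumes == ['vda'] and ifaces == ['vnet0']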
def _get_disk_available_least():
    'Return total real disk available least size.\n\n The size of the available disk when the block_migration command is\n given the disk_over_commit param as FALSE.\n\n The size obtained by deducting the real instance disk size from the\n total size of the virtual disks of all instances.\n\n '
disk_free_gb = disk_info_dict['free']
disk_over_committed = self.driver._get_disk_over_committed_size_total()
available_least = ((disk_free_gb * units.Gi) - disk_over_committed)
return (available_least / units.Gi)
| -3,710,712,168,234,871,300
|
Return total real disk available least size.
The size of the available disk when the block_migration command is
given the disk_over_commit param as FALSE.
The size obtained by deducting the real instance disk size from the
total size of the virtual disks of all instances.
|
nova/virt/libvirt/driver.py
|
_get_disk_available_least
|
srajag/nova
|
python
|
def _get_disk_available_least():
    'Return total real disk available least size.\n\n The size of the available disk when the block_migration command is\n given the disk_over_commit param as FALSE.\n\n The size obtained by deducting the real instance disk size from the\n total size of the virtual disks of all instances.\n\n '
disk_free_gb = disk_info_dict['free']
disk_over_committed = self.driver._get_disk_over_committed_size_total()
available_least = ((disk_free_gb * units.Gi) - disk_over_committed)
return (available_least / units.Gi)
|
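The arithmetic above is easy to check by hand: with 100 GiB free on the hypervisor and 30 GiB of over-committed virtual disk, the least-available figure comes out to 70 GiB (illustrative numbers):

Gi = 2 ** 30                      # the same constant as units.Gi

disk_free_gb = 100                # hypervisor-reported free space, in GiB
disk_over_committed = 30 * Gi     # over-committed virtual disk, in bytes

available_least = (disk_free_gb * Gi - disk_over_committed) // Gi
assert available_least == 70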
@staticmethod
def configureFromCommandline(db, serviceObject, args):
'Subclasses should take the remaining args from the commandline and configure using them'
pass
| 6,080,763,618,851,087,000
|
Subclasses should take the remaining args from the commandline and configure using them
|
object_database/service_manager/ServiceBase.py
|
configureFromCommandline
|
braxtonmckee/nativepython
|
python
|
@staticmethod
def configureFromCommandline(db, serviceObject, args):
pass
|
@pre_load
def set_kubernetes_job_timeout(self, in_data, **kwargs):
'Set kubernetes_job_timeout to a default value if not provided and validate the value.\n\n Method receives the whole data dictionary but operates *only* on kubernetes_job_timeout.\n Updated dictionary is returned.\n '
if ('kubernetes_job_timeout' not in in_data):
try:
in_data['kubernetes_job_timeout'] = int(REANA_KUBERNETES_JOBS_TIMEOUT_LIMIT)
except (ValueError, TypeError):
raise ValidationError(f"Default value of kubernetes_job_timeout is not an integer. Provided value is '{REANA_KUBERNETES_JOBS_TIMEOUT_LIMIT}'. Please contact the administrator.")
job_timeout = in_data['kubernetes_job_timeout']
try:
job_timeout = int(job_timeout)
except (ValueError, TypeError):
raise ValidationError(f"kubernetes_job_timeout must be an integer. Provided value is '{job_timeout}'.")
if (job_timeout <= 0):
        raise ValidationError(f'kubernetes_job_timeout must be greater than 0. Provided value is {job_timeout}.')
try:
max_value = int(REANA_KUBERNETES_JOBS_MAX_USER_TIMEOUT_LIMIT)
except (ValueError, TypeError):
raise ValidationError(f"Max value for kubernetes_job_timeout is not an integer. Provided value is '{REANA_KUBERNETES_JOBS_MAX_USER_TIMEOUT_LIMIT}'. Please contact the administrator.")
if (job_timeout > max_value):
raise ValidationError(f'kubernetes_job_timeout exceeds maximum allowed value of {max_value} seconds. Provided value is {job_timeout} seconds.')
in_data['kubernetes_job_timeout'] = job_timeout
return in_data
| 883,647,504,958,752,000
|
Set kubernetes_job_timeout to a default value if not provided and validate the value.
Method receives the whole data dictionary but operates *only* on kubernetes_job_timeout.
Updated dictionary is returned.
|
reana_job_controller/schemas.py
|
set_kubernetes_job_timeout
|
focilo/focilo-job-controller
|
python
|
@pre_load
def set_kubernetes_job_timeout(self, in_data, **kwargs):
'Set kubernetes_job_timeout to a default value if not provided and validate the value.\n\n Method receives the whole data dictionary but operates *only* on kubernetes_job_timeout.\n Updated dictionary is returned.\n '
if ('kubernetes_job_timeout' not in in_data):
try:
in_data['kubernetes_job_timeout'] = int(REANA_KUBERNETES_JOBS_TIMEOUT_LIMIT)
except (ValueError, TypeError):
raise ValidationError(f"Default value of kubernetes_job_timeout is not an integer. Provided value is '{REANA_KUBERNETES_JOBS_TIMEOUT_LIMIT}'. Please contact the administrator.")
job_timeout = in_data['kubernetes_job_timeout']
try:
job_timeout = int(job_timeout)
except (ValueError, TypeError):
raise ValidationError(f"kubernetes_job_timeout must be an integer. Provided value is '{job_timeout}'.")
if (job_timeout <= 0):
        raise ValidationError(f'kubernetes_job_timeout must be greater than 0. Provided value is {job_timeout}.')
try:
max_value = int(REANA_KUBERNETES_JOBS_MAX_USER_TIMEOUT_LIMIT)
except (ValueError, TypeError):
raise ValidationError(f"Max value for kubernetes_job_timeout is not an integer. Provided value is '{REANA_KUBERNETES_JOBS_MAX_USER_TIMEOUT_LIMIT}'. Please contact the administrator.")
if (job_timeout > max_value):
raise ValidationError(f'kubernetes_job_timeout exceeds maximum allowed value of {max_value} seconds. Provided value is {job_timeout} seconds.')
in_data['kubernetes_job_timeout'] = job_timeout
return in_data
|
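The validator above is a marshmallow @pre_load hook, so it runs on the raw input dict before field deserialization. A stripped-down sketch of the same pattern (the schema class and the limit constants are hypothetical stand-ins, not the REANA values):

from marshmallow import Schema, ValidationError, fields, pre_load

DEFAULT_TIMEOUT = 600      # stand-in for REANA_KUBERNETES_JOBS_TIMEOUT_LIMIT
MAX_TIMEOUT = 1209600      # stand-in for the max user timeout limit

class JobSchema(Schema):
    kubernetes_job_timeout = fields.Int()

    @pre_load
    def set_timeout(self, in_data, **kwargs):
        # Default, coerce, and range-check before normal field loading.
        timeout = in_data.get('kubernetes_job_timeout', DEFAULT_TIMEOUT)
        try:
            timeout = int(timeout)
        except (ValueError, TypeError):
            raise ValidationError(
                'kubernetes_job_timeout must be an integer.')
        if not 0 < timeout <= MAX_TIMEOUT:
            raise ValidationError(
                'kubernetes_job_timeout is out of the allowed range.')
        in_data['kubernetes_job_timeout'] = timeout
        return in_data

# JobSchema().load({}) yields {'kubernetes_job_timeout': 600}.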