repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/mem/persist.rs | src/vmm/src/devices/virtio/mem/persist.rs | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the structures needed for saving/restoring virtio-mem devices.
use std::sync::Arc;
use bitvec::vec::BitVec;
use serde::{Deserialize, Serialize};
use vm_memory::Address;
use crate::Vm;
use crate::devices::virtio::generated::virtio_ids::VIRTIO_ID_MEM;
use crate::devices::virtio::generated::virtio_mem::virtio_mem_config;
use crate::devices::virtio::mem::{MEM_NUM_QUEUES, VirtioMem, VirtioMemError};
use crate::devices::virtio::persist::{PersistError as VirtioStateError, VirtioDeviceState};
use crate::devices::virtio::queue::FIRECRACKER_MAX_QUEUE_SIZE;
use crate::snapshot::Persist;
use crate::utils::usize_to_u64;
use crate::vstate::memory::{GuestMemoryMmap, GuestRegionMmap};
/// State of a virtio-mem device, saved to / restored from a snapshot.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VirtioMemState {
    /// Generic virtio transport state (queues, features, interrupt state).
    pub virtio_state: VirtioDeviceState,
    // Guest physical address where the hotpluggable region starts.
    addr: u64,
    // Total size in bytes of the hotpluggable region.
    region_size: u64,
    // Size in bytes of a single virtio-mem block.
    block_size: u64,
    // Size in bytes of the part of the region the guest may currently use.
    usable_region_size: u64,
    // Size in bytes the host has asked the guest to plug.
    requested_size: u64,
    // Size in bytes of a single KVM memory slot backing the region.
    slot_size: usize,
    // Plugged/unplugged flag per block, indexed by block number
    // (BitVec flattened to Vec<bool> for serde).
    plugged_blocks: Vec<bool>,
}
/// Auxiliary structure for creating a virtio-mem device when restoring from a snapshot.
#[derive(Debug)]
pub struct VirtioMemConstructorArgs {
    // VM handle, needed for guest memory access and KVM slot updates.
    vm: Arc<Vm>,
}

impl VirtioMemConstructorArgs {
    /// Creates constructor arguments from a VM handle.
    pub fn new(vm: Arc<Vm>) -> Self {
        Self { vm }
    }
}
/// Errors that can occur while saving/restoring a virtio-mem device.
// NOTE: `displaydoc` turns each variant's `///` comment into its Display
// string, so the variant doc text below is part of runtime error messages
// and must not be reworded.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum VirtioMemPersistError {
    /// Create virtio-mem: {0}
    CreateVirtioMem(#[from] VirtioMemError),
    /// Virtio state: {0}
    VirtioState(#[from] VirtioStateError),
}
impl Persist<'_> for VirtioMem {
    type State = VirtioMemState;
    type ConstructorArgs = VirtioMemConstructorArgs;
    type Error = VirtioMemPersistError;

    /// Captures the device state for snapshotting.
    fn save(&self) -> Self::State {
        VirtioMemState {
            virtio_state: VirtioDeviceState::from_device(self),
            addr: self.config.addr,
            region_size: self.config.region_size,
            block_size: self.config.block_size,
            usable_region_size: self.config.usable_region_size,
            // Flatten the BitVec to Vec<bool> so it serializes with serde.
            plugged_blocks: self.plugged_blocks.iter().by_vals().collect(),
            requested_size: self.config.requested_size,
            slot_size: self.slot_size,
        }
    }

    /// Rebuilds the device from a saved state on snapshot restore.
    fn restore(
        constructor_args: Self::ConstructorArgs,
        state: &Self::State,
    ) -> Result<Self, Self::Error> {
        // Validate and rebuild the virtio queues from the snapshotted state.
        let queues = state.virtio_state.build_queues_checked(
            constructor_args.vm.guest_memory(),
            VIRTIO_ID_MEM,
            MEM_NUM_QUEUES,
            FIRECRACKER_MAX_QUEUE_SIZE,
        )?;
        let plugged_blocks = BitVec::from_iter(state.plugged_blocks.iter());
        let config = virtio_mem_config {
            addr: state.addr,
            region_size: state.region_size,
            block_size: state.block_size,
            usable_region_size: state.usable_region_size,
            // plugged_size is derived from the plugged-block count rather than
            // snapshotted, keeping it consistent with `plugged_blocks`.
            plugged_size: usize_to_u64(plugged_blocks.count_ones()) * state.block_size,
            requested_size: state.requested_size,
            ..Default::default()
        };
        let mut virtio_mem = VirtioMem::from_state(
            constructor_args.vm,
            queues,
            config,
            state.slot_size,
            plugged_blocks,
        )?;
        virtio_mem.set_avail_features(state.virtio_state.avail_features);
        virtio_mem.set_acked_features(state.virtio_state.acked_features);
        Ok(virtio_mem)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::devices::virtio::device::VirtioDevice;
    use crate::devices::virtio::mem::device::test_utils::default_virtio_mem;
    use crate::vstate::vm::tests::setup_vm_with_memory;

    // Verifies that every field of the saved state mirrors the live device.
    #[test]
    fn test_save_state() {
        let dev = default_virtio_mem();
        let state = dev.save();
        assert_eq!(state.addr, dev.config.addr);
        assert_eq!(state.region_size, dev.config.region_size);
        assert_eq!(state.block_size, dev.config.block_size);
        assert_eq!(state.usable_region_size, dev.config.usable_region_size);
        assert_eq!(
            state.plugged_blocks.iter().collect::<BitVec>(),
            dev.plugged_blocks
        );
        assert_eq!(state.requested_size, dev.config.requested_size);
        assert_eq!(state.slot_size, dev.slot_size);
    }

    // Verifies that a save/restore round-trip preserves config and features.
    #[test]
    fn test_save_restore_state() {
        let mut original_dev = default_virtio_mem();
        original_dev.set_acked_features(original_dev.avail_features());
        let state = original_dev.save();
        // Create a "new" VM for restore
        let (_, vm) = setup_vm_with_memory(0x1000);
        let vm = Arc::new(vm);
        let constructor_args = VirtioMemConstructorArgs::new(vm);
        let restored_dev = VirtioMem::restore(constructor_args, &state).unwrap();
        assert_eq!(original_dev.config, restored_dev.config);
        assert_eq!(original_dev.slot_size, restored_dev.slot_size);
        assert_eq!(original_dev.avail_features(), restored_dev.avail_features());
        assert_eq!(original_dev.acked_features(), restored_dev.acked_features());
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/mem/device.rs | src/vmm/src/devices/virtio/mem/device.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::io;
use std::ops::{Deref, Range};
use std::sync::Arc;
use std::sync::atomic::AtomicU32;
use bitvec::vec::BitVec;
use log::info;
use serde::{Deserialize, Serialize};
use vm_memory::{
Address, Bytes, GuestAddress, GuestMemory, GuestMemoryError, GuestMemoryRegion, GuestUsize,
};
use vmm_sys_util::eventfd::EventFd;
use super::{MEM_NUM_QUEUES, MEM_QUEUE};
use crate::devices::virtio::ActivateError;
use crate::devices::virtio::device::{ActiveState, DeviceState, VirtioDevice};
use crate::devices::virtio::generated::virtio_config::VIRTIO_F_VERSION_1;
use crate::devices::virtio::generated::virtio_ids::VIRTIO_ID_MEM;
use crate::devices::virtio::generated::virtio_mem::{
self, VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, virtio_mem_config,
};
use crate::devices::virtio::iov_deque::IovDequeError;
use crate::devices::virtio::mem::VIRTIO_MEM_DEV_ID;
use crate::devices::virtio::mem::metrics::METRICS;
use crate::devices::virtio::mem::request::{BlockRangeState, Request, RequestedRange, Response};
use crate::devices::virtio::queue::{
DescriptorChain, FIRECRACKER_MAX_QUEUE_SIZE, InvalidAvailIdx, Queue, QueueError,
};
use crate::devices::virtio::transport::{VirtioInterrupt, VirtioInterruptType};
use crate::logger::{IncMetric, debug, error};
use crate::utils::{bytes_to_mib, mib_to_bytes, u64_to_usize, usize_to_u64};
use crate::vstate::interrupts::InterruptError;
use crate::vstate::memory::{
ByteValued, GuestMemoryExtension, GuestMemoryMmap, GuestRegionMmap, GuestRegionType,
};
use crate::vstate::vm::VmError;
use crate::{Vm, impl_device_type};
// SAFETY: virtio_mem_config only contains plain data types
unsafe impl ByteValued for virtio_mem_config {}

/// Errors the virtio-mem device can encounter.
// NOTE: `displaydoc` turns each variant's `///` comment into its Display
// string, so the text below is part of runtime error messages.
// FIX: the Display strings of `DescriptorWriteFailed` and
// `DescriptorReadFailed` were swapped ("Error reading..." was attached to the
// write failure and vice versa); they now match the variant semantics.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum VirtioMemError {
    /// Error while handling an Event file descriptor: {0}
    EventFd(#[from] io::Error),
    /// Received error while sending an interrupt: {0}
    InterruptError(#[from] InterruptError),
    /// Size {0} is invalid: it must be a multiple of block size and less than the total size
    InvalidSize(u64),
    /// Device is not active
    DeviceNotActive,
    /// Descriptor is write-only
    UnexpectedWriteOnlyDescriptor,
    /// Error writing virtio descriptor
    DescriptorWriteFailed,
    /// Error reading virtio descriptor
    DescriptorReadFailed,
    /// Unknown request type: {0}
    UnknownRequestType(u32),
    /// Descriptor chain is too short
    DescriptorChainTooShort,
    /// Descriptor is too small
    DescriptorLengthTooSmall,
    /// Descriptor is read-only
    UnexpectedReadOnlyDescriptor,
    /// Error popping from virtio queue: {0}
    InvalidAvailIdx(#[from] InvalidAvailIdx),
    /// Error adding used queue: {0}
    QueueError(#[from] QueueError),
    /// Invalid requested range: {0:?}.
    InvalidRange(RequestedRange),
    /// The requested range cannot be plugged because it's {0:?}.
    PlugRequestBlockStateInvalid(BlockRangeState),
    /// Plug request rejected as plugged_size would be greater than requested_size
    PlugRequestIsTooBig,
    /// The requested range cannot be unplugged because it's {0:?}.
    UnplugRequestBlockStateInvalid(BlockRangeState),
    /// There was an error updating the KVM slot.
    UpdateKvmSlot(VmError),
}
/// A virtio-mem device managing a hotpluggable guest memory region.
#[derive(Debug)]
pub struct VirtioMem {
    // VirtIO fields
    avail_features: u64,
    acked_features: u64,
    activate_event: EventFd,
    // Transport fields
    device_state: DeviceState,
    pub(crate) queues: Vec<Queue>,
    queue_events: Vec<EventFd>,
    // Device specific fields
    pub(crate) config: virtio_mem_config,
    // Size in bytes of each KVM memory slot backing the region.
    pub(crate) slot_size: usize,
    // Bitmap to track which blocks are plugged
    pub(crate) plugged_blocks: BitVec,
    // VM handle used to update KVM memory slots on plug/unplug.
    vm: Arc<Vm>,
}

/// Memory hotplug device status information.
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct VirtioMemStatus {
    /// Block size in MiB.
    pub block_size_mib: usize,
    /// Total memory size in MiB that can be hotplugged.
    pub total_size_mib: usize,
    /// Size of the KVM slots in MiB.
    pub slot_size_mib: usize,
    /// Currently plugged memory size in MiB.
    pub plugged_size_mib: usize,
    /// Requested memory size in MiB.
    pub requested_size_mib: usize,
}
impl VirtioMem {
    /// Creates an inactive virtio-mem device managing `total_size_mib` MiB of
    /// hotpluggable memory starting at guest address `addr`.
    pub fn new(
        vm: Arc<Vm>,
        addr: GuestAddress,
        total_size_mib: usize,
        block_size_mib: usize,
        slot_size_mib: usize,
    ) -> Result<Self, VirtioMemError> {
        let queues = vec![Queue::new(FIRECRACKER_MAX_QUEUE_SIZE); MEM_NUM_QUEUES];
        let config = virtio_mem_config {
            addr: addr.raw_value(),
            region_size: mib_to_bytes(total_size_mib) as u64,
            block_size: mib_to_bytes(block_size_mib) as u64,
            ..Default::default()
        };
        // All blocks start out unplugged.
        let plugged_blocks = BitVec::repeat(false, total_size_mib / block_size_mib);
        Self::from_state(
            vm,
            queues,
            config,
            mib_to_bytes(slot_size_mib),
            plugged_blocks,
        )
    }

    /// Builds a device from explicit state; shared by `new` and snapshot restore.
    pub fn from_state(
        vm: Arc<Vm>,
        queues: Vec<Queue>,
        config: virtio_mem_config,
        slot_size: usize,
        plugged_blocks: BitVec,
    ) -> Result<Self, VirtioMemError> {
        let activate_event = EventFd::new(libc::EFD_NONBLOCK)?;
        let queue_events = (0..MEM_NUM_QUEUES)
            .map(|_| EventFd::new(libc::EFD_NONBLOCK))
            .collect::<Result<Vec<EventFd>, io::Error>>()?;
        Ok(Self {
            // UNPLUGGED_INACCESSIBLE is always offered; the guest must not
            // touch unplugged blocks (see `activate`, which requires it).
            avail_features: (1 << VIRTIO_F_VERSION_1) | (1 << VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE),
            acked_features: 0u64,
            activate_event,
            device_state: DeviceState::Inactive,
            queues,
            queue_events,
            config,
            vm,
            slot_size,
            plugged_blocks,
        })
    }

    /// Returns the device ID string.
    pub fn id(&self) -> &str {
        VIRTIO_MEM_DEV_ID
    }

    /// Returns the guest address where the hotpluggable region starts.
    pub fn guest_address(&self) -> GuestAddress {
        GuestAddress(self.config.addr)
    }

    /// Gets the total hotpluggable size.
    pub fn total_size_mib(&self) -> usize {
        bytes_to_mib(u64_to_usize(self.config.region_size))
    }

    /// Gets the block size.
    pub fn block_size_mib(&self) -> usize {
        bytes_to_mib(u64_to_usize(self.config.block_size))
    }

    /// Gets the slot size.
    pub fn slot_size_mib(&self) -> usize {
        bytes_to_mib(self.slot_size)
    }

    /// Gets the total size of the plugged memory blocks.
    pub fn plugged_size_mib(&self) -> usize {
        bytes_to_mib(u64_to_usize(self.config.plugged_size))
    }

    /// Gets the requested size
    pub fn requested_size_mib(&self) -> usize {
        bytes_to_mib(u64_to_usize(self.config.requested_size))
    }

    /// Returns a status snapshot of the device (all sizes in MiB).
    pub fn status(&self) -> VirtioMemStatus {
        VirtioMemStatus {
            block_size_mib: self.block_size_mib(),
            total_size_mib: self.total_size_mib(),
            slot_size_mib: self.slot_size_mib(),
            plugged_size_mib: self.plugged_size_mib(),
            requested_size_mib: self.requested_size_mib(),
        }
    }

    // Notifies the guest that the request queue has new used descriptors.
    fn signal_used_queue(&self) -> Result<(), VirtioMemError> {
        self.interrupt_trigger()
            .trigger(VirtioInterruptType::Queue(MEM_QUEUE.try_into().unwrap()))
            .map_err(VirtioMemError::InterruptError)
    }

    // Returns guest memory. Panics if the device has not been activated.
    fn guest_memory(&self) -> &GuestMemoryMmap {
        &self.device_state.active_state().unwrap().mem
    }

    // Converts a number of blocks into a length in bytes.
    fn nb_blocks_to_len(&self, nb_blocks: usize) -> usize {
        nb_blocks * u64_to_usize(self.config.block_size)
    }
    /// Returns the state of all the blocks in the given range.
    ///
    /// Note: the range passed to this function must be within the device memory to avoid
    /// out-of-bound panics.
    fn range_state(&self, range: &RequestedRange) -> BlockRangeState {
        let plugged_count = self.plugged_blocks[self.unchecked_block_range(range)].count_ones();
        match plugged_count {
            // Every block in the range is plugged.
            nb_blocks if nb_blocks == range.nb_blocks => BlockRangeState::Plugged,
            // No block in the range is plugged.
            0 => BlockRangeState::Unplugged,
            // Partially plugged.
            _ => BlockRangeState::Mixed,
        }
    }

    // Parses a request descriptor chain, returning the decoded request, the
    // guest address where the response must be written, and the index of the
    // chain head (needed later to put the chain on the used ring).
    fn parse_request(
        &self,
        avail_desc: &DescriptorChain,
    ) -> Result<(Request, GuestAddress, u16), VirtioMemError> {
        // The head contains the request type which MUST be readable.
        if avail_desc.is_write_only() {
            return Err(VirtioMemError::UnexpectedWriteOnlyDescriptor);
        }
        if (avail_desc.len as usize) < size_of::<virtio_mem::virtio_mem_req>() {
            return Err(VirtioMemError::DescriptorLengthTooSmall);
        }
        let request: virtio_mem::virtio_mem_req = self
            .guest_memory()
            .read_obj(avail_desc.addr)
            .map_err(|_| VirtioMemError::DescriptorReadFailed)?;
        let resp_desc = avail_desc
            .next_descriptor()
            .ok_or(VirtioMemError::DescriptorChainTooShort)?;
        // The response MUST always be writable.
        if !resp_desc.is_write_only() {
            return Err(VirtioMemError::UnexpectedReadOnlyDescriptor);
        }
        if (resp_desc.len as usize) < std::mem::size_of::<virtio_mem::virtio_mem_resp>() {
            return Err(VirtioMemError::DescriptorLengthTooSmall);
        }
        Ok((request.into(), resp_desc.addr, avail_desc.index))
    }

    // Writes `resp` into guest memory at `resp_addr` and adds the descriptor
    // chain `used_idx` to the used ring with the response length.
    fn write_response(
        &mut self,
        resp: Response,
        resp_addr: GuestAddress,
        used_idx: u16,
    ) -> Result<(), VirtioMemError> {
        debug!("virtio-mem: Response: {:?}", resp);
        self.guest_memory()
            .write_obj(virtio_mem::virtio_mem_resp::from(resp), resp_addr)
            .map_err(|_| VirtioMemError::DescriptorWriteFailed)
            // The mapped size value is discarded by `?`; only the error path
            // matters here.
            .map(|_| size_of::<virtio_mem::virtio_mem_resp>())?;
        self.queues[MEM_QUEUE]
            .add_used(
                used_idx,
                u32::try_from(std::mem::size_of::<virtio_mem::virtio_mem_resp>()).unwrap(),
            )
            .map_err(VirtioMemError::QueueError)
    }
/// Checks that the range provided by the driver is within the usable memory region
fn validate_range(&self, range: &RequestedRange) -> Result<(), VirtioMemError> {
// Ensure the range is aligned
if !range
.addr
.raw_value()
.is_multiple_of(self.config.block_size)
{
return Err(VirtioMemError::InvalidRange(*range));
}
if range.nb_blocks == 0 {
return Err(VirtioMemError::InvalidRange(*range));
}
// Ensure the start addr is within the usable region
let start_off = range
.addr
.checked_offset_from(self.guest_address())
.filter(|&off| off < self.config.usable_region_size)
.ok_or(VirtioMemError::InvalidRange(*range))?;
// Ensure the end offset (exclusive) is within the usable region
let end_off = start_off
.checked_add(usize_to_u64(self.nb_blocks_to_len(range.nb_blocks)))
.filter(|&end_off| end_off <= self.config.usable_region_size)
.ok_or(VirtioMemError::InvalidRange(*range))?;
Ok(())
}
    // Converts a guest-address range into a range of block indices.
    //
    // Note: performs no bounds checking — callers must have validated `range`
    // (hence "unchecked"); out-of-range input would panic at the use site.
    fn unchecked_block_range(&self, range: &RequestedRange) -> Range<usize> {
        let start_block = u64_to_usize((range.addr.0 - self.config.addr) / self.config.block_size);
        start_block..(start_block + range.nb_blocks)
    }
    // Validates and applies a plug request, enforcing that plugged_size never
    // exceeds requested_size.
    fn process_plug_request(&mut self, range: &RequestedRange) -> Result<(), VirtioMemError> {
        self.validate_range(range)?;
        if self.config.plugged_size + usize_to_u64(self.nb_blocks_to_len(range.nb_blocks))
            > self.config.requested_size
        {
            return Err(VirtioMemError::PlugRequestIsTooBig);
        }
        match self.range_state(range) {
            // the range was validated
            BlockRangeState::Unplugged => self.update_range(range, true),
            // Only fully-unplugged ranges may be plugged.
            state => Err(VirtioMemError::PlugRequestBlockStateInvalid(state)),
        }
    }

    // Handles a plug request end-to-end: updates metrics and writes the
    // ACK/error response back to the guest.
    fn handle_plug_request(
        &mut self,
        range: &RequestedRange,
        resp_addr: GuestAddress,
        used_idx: u16,
    ) -> Result<(), VirtioMemError> {
        METRICS.plug_count.inc();
        let _metric = METRICS.plug_agg.record_latency_metrics();
        let response = match self.process_plug_request(range) {
            Err(err) => {
                METRICS.plug_fails.inc();
                error!("virtio-mem: Failed to plug range: {}", err);
                Response::error()
            }
            Ok(_) => {
                METRICS
                    .plug_bytes
                    .add(usize_to_u64(self.nb_blocks_to_len(range.nb_blocks)));
                Response::ack()
            }
        };
        self.write_response(response, resp_addr, used_idx)
    }
    // Validates and applies an unplug request.
    fn process_unplug_request(&mut self, range: &RequestedRange) -> Result<(), VirtioMemError> {
        self.validate_range(range)?;
        match self.range_state(range) {
            // the range was validated
            BlockRangeState::Plugged => self.update_range(range, false),
            // Only fully-plugged ranges may be unplugged.
            state => Err(VirtioMemError::UnplugRequestBlockStateInvalid(state)),
        }
    }

    // Handles an unplug request end-to-end: updates metrics and writes the
    // ACK/error response back to the guest.
    fn handle_unplug_request(
        &mut self,
        range: &RequestedRange,
        resp_addr: GuestAddress,
        used_idx: u16,
    ) -> Result<(), VirtioMemError> {
        METRICS.unplug_count.inc();
        let _metric = METRICS.unplug_agg.record_latency_metrics();
        let response = match self.process_unplug_request(range) {
            Err(err) => {
                METRICS.unplug_fails.inc();
                error!("virtio-mem: Failed to unplug range: {}", err);
                Response::error()
            }
            Ok(_) => {
                METRICS
                    .unplug_bytes
                    .add(usize_to_u64(self.nb_blocks_to_len(range.nb_blocks)));
                Response::ack()
            }
        };
        self.write_response(response, resp_addr, used_idx)
    }

    // Handles an unplug-all request: unplugs every block of the region and,
    // on success, resets the usable region size to 0.
    fn handle_unplug_all_request(
        &mut self,
        resp_addr: GuestAddress,
        used_idx: u16,
    ) -> Result<(), VirtioMemError> {
        METRICS.unplug_all_count.inc();
        let _metric = METRICS.unplug_all_agg.record_latency_metrics();
        // Whole-region range; no validation needed since it is device-defined.
        let range = RequestedRange {
            addr: self.guest_address(),
            nb_blocks: self.plugged_blocks.len(),
        };
        let response = match self.update_range(&range, false) {
            Err(err) => {
                METRICS.unplug_all_fails.inc();
                error!("virtio-mem: Failed to unplug all: {}", err);
                Response::error()
            }
            Ok(_) => {
                self.config.usable_region_size = 0;
                Response::ack()
            }
        };
        self.write_response(response, resp_addr, used_idx)
    }

    // Handles a state request: reports whether a range is plugged, unplugged
    // or mixed.
    fn handle_state_request(
        &mut self,
        range: &RequestedRange,
        resp_addr: GuestAddress,
        used_idx: u16,
    ) -> Result<(), VirtioMemError> {
        METRICS.state_count.inc();
        let _metric = METRICS.state_agg.record_latency_metrics();
        let response = match self.validate_range(range) {
            Err(err) => {
                METRICS.state_fails.inc();
                error!("virtio-mem: Failed to retrieve state of range: {}", err);
                Response::error()
            }
            // the range was validated
            Ok(_) => Response::ack_with_state(self.range_state(range)),
        };
        self.write_response(response, resp_addr, used_idx)
    }
fn process_mem_queue(&mut self) -> Result<(), VirtioMemError> {
while let Some(desc) = self.queues[MEM_QUEUE].pop()? {
let index = desc.index;
let (req, resp_addr, used_idx) = self.parse_request(&desc)?;
debug!("virtio-mem: Request: {:?}", req);
// Handle request and write response
match req {
Request::State(ref range) => self.handle_state_request(range, resp_addr, used_idx),
Request::Plug(ref range) => self.handle_plug_request(range, resp_addr, used_idx),
Request::Unplug(ref range) => {
self.handle_unplug_request(range, resp_addr, used_idx)
}
Request::UnplugAll => self.handle_unplug_all_request(resp_addr, used_idx),
Request::Unsupported(t) => Err(VirtioMemError::UnknownRequestType(t)),
}?;
}
self.queues[MEM_QUEUE].advance_used_ring_idx();
self.signal_used_queue()?;
Ok(())
}
    // Handles the queue event fd: consumes the event, then processes the
    // queue; failures are counted in metrics and logged, not propagated.
    pub(crate) fn process_mem_queue_event(&mut self) {
        METRICS.queue_event_count.inc();
        if let Err(err) = self.queue_events[MEM_QUEUE].read() {
            METRICS.queue_event_fails.inc();
            error!("Failed to read mem queue event: {err}");
            return;
        }
        if let Err(err) = self.process_mem_queue() {
            METRICS.queue_event_fails.inc();
            error!("virtio-mem: Failed to process queue: {err}");
        }
    }

    /// Processes all virtio queues of the device (virtio-mem has only one).
    pub fn process_virtio_queues(&mut self) -> Result<(), VirtioMemError> {
        self.process_mem_queue()
    }

    // Overwrites the advertised feature bits (used on snapshot restore).
    pub(crate) fn set_avail_features(&mut self, features: u64) {
        self.avail_features = features;
    }

    // Overwrites the acknowledged feature bits (used on snapshot restore).
    pub(crate) fn set_acked_features(&mut self, features: u64) {
        self.acked_features = features;
    }

    // Event fd signalled when the device gets activated.
    pub(crate) fn activate_event(&self) -> &EventFd {
        &self.activate_event
    }
    // Syncs the KVM memory slots intersecting `updated_range` with the
    // plugged-block bitmap: a slot containing any plugged block is enabled,
    // a fully-unplugged slot is disabled.
    fn update_kvm_slots(&self, updated_range: &RequestedRange) -> Result<(), VirtioMemError> {
        let hp_region = self
            .guest_memory()
            .iter()
            .find(|r| r.region_type == GuestRegionType::Hotpluggable)
            .expect("there should be one and only one hotpluggable region");
        hp_region
            .slots_intersecting_range(
                updated_range.addr,
                self.nb_blocks_to_len(updated_range.nb_blocks),
            )
            .try_for_each(|slot| {
                // Evaluate the whole slot, not just the updated sub-range,
                // since neighbouring blocks determine whether the slot stays
                // enabled.
                let slot_range = RequestedRange {
                    addr: slot.guest_addr,
                    nb_blocks: slot.slice.len() / u64_to_usize(self.config.block_size),
                };
                match self.range_state(&slot_range) {
                    BlockRangeState::Mixed | BlockRangeState::Plugged => {
                        hp_region.update_slot(&self.vm, &slot, true)
                    }
                    BlockRangeState::Unplugged => hp_region.update_slot(&self.vm, &slot, false),
                }
                .map_err(VirtioMemError::UpdateKvmSlot)
            })
    }
/// Plugs/unplugs the given range
///
/// Note: the range passed to this function must be within the device memory to avoid
/// out-of-bound panics.
fn update_range(&mut self, range: &RequestedRange, plug: bool) -> Result<(), VirtioMemError> {
// Update internal state
let block_range = self.unchecked_block_range(range);
let plugged_blocks_slice = &mut self.plugged_blocks[block_range];
let plugged_before = plugged_blocks_slice.count_ones();
plugged_blocks_slice.fill(plug);
let plugged_after = plugged_blocks_slice.count_ones();
self.config.plugged_size -= usize_to_u64(self.nb_blocks_to_len(plugged_before));
self.config.plugged_size += usize_to_u64(self.nb_blocks_to_len(plugged_after));
// If unplugging, discard the range
if !plug {
self.guest_memory()
.discard_range(range.addr, self.nb_blocks_to_len(range.nb_blocks))
.inspect_err(|err| {
// Failure to discard is not fatal and is not reported to the driver. It only
// gets logged.
METRICS.unplug_discard_fails.inc();
error!("virtio-mem: Failed to discard memory range: {}", err);
});
}
self.update_kvm_slots(range)
}
    /// Updates the requested size of the virtio-mem device.
    ///
    /// Fails if the device is inactive, or if the new size is not a multiple
    /// of the block size or exceeds the total region size. On success the
    /// guest is notified via a config-change interrupt.
    pub fn update_requested_size(
        &mut self,
        requested_size_mib: usize,
    ) -> Result<(), VirtioMemError> {
        let requested_size = usize_to_u64(mib_to_bytes(requested_size_mib));
        if !self.is_activated() {
            return Err(VirtioMemError::DeviceNotActive);
        }
        if requested_size % self.config.block_size != 0 {
            return Err(VirtioMemError::InvalidSize(requested_size));
        }
        if requested_size > self.config.region_size {
            return Err(VirtioMemError::InvalidSize(requested_size));
        }
        // Increase the usable_region_size if it's not enough for the guest to plug new
        // memory blocks.
        // The device cannot decrease the usable_region_size unless the guest requests
        // to reset it with an UNPLUG_ALL request.
        if self.config.usable_region_size < requested_size {
            // Round up to a slot boundary so whole KVM slots become usable.
            self.config.usable_region_size =
                requested_size.next_multiple_of(usize_to_u64(self.slot_size));
            debug!(
                "virtio-mem: Updated usable size to {} bytes",
                self.config.usable_region_size
            );
        }
        self.config.requested_size = requested_size;
        debug!(
            "virtio-mem: Updated requested size to {} bytes",
            requested_size
        );
        self.interrupt_trigger()
            .trigger(VirtioInterruptType::Config)
            .map_err(VirtioMemError::InterruptError)
    }
}
impl VirtioDevice for VirtioMem {
    impl_device_type!(VIRTIO_ID_MEM);

    fn queues(&self) -> &[Queue] {
        &self.queues
    }

    fn queues_mut(&mut self) -> &mut [Queue] {
        &mut self.queues
    }

    fn queue_events(&self) -> &[EventFd] {
        &self.queue_events
    }

    // Panics if called before activation; transport code only calls this on
    // an activated device.
    fn interrupt_trigger(&self) -> &dyn VirtioInterrupt {
        self.device_state
            .active_state()
            .expect("Device is not activated")
            .interrupt
            .deref()
    }

    fn avail_features(&self) -> u64 {
        self.avail_features
    }

    fn acked_features(&self) -> u64 {
        self.acked_features
    }

    fn set_acked_features(&mut self, acked_features: u64) {
        self.acked_features = acked_features;
    }

    // Reads from the device config space; an out-of-bounds read is logged
    // and leaves `data` untouched.
    fn read_config(&self, offset: u64, data: &mut [u8]) {
        let offset = u64_to_usize(offset);
        self.config
            .as_slice()
            .get(offset..offset + data.len())
            .map(|s| data.copy_from_slice(s))
            .unwrap_or_else(|| {
                error!(
                    "virtio-mem: Config read offset+length {offset}+{} out of bounds",
                    data.len()
                )
            })
    }

    // The virtio-mem config space is read-only for the driver; writes are
    // rejected with a log entry.
    fn write_config(&mut self, offset: u64, _data: &[u8]) {
        error!("virtio-mem: Attempted write to read-only config space at offset {offset}");
    }

    fn is_activated(&self) -> bool {
        self.device_state.is_activated()
    }

    // Activates the device: requires the guest to have acknowledged
    // VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, initializes the queues against
    // guest memory, then signals the activate event fd.
    fn activate(
        &mut self,
        mem: GuestMemoryMmap,
        interrupt: Arc<dyn VirtioInterrupt>,
    ) -> Result<(), ActivateError> {
        if (self.acked_features & (1 << VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE)) == 0 {
            error!(
                "virtio-mem: VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE feature not acknowledged by guest"
            );
            METRICS.activate_fails.inc();
            return Err(ActivateError::RequiredFeatureNotAcked(
                "VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE",
            ));
        }
        for q in self.queues.iter_mut() {
            q.initialize(&mem)
                .map_err(ActivateError::QueueMemoryError)?;
        }
        self.device_state = DeviceState::Activated(ActiveState { mem, interrupt });
        // Roll back activation if the event fd cannot be signalled.
        if self.activate_event.write(1).is_err() {
            METRICS.activate_fails.inc();
            self.device_state = DeviceState::Inactive;
            return Err(ActivateError::EventFd);
        }
        Ok(())
    }
fn kick(&mut self) {
if self.is_activated() {
info!("kick mem {}.", self.id());
self.process_virtio_queues();
}
}
}
#[cfg(test)]
pub(crate) mod test_utils {
    use super::*;
    use crate::devices::virtio::test_utils::test::VirtioTestDevice;
    use crate::test_utils::single_region_mem;
    use crate::vmm_config::machine_config::HugePageConfig;
    use crate::vstate::memory;
    use crate::vstate::vm::tests::setup_vm_with_memory;

    impl VirtioTestDevice for VirtioMem {
        fn set_queues(&mut self, queues: Vec<Queue>) {
            self.queues = queues;
        }
        fn num_queues(&self) -> usize {
            MEM_NUM_QUEUES
        }
    }

    // Builds a 1024 MiB virtio-mem device (2 MiB blocks, 128 MiB slots) at
    // guest address 512 GiB, backed by a freshly created test VM.
    pub(crate) fn default_virtio_mem() -> VirtioMem {
        let (_, mut vm) = setup_vm_with_memory(0x1000);
        let addr = GuestAddress(512 << 30);
        vm.register_hotpluggable_memory_region(
            memory::anonymous(
                std::iter::once((addr, mib_to_bytes(1024))),
                false,
                HugePageConfig::None,
            )
            .unwrap()
            .pop()
            .unwrap(),
            mib_to_bytes(128),
        );
        let vm = Arc::new(vm);
        VirtioMem::new(vm, addr, 1024, 2, 128).unwrap()
    }
}
#[cfg(test)]
mod tests {
use std::ptr::null_mut;
use serde_json::de;
use vm_memory::guest_memory;
use vm_memory::mmap::MmapRegionBuilder;
use super::*;
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::mem::device::test_utils::default_virtio_mem;
use crate::devices::virtio::queue::VIRTQ_DESC_F_WRITE;
use crate::devices::virtio::test_utils::test::VirtioTestHelper;
use crate::vstate::vm::tests::setup_vm_with_memory;
#[test]
fn test_new() {
let mem = default_virtio_mem();
assert_eq!(mem.total_size_mib(), 1024);
assert_eq!(mem.block_size_mib(), 2);
assert_eq!(mem.plugged_size_mib(), 0);
assert_eq!(mem.id(), VIRTIO_MEM_DEV_ID);
assert_eq!(mem.device_type(), VIRTIO_ID_MEM);
let features = (1 << VIRTIO_F_VERSION_1) | (1 << VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE);
assert_eq!(mem.avail_features(), features);
assert_eq!(mem.acked_features(), 0);
assert!(!mem.is_activated());
assert_eq!(mem.queues().len(), MEM_NUM_QUEUES);
assert_eq!(mem.queue_events().len(), MEM_NUM_QUEUES);
}
#[test]
fn test_from_state() {
let (_, vm) = setup_vm_with_memory(0x1000);
let vm = Arc::new(vm);
let queues = vec![Queue::new(FIRECRACKER_MAX_QUEUE_SIZE); MEM_NUM_QUEUES];
let addr = 512 << 30;
let region_size_mib = 2048;
let block_size_mib = 2;
let slot_size_mib = 128;
let plugged_size_mib = 512;
let usable_region_size = mib_to_bytes(1024) as u64;
let config = virtio_mem_config {
addr,
region_size: mib_to_bytes(region_size_mib) as u64,
block_size: mib_to_bytes(block_size_mib) as u64,
plugged_size: mib_to_bytes(plugged_size_mib) as u64,
usable_region_size,
..Default::default()
};
let plugged_blocks = BitVec::repeat(
false,
mib_to_bytes(region_size_mib) / mib_to_bytes(block_size_mib),
);
let mem = VirtioMem::from_state(
vm,
queues,
config,
mib_to_bytes(slot_size_mib),
plugged_blocks,
)
.unwrap();
assert_eq!(mem.config.addr, addr);
assert_eq!(mem.total_size_mib(), region_size_mib);
assert_eq!(mem.block_size_mib(), block_size_mib);
assert_eq!(mem.slot_size_mib(), slot_size_mib);
assert_eq!(mem.plugged_size_mib(), plugged_size_mib);
assert_eq!(mem.config.usable_region_size, usable_region_size);
}
#[test]
fn test_read_config() {
let mem = default_virtio_mem();
let mut data = [0u8; 8];
mem.read_config(0, &mut data);
assert_eq!(
u64::from_le_bytes(data),
mib_to_bytes(mem.block_size_mib()) as u64
);
mem.read_config(16, &mut data);
assert_eq!(u64::from_le_bytes(data), 512 << 30);
mem.read_config(24, &mut data);
assert_eq!(
u64::from_le_bytes(data),
mib_to_bytes(mem.total_size_mib()) as u64
);
}
#[test]
fn test_read_config_out_of_bounds() {
let mem = default_virtio_mem();
let mut data = [0u8; 8];
let config_size = std::mem::size_of::<virtio_mem_config>();
mem.read_config(config_size as u64, &mut data);
assert_eq!(data, [0u8; 8]); // Should remain unchanged
let mut data = vec![0u8; config_size];
mem.read_config(8, &mut data);
assert_eq!(data, vec![0u8; config_size]); // Should remain unchanged
}
#[test]
fn test_write_config() {
let mut mem = default_virtio_mem();
let data = [1u8; 8];
mem.write_config(0, &data); // Should log error but not crash
// should not change config
let mut data = [0u8; 8];
mem.read_config(0, &mut data);
let block_size = u64::from_le_bytes(data);
assert_eq!(block_size, mib_to_bytes(2) as u64);
}
#[test]
fn test_set_features() {
let mut mem = default_virtio_mem();
mem.set_avail_features(123);
assert_eq!(mem.avail_features(), 123);
mem.set_acked_features(456);
assert_eq!(mem.acked_features(), 456);
}
#[test]
fn test_status() {
let mut mem = default_virtio_mem();
let status = mem.status();
assert_eq!(
status,
VirtioMemStatus {
block_size_mib: 2,
total_size_mib: 1024,
slot_size_mib: 128,
plugged_size_mib: 0,
requested_size_mib: 0,
}
);
}
#[allow(clippy::cast_possible_truncation)]
const REQ_SIZE: u32 = std::mem::size_of::<virtio_mem::virtio_mem_req>() as u32;
#[allow(clippy::cast_possible_truncation)]
const RESP_SIZE: u32 = std::mem::size_of::<virtio_mem::virtio_mem_resp>() as u32;
fn test_helper<'a>(
mut dev: VirtioMem,
mem: &'a GuestMemoryMmap,
) -> VirtioTestHelper<'a, VirtioMem> {
dev.set_acked_features(dev.avail_features);
let mut th = VirtioTestHelper::<VirtioMem>::new(mem, dev);
th.activate_device(mem);
th
}
fn emulate_request(
th: &mut VirtioTestHelper<VirtioMem>,
mem: &GuestMemoryMmap,
req: Request,
) -> Response {
th.add_desc_chain(
MEM_QUEUE,
0,
&[(0, REQ_SIZE, 0), (1, RESP_SIZE, VIRTQ_DESC_F_WRITE)],
);
mem.write_obj(
virtio_mem::virtio_mem_req::from(req),
th.desc_address(MEM_QUEUE, 0),
)
.unwrap();
assert_eq!(th.emulate_for_msec(100).unwrap(), 1);
mem.read_obj::<virtio_mem::virtio_mem_resp>(th.desc_address(MEM_QUEUE, 1))
.unwrap()
.into()
}
#[test]
fn test_event_fail_descriptor_chain_too_short() {
let mut mem_dev = default_virtio_mem();
let guest_mem = mem_dev.vm.guest_memory().clone();
let mut th = test_helper(mem_dev, &guest_mem);
let queue_event_count = METRICS.queue_event_count.count();
let queue_event_fails = METRICS.queue_event_fails.count();
th.add_desc_chain(MEM_QUEUE, 0, &[(0, REQ_SIZE, 0)]);
assert_eq!(th.emulate_for_msec(100).unwrap(), 1);
assert_eq!(METRICS.queue_event_count.count(), queue_event_count + 1);
assert_eq!(METRICS.queue_event_fails.count(), queue_event_fails + 1);
}
#[test]
fn test_event_fail_descriptor_length_too_small() {
let mut mem_dev = default_virtio_mem();
let guest_mem = mem_dev.vm.guest_memory().clone();
let mut th = test_helper(mem_dev, &guest_mem);
let queue_event_count = METRICS.queue_event_count.count();
let queue_event_fails = METRICS.queue_event_fails.count();
th.add_desc_chain(MEM_QUEUE, 0, &[(0, 1, 0)]);
assert_eq!(th.emulate_for_msec(100).unwrap(), 1);
assert_eq!(METRICS.queue_event_count.count(), queue_event_count + 1);
assert_eq!(METRICS.queue_event_fails.count(), queue_event_fails + 1);
}
#[test]
fn test_event_fail_unexpected_writeonly_descriptor() {
let mut mem_dev = default_virtio_mem();
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/mem/mod.rs | src/vmm/src/devices/virtio/mem/mod.rs | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
mod device;
mod event_handler;
pub mod metrics;
pub mod persist;
mod request;
use vm_memory::GuestAddress;
pub use self::device::{VirtioMem, VirtioMemError, VirtioMemStatus};
use crate::arch::FIRST_ADDR_PAST_64BITS_MMIO;
// Number of virtio queues used by the virtio-mem device.
pub(crate) const MEM_NUM_QUEUES: usize = 1;
// Index of the single virtio-mem request queue.
pub(crate) const MEM_QUEUE: usize = 0;
/// Default virtio-mem block size in MiB.
pub const VIRTIO_MEM_DEFAULT_BLOCK_SIZE_MIB: usize = 2;
/// Default size in MiB of a KVM memory slot backing hotplugged memory.
pub const VIRTIO_MEM_DEFAULT_SLOT_SIZE_MIB: usize = 128;
/// Device ID string of the virtio-mem device.
pub const VIRTIO_MEM_DEV_ID: &str = "mem";
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/mem/event_handler.rs | src/vmm/src/devices/virtio/mem/event_handler.rs | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use event_manager::{EventOps, Events, MutEventSubscriber};
use vmm_sys_util::epoll::EventSet;
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::mem::MEM_QUEUE;
use crate::devices::virtio::mem::device::VirtioMem;
use crate::logger::{error, warn};
impl VirtioMem {
const PROCESS_ACTIVATE: u32 = 0;
const PROCESS_MEM_QUEUE: u32 = 1;
fn register_runtime_events(&self, ops: &mut EventOps) {
if let Err(err) = ops.add(Events::with_data(
&self.queue_events()[MEM_QUEUE],
Self::PROCESS_MEM_QUEUE,
EventSet::IN,
)) {
error!("virtio-mem: Failed to register queue event: {err}");
}
}
fn register_activate_event(&self, ops: &mut EventOps) {
if let Err(err) = ops.add(Events::with_data(
self.activate_event(),
Self::PROCESS_ACTIVATE,
EventSet::IN,
)) {
error!("virtio-mem: Failed to register activate event: {err}");
}
}
fn process_activate_event(&self, ops: &mut EventOps) {
if let Err(err) = self.activate_event().read() {
error!("virtio-mem: Failed to consume activate event: {err}");
}
// Register runtime events
self.register_runtime_events(ops);
// Remove activate event
if let Err(err) = ops.remove(Events::with_data(
self.activate_event(),
Self::PROCESS_ACTIVATE,
EventSet::IN,
)) {
error!("virtio-mem: Failed to un-register activate event: {err}");
}
}
}
impl MutEventSubscriber for VirtioMem {
fn init(&mut self, ops: &mut event_manager::EventOps) {
// This function can be called during different points in the device lifetime:
// - shortly after device creation,
// - on device activation (is-activated already true at this point),
// - on device restore from snapshot.
if self.is_activated() {
self.register_runtime_events(ops);
} else {
self.register_activate_event(ops);
}
}
fn process(&mut self, events: event_manager::Events, ops: &mut event_manager::EventOps) {
let event_set = events.event_set();
let source = events.data();
if !event_set.contains(EventSet::IN) {
warn!("virtio-mem: Received unknown event: {event_set:?} from source {source}");
return;
}
if !self.is_activated() {
warn!("virtio-mem: The device is not activated yet. Spurious event received: {source}");
return;
}
match source {
Self::PROCESS_ACTIVATE => self.process_activate_event(ops),
Self::PROCESS_MEM_QUEUE => self.process_mem_queue_event(),
_ => {
warn!("virtio-mem: Unknown event received: {source}");
}
}
}
}
#[cfg(test)]
mod tests {
use std::sync::{Arc, Mutex};
use event_manager::{EventManager, SubscriberOps};
use vmm_sys_util::epoll::EventSet;
use super::*;
use crate::devices::virtio::ActivateError;
use crate::devices::virtio::generated::virtio_mem::VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE;
use crate::devices::virtio::mem::device::test_utils::default_virtio_mem;
use crate::devices::virtio::test_utils::{VirtQueue, default_interrupt, default_mem};
use crate::vstate::memory::GuestAddress;
#[test]
fn test_event_handler_activation() {
let mut event_manager = EventManager::new().unwrap();
let mut mem_device = default_virtio_mem();
let mem = default_mem();
let interrupt = default_interrupt();
// Set up queue
let virtq = VirtQueue::new(GuestAddress(0), &mem, 16);
mem_device.queues_mut()[MEM_QUEUE] = virtq.create_queue();
let mem_device = Arc::new(Mutex::new(mem_device));
let _id = event_manager.add_subscriber(mem_device.clone());
// Device should register activate event when inactive
assert!(!mem_device.lock().unwrap().is_activated());
// Device should prevent activation before features are acked
let err = mem_device
.lock()
.unwrap()
.activate(mem.clone(), interrupt.clone())
.unwrap_err();
assert!(matches!(err, ActivateError::RequiredFeatureNotAcked(_)));
// Ack the feature and activate the device
mem_device
.lock()
.unwrap()
.set_acked_features(1 << VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE);
mem_device.lock().unwrap().activate(mem, interrupt).unwrap();
// Process activation event
let ev_count = event_manager.run_with_timeout(50).unwrap();
assert_eq!(ev_count, 1);
assert!(mem_device.lock().unwrap().is_activated());
}
#[test]
fn test_process_mem_queue_event() {
let mut event_manager = EventManager::new().unwrap();
let mut mem_device = default_virtio_mem();
let mem = default_mem();
let interrupt = default_interrupt();
// Set up queue
let virtq = VirtQueue::new(GuestAddress(0), &mem, 16);
mem_device.queues_mut()[MEM_QUEUE] = virtq.create_queue();
mem_device.set_acked_features(mem_device.avail_features());
let mem_device = Arc::new(Mutex::new(mem_device));
let _id = event_manager.add_subscriber(mem_device.clone());
// Activate device first
mem_device.lock().unwrap().activate(mem, interrupt).unwrap();
event_manager.run_with_timeout(50).unwrap(); // Process activation
// Trigger queue event
mem_device.lock().unwrap().queue_events()[MEM_QUEUE]
.write(1)
.unwrap();
// Process queue event
let ev_count = event_manager.run_with_timeout(50).unwrap();
assert_eq!(ev_count, 1);
}
#[test]
fn test_spurious_event_before_activation() {
let mut event_manager = EventManager::new().unwrap();
let mem_device = default_virtio_mem();
let mem_device = Arc::new(Mutex::new(mem_device));
let _id = event_manager.add_subscriber(mem_device.clone());
// Try to trigger queue event before activation
mem_device.lock().unwrap().queue_events()[MEM_QUEUE]
.write(1)
.unwrap();
// Should not process queue events before activation
let ev_count = event_manager.run_with_timeout(50).unwrap();
assert_eq!(ev_count, 0);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/mem/metrics.rs | src/vmm/src/devices/virtio/mem/metrics.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the metrics system for memory devices.
//!
//! # Metrics format
//! The metrics are flushed in JSON when requested by vmm::logger::metrics::METRICS.write().
//!
//! ## JSON example with metrics:
//! ```json
//! "memory_hotplug": {
//! "activate_fails": "SharedIncMetric",
//! "queue_event_fails": "SharedIncMetric",
//! "queue_event_count": "SharedIncMetric",
//! ...
//! }
//! }
//! ```
//! Each `memory` field in the example above is a serializable `VirtioMemDeviceMetrics` structure
//! collecting metrics such as `activate_fails`, `queue_event_fails` etc. for the memoty hotplug
//! device.
//! Since Firecrakcer only supports one virtio-mem device, there is no per device metrics and
//! `memory_hotplug` represents the aggregate entropy metrics.
use serde::ser::SerializeMap;
use serde::{Serialize, Serializer};
use crate::logger::{LatencyAggregateMetrics, SharedIncMetric};
/// Stores aggregated virtio-mem metrics
pub(super) static METRICS: VirtioMemDeviceMetrics = VirtioMemDeviceMetrics::new();
/// Called by METRICS.flush(), this function facilitates serialization of virtio-mem device metrics.
pub fn flush_metrics<S: Serializer>(serializer: S) -> Result<S::Ok, S::Error> {
let mut seq = serializer.serialize_map(Some(1))?;
seq.serialize_entry("memory_hotplug", &METRICS)?;
seq.end()
}
#[derive(Debug, Serialize)]
pub(super) struct VirtioMemDeviceMetrics {
/// Number of device activation failures
pub activate_fails: SharedIncMetric,
/// Number of queue event handling failures
pub queue_event_fails: SharedIncMetric,
/// Number of queue events handled
pub queue_event_count: SharedIncMetric,
/// Latency of Plug operations
pub plug_agg: LatencyAggregateMetrics,
/// Number of Plug operations
pub plug_count: SharedIncMetric,
/// Number of plugged bytes
pub plug_bytes: SharedIncMetric,
/// Number of Plug operations failed
pub plug_fails: SharedIncMetric,
/// Latency of Unplug operations
pub unplug_agg: LatencyAggregateMetrics,
/// Number of Unplug operations
pub unplug_count: SharedIncMetric,
/// Number of unplugged bytes
pub unplug_bytes: SharedIncMetric,
/// Number of Unplug operations failed
pub unplug_fails: SharedIncMetric,
/// Number of discards failed for an Unplug or UnplugAll operation
pub unplug_discard_fails: SharedIncMetric,
/// Latency of UnplugAll operations
pub unplug_all_agg: LatencyAggregateMetrics,
/// Number of UnplugAll operations
pub unplug_all_count: SharedIncMetric,
/// Number of UnplugAll operations failed
pub unplug_all_fails: SharedIncMetric,
/// Latency of State operations
pub state_agg: LatencyAggregateMetrics,
/// Number of State operations
pub state_count: SharedIncMetric,
/// Number of State operations failed
pub state_fails: SharedIncMetric,
}
impl VirtioMemDeviceMetrics {
/// Const default construction.
const fn new() -> Self {
Self {
activate_fails: SharedIncMetric::new(),
queue_event_fails: SharedIncMetric::new(),
queue_event_count: SharedIncMetric::new(),
plug_agg: LatencyAggregateMetrics::new(),
plug_count: SharedIncMetric::new(),
plug_bytes: SharedIncMetric::new(),
plug_fails: SharedIncMetric::new(),
unplug_agg: LatencyAggregateMetrics::new(),
unplug_count: SharedIncMetric::new(),
unplug_bytes: SharedIncMetric::new(),
unplug_fails: SharedIncMetric::new(),
unplug_discard_fails: SharedIncMetric::new(),
unplug_all_agg: LatencyAggregateMetrics::new(),
unplug_all_count: SharedIncMetric::new(),
unplug_all_fails: SharedIncMetric::new(),
state_agg: LatencyAggregateMetrics::new(),
state_count: SharedIncMetric::new(),
state_fails: SharedIncMetric::new(),
}
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::logger::IncMetric;
#[test]
fn test_memory_hotplug_metrics() {
let mem_metrics: VirtioMemDeviceMetrics = VirtioMemDeviceMetrics::new();
mem_metrics.queue_event_count.inc();
assert_eq!(mem_metrics.queue_event_count.count(), 1);
let _ = serde_json::to_string(&mem_metrics).unwrap();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/mem/request.rs | src/vmm/src/devices/virtio/mem/request.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vm_memory::{Address, ByteValued, GuestAddress};
use crate::devices::virtio::generated::virtio_mem;
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct RequestedRange {
pub(crate) addr: GuestAddress,
pub(crate) nb_blocks: usize,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum Request {
Plug(RequestedRange),
Unplug(RequestedRange),
UnplugAll,
State(RequestedRange),
Unsupported(u32),
}
// SAFETY: this is safe, trust me bro
unsafe impl ByteValued for virtio_mem::virtio_mem_req {}
impl From<virtio_mem::virtio_mem_req> for Request {
fn from(req: virtio_mem::virtio_mem_req) -> Self {
match req.type_.into() {
// SAFETY: union type is checked in the match
virtio_mem::VIRTIO_MEM_REQ_PLUG => unsafe {
Request::Plug(RequestedRange {
addr: GuestAddress(req.u.plug.addr),
nb_blocks: req.u.plug.nb_blocks.into(),
})
},
// SAFETY: union type is checked in the match
virtio_mem::VIRTIO_MEM_REQ_UNPLUG => unsafe {
Request::Unplug(RequestedRange {
addr: GuestAddress(req.u.unplug.addr),
nb_blocks: req.u.unplug.nb_blocks.into(),
})
},
virtio_mem::VIRTIO_MEM_REQ_UNPLUG_ALL => Request::UnplugAll,
// SAFETY: union type is checked in the match
virtio_mem::VIRTIO_MEM_REQ_STATE => unsafe {
Request::State(RequestedRange {
addr: GuestAddress(req.u.state.addr),
nb_blocks: req.u.state.nb_blocks.into(),
})
},
t => Request::Unsupported(t),
}
}
}
#[repr(u16)]
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
#[allow(clippy::cast_possible_truncation)]
pub enum ResponseType {
Ack = virtio_mem::VIRTIO_MEM_RESP_ACK as u16,
Nack = virtio_mem::VIRTIO_MEM_RESP_NACK as u16,
Busy = virtio_mem::VIRTIO_MEM_RESP_BUSY as u16,
Error = virtio_mem::VIRTIO_MEM_RESP_ERROR as u16,
}
#[repr(u16)]
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
#[allow(clippy::cast_possible_truncation)]
pub enum BlockRangeState {
Plugged = virtio_mem::VIRTIO_MEM_STATE_PLUGGED as u16,
Unplugged = virtio_mem::VIRTIO_MEM_STATE_UNPLUGGED as u16,
Mixed = virtio_mem::VIRTIO_MEM_STATE_MIXED as u16,
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Response {
pub resp_type: ResponseType,
// Only for State requests
pub state: Option<BlockRangeState>,
}
impl Response {
pub(crate) fn error() -> Self {
Response {
resp_type: ResponseType::Error,
state: None,
}
}
pub(crate) fn ack() -> Self {
Response {
resp_type: ResponseType::Ack,
state: None,
}
}
pub(crate) fn ack_with_state(state: BlockRangeState) -> Self {
Response {
resp_type: ResponseType::Ack,
state: Some(state),
}
}
pub(crate) fn is_ack(&self) -> bool {
self.resp_type == ResponseType::Ack
}
pub(crate) fn is_error(&self) -> bool {
self.resp_type == ResponseType::Error
}
}
// SAFETY: Plain data structures
unsafe impl ByteValued for virtio_mem::virtio_mem_resp {}
impl From<Response> for virtio_mem::virtio_mem_resp {
fn from(resp: Response) -> Self {
let mut out = virtio_mem::virtio_mem_resp {
type_: resp.resp_type as u16,
..Default::default()
};
if let Some(state) = resp.state {
out.u.state.state = state as u16;
}
out
}
}
#[cfg(test)]
mod test_util {
use super::*;
// Implement the reverse conversions to use in test code.
impl From<Request> for virtio_mem::virtio_mem_req {
fn from(req: Request) -> virtio_mem::virtio_mem_req {
match req {
Request::Plug(r) => virtio_mem::virtio_mem_req {
type_: virtio_mem::VIRTIO_MEM_REQ_PLUG.try_into().unwrap(),
u: virtio_mem::virtio_mem_req__bindgen_ty_1 {
plug: virtio_mem::virtio_mem_req_plug {
addr: r.addr.raw_value(),
nb_blocks: r.nb_blocks.try_into().unwrap(),
..Default::default()
},
},
..Default::default()
},
Request::Unplug(r) => virtio_mem::virtio_mem_req {
type_: virtio_mem::VIRTIO_MEM_REQ_UNPLUG.try_into().unwrap(),
u: virtio_mem::virtio_mem_req__bindgen_ty_1 {
unplug: virtio_mem::virtio_mem_req_unplug {
addr: r.addr.raw_value(),
nb_blocks: r.nb_blocks.try_into().unwrap(),
..Default::default()
},
},
..Default::default()
},
Request::UnplugAll => virtio_mem::virtio_mem_req {
type_: virtio_mem::VIRTIO_MEM_REQ_UNPLUG_ALL.try_into().unwrap(),
..Default::default()
},
Request::State(r) => virtio_mem::virtio_mem_req {
type_: virtio_mem::VIRTIO_MEM_REQ_STATE.try_into().unwrap(),
u: virtio_mem::virtio_mem_req__bindgen_ty_1 {
state: virtio_mem::virtio_mem_req_state {
addr: r.addr.raw_value(),
nb_blocks: r.nb_blocks.try_into().unwrap(),
..Default::default()
},
},
..Default::default()
},
Request::Unsupported(t) => virtio_mem::virtio_mem_req {
type_: t.try_into().unwrap(),
..Default::default()
},
}
}
}
impl From<virtio_mem::virtio_mem_resp> for Response {
fn from(resp: virtio_mem::virtio_mem_resp) -> Self {
Response {
resp_type: match resp.type_.into() {
virtio_mem::VIRTIO_MEM_RESP_ACK => ResponseType::Ack,
virtio_mem::VIRTIO_MEM_RESP_NACK => ResponseType::Nack,
virtio_mem::VIRTIO_MEM_RESP_BUSY => ResponseType::Busy,
virtio_mem::VIRTIO_MEM_RESP_ERROR => ResponseType::Error,
t => panic!("Invalid response type: {:?}", t),
},
// There is no way to know whether this is present or not as it depends on the
// request types. Callers should ignore this value if the request wasn't STATE
/// SAFETY: test code only. Uninitialized values are 0 and recognized as PLUGGED.
state: Some(unsafe {
match resp.u.state.state.into() {
virtio_mem::VIRTIO_MEM_STATE_PLUGGED => BlockRangeState::Plugged,
virtio_mem::VIRTIO_MEM_STATE_UNPLUGGED => BlockRangeState::Unplugged,
virtio_mem::VIRTIO_MEM_STATE_MIXED => BlockRangeState::Mixed,
t => panic!("Invalid state: {:?}", t),
}
}),
}
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/legacy/serial.rs | src/vmm/src/devices/legacy/serial.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! Implements a wrapper over an UART serial device.
use std::fmt::Debug;
use std::fs::File;
use std::io::{self, Read, Stdin, Write};
use std::os::unix::io::{AsRawFd, RawFd};
use std::sync::{Arc, Barrier};
use event_manager::{EventOps, Events, MutEventSubscriber};
use libc::EFD_NONBLOCK;
use log::{error, warn};
use serde::Serialize;
use vm_superio::serial::{Error as SerialError, SerialEvents};
use vm_superio::{Serial, Trigger};
use vmm_sys_util::epoll::EventSet;
use vmm_sys_util::eventfd::EventFd;
use crate::devices::legacy::EventFdTrigger;
use crate::logger::{IncMetric, SharedIncMetric};
use crate::vstate::bus::BusDevice;
/// Received Data Available interrupt - for letting the driver know that
/// there is some pending data to be processed.
pub const IER_RDA_BIT: u8 = 0b0000_0001;
/// Received Data Available interrupt offset
pub const IER_RDA_OFFSET: u8 = 1;
/// Metrics specific to the UART device.
#[derive(Debug, Serialize, Default)]
pub struct SerialDeviceMetrics {
/// Errors triggered while using the UART device.
pub error_count: SharedIncMetric,
/// Number of flush operations.
pub flush_count: SharedIncMetric,
/// Number of read calls that did not trigger a read.
pub missed_read_count: SharedIncMetric,
/// Number of write calls that did not trigger a write.
pub missed_write_count: SharedIncMetric,
/// Number of succeeded read calls.
pub read_count: SharedIncMetric,
/// Number of succeeded write calls.
pub write_count: SharedIncMetric,
}
impl SerialDeviceMetrics {
/// Const default construction.
pub const fn new() -> Self {
Self {
error_count: SharedIncMetric::new(),
flush_count: SharedIncMetric::new(),
missed_read_count: SharedIncMetric::new(),
missed_write_count: SharedIncMetric::new(),
read_count: SharedIncMetric::new(),
write_count: SharedIncMetric::new(),
}
}
}
/// Stores aggregated metrics
pub(super) static METRICS: SerialDeviceMetrics = SerialDeviceMetrics::new();
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum RawIOError {
/// Serial error: {0:?}
Serial(SerialError<io::Error>),
}
pub trait RawIOHandler {
/// Send raw input to this emulated device.
fn raw_input(&mut self, _data: &[u8]) -> Result<(), RawIOError>;
}
impl<EV: SerialEvents + Debug, W: Write + Debug> RawIOHandler for Serial<EventFdTrigger, EV, W> {
// This is not used for anything and is basically just a dummy implementation for `raw_input`.
fn raw_input(&mut self, data: &[u8]) -> Result<(), RawIOError> {
// Fail fast if the serial is serviced with more data than it can buffer.
if data.len() > self.fifo_capacity() {
return Err(RawIOError::Serial(SerialError::FullFifo));
}
// Before enqueuing bytes we first check if there is enough free space
// in the FIFO.
if self.fifo_capacity() >= data.len() {
self.enqueue_raw_bytes(data).map_err(RawIOError::Serial)?;
}
Ok(())
}
}
/// Wrapper over available events (i.e metrics, buffer ready etc).
#[derive(Debug)]
pub struct SerialEventsWrapper {
/// Buffer ready event.
pub buffer_ready_event_fd: Option<EventFdTrigger>,
}
impl SerialEvents for SerialEventsWrapper {
fn buffer_read(&self) {
METRICS.read_count.inc();
}
fn out_byte(&self) {
METRICS.write_count.inc();
}
fn tx_lost_byte(&self) {
METRICS.missed_write_count.inc();
}
fn in_buffer_empty(&self) {
match self
.buffer_ready_event_fd
.as_ref()
.map_or(Ok(()), |buf_ready| buf_ready.write(1))
{
Ok(_) => (),
Err(err) => error!(
"Could not signal that serial device buffer is ready: {:?}",
err
),
}
}
}
#[derive(Debug)]
pub enum SerialOut {
Sink,
Stdout(std::io::Stdout),
File(File),
}
impl std::io::Write for SerialOut {
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
match self {
Self::Sink => Ok(buf.len()),
Self::Stdout(stdout) => stdout.write(buf),
Self::File(file) => file.write(buf),
}
}
fn flush(&mut self) -> std::io::Result<()> {
match self {
Self::Sink => Ok(()),
Self::Stdout(stdout) => stdout.flush(),
Self::File(file) => file.flush(),
}
}
}
/// Wrapper over the imported serial device.
#[derive(Debug)]
pub struct SerialWrapper<T: Trigger, EV: SerialEvents, I: Read + AsRawFd + Send> {
/// Serial device object.
pub serial: Serial<T, EV, SerialOut>,
/// Input to the serial device (needs to be readable).
pub input: Option<I>,
}
impl<I: Read + AsRawFd + Send + Debug> SerialWrapper<EventFdTrigger, SerialEventsWrapper, I> {
fn handle_ewouldblock(&self, ops: &mut EventOps) {
let buffer_ready_fd = self.buffer_ready_evt_fd();
let input_fd = self.serial_input_fd();
if input_fd < 0 || buffer_ready_fd < 0 {
error!("Serial does not have a configured input source.");
return;
}
match ops.add(Events::new(&input_fd, EventSet::IN)) {
Err(event_manager::Error::FdAlreadyRegistered) => (),
Err(err) => {
error!(
"Could not register the serial input to the event manager: {:?}",
err
);
}
Ok(()) => {
// Bytes might had come on the unregistered stdin. Try to consume any.
self.serial.events().in_buffer_empty()
}
};
}
fn recv_bytes(&mut self) -> io::Result<usize> {
let avail_cap = self.serial.fifo_capacity();
if avail_cap == 0 {
return Err(io::Error::from_raw_os_error(libc::ENOBUFS));
}
if let Some(input) = self.input.as_mut() {
let mut out = vec![0u8; avail_cap];
let count = input.read(&mut out)?;
if count > 0 {
self.serial
.raw_input(&out[..count])
.map_err(|_| io::Error::from_raw_os_error(libc::ENOBUFS))?;
}
return Ok(count);
}
Err(io::Error::from_raw_os_error(libc::ENOTTY))
}
#[inline]
fn buffer_ready_evt_fd(&self) -> RawFd {
self.serial
.events()
.buffer_ready_event_fd
.as_ref()
.map_or(-1, |buf_ready| buf_ready.as_raw_fd())
}
#[inline]
fn serial_input_fd(&self) -> RawFd {
self.input.as_ref().map_or(-1, |input| input.as_raw_fd())
}
fn consume_buffer_ready_event(&self) -> io::Result<u64> {
self.serial
.events()
.buffer_ready_event_fd
.as_ref()
.map_or(Ok(0), |buf_ready| buf_ready.read())
}
}
/// Type for representing a serial device.
pub type SerialDevice = SerialWrapper<EventFdTrigger, SerialEventsWrapper, Stdin>;
impl SerialDevice {
pub fn new(serial_in: Option<Stdin>, serial_out: SerialOut) -> Result<Self, std::io::Error> {
let interrupt_evt = EventFdTrigger::new(EventFd::new(EFD_NONBLOCK)?);
let buffer_read_event_fd = EventFdTrigger::new(EventFd::new(EFD_NONBLOCK)?);
let serial = Serial::with_events(
interrupt_evt,
SerialEventsWrapper {
buffer_ready_event_fd: Some(buffer_read_event_fd),
},
serial_out,
);
Ok(SerialDevice {
serial,
input: serial_in,
})
}
}
impl<I: Read + AsRawFd + Send + Debug> MutEventSubscriber
for SerialWrapper<EventFdTrigger, SerialEventsWrapper, I>
{
/// Handle events on the serial input fd.
fn process(&mut self, event: Events, ops: &mut EventOps) {
#[inline]
fn unregister_source<T: AsRawFd + Debug>(ops: &mut EventOps, source: &T) {
match ops.remove(Events::new(source, EventSet::IN)) {
Ok(_) => (),
Err(_) => error!("Could not unregister source fd: {}", source.as_raw_fd()),
}
}
let input_fd = self.serial_input_fd();
let buffer_ready_fd = self.buffer_ready_evt_fd();
if input_fd < 0 || buffer_ready_fd < 0 {
error!("Serial does not have a configured input source.");
return;
}
if buffer_ready_fd == event.fd() {
match self.consume_buffer_ready_event() {
Ok(_) => (),
Err(err) => {
error!(
"Detach serial device input source due to error in consuming the buffer \
ready event: {:?}",
err
);
unregister_source(ops, &input_fd);
unregister_source(ops, &buffer_ready_fd);
return;
}
}
}
// We expect to receive: `EventSet::IN`, `EventSet::HANG_UP` or
// `EventSet::ERROR`. To process all these events we just have to
// read from the serial input.
match self.recv_bytes() {
Ok(count) => {
// Handle EOF if the event came from the input source.
if input_fd == event.fd() && count == 0 {
unregister_source(ops, &input_fd);
unregister_source(ops, &buffer_ready_fd);
warn!("Detached the serial input due to peer close/error.");
}
}
Err(err) => {
match err.raw_os_error() {
Some(errno) if errno == libc::ENOBUFS => {
unregister_source(ops, &input_fd);
}
Some(errno) if errno == libc::EWOULDBLOCK => {
self.handle_ewouldblock(ops);
}
Some(errno) if errno == libc::ENOTTY => {
error!("The serial device does not have the input source attached.");
unregister_source(ops, &input_fd);
unregister_source(ops, &buffer_ready_fd);
}
Some(_) | None => {
// Unknown error, detach the serial input source.
unregister_source(ops, &input_fd);
unregister_source(ops, &buffer_ready_fd);
warn!("Detached the serial input due to peer close/error.");
}
}
}
}
}
/// Initial registration of pollable objects.
/// If serial input is present, register the serial input FD as readable.
fn init(&mut self, ops: &mut EventOps) {
if self.input.is_some() && self.serial.events().buffer_ready_event_fd.is_some() {
let serial_fd = self.serial_input_fd();
let buf_ready_evt = self.buffer_ready_evt_fd();
// If the jailer is instructed to daemonize before exec-ing into firecracker, we set
// stdin, stdout and stderr to be open('/dev/null'). However, if stdin is redirected
// from /dev/null then trying to register FILENO_STDIN to epoll will fail with EPERM.
// Therefore, only try to register stdin to epoll if it is a terminal or a FIFO pipe.
// SAFETY: isatty has no invariants that need to be upheld. If serial_fd is an invalid
// argument, it will return 0 and set errno to EBADF.
if (unsafe { libc::isatty(serial_fd) } == 1 || is_fifo(serial_fd))
&& let Err(err) = ops.add(Events::new(&serial_fd, EventSet::IN))
{
warn!("Failed to register serial input fd: {}", err);
}
if let Err(err) = ops.add(Events::new(&buf_ready_evt, EventSet::IN)) {
warn!("Failed to register serial buffer ready event: {}", err);
}
}
}
}
/// Checks whether the given file descriptor is a FIFO pipe.
fn is_fifo(fd: RawFd) -> bool {
let mut stat = std::mem::MaybeUninit::<libc::stat>::uninit();
// SAFETY: No unsafety can be introduced by passing in an invalid file descriptor to fstat,
// it will return -1 and set errno to EBADF. The pointer passed to fstat is valid for writing
// a libc::stat structure.
if unsafe { libc::fstat(fd, stat.as_mut_ptr()) } < 0 {
return false;
}
// SAFETY: We can safely assume the libc::stat structure to be initialized, as libc::fstat
// returning 0 guarantees that the memory is now initialized with the requested file metadata.
let stat = unsafe { stat.assume_init() };
(stat.st_mode & libc::S_IFIFO) != 0
}
impl<I> BusDevice for SerialWrapper<EventFdTrigger, SerialEventsWrapper, I>
where
I: Read + AsRawFd + Send,
{
fn read(&mut self, _base: u64, offset: u64, data: &mut [u8]) {
if let (Ok(offset), 1) = (u8::try_from(offset), data.len()) {
data[0] = self.serial.read(offset);
} else {
METRICS.missed_read_count.inc();
}
}
fn write(&mut self, _base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
if let (Ok(offset), 1) = (u8::try_from(offset), data.len()) {
if let Err(err) = self.serial.write(offset, data[0]) {
// Counter incremented for any handle_write() error.
error!("Failed the write to serial: {:?}", err);
METRICS.error_count.inc();
}
} else {
METRICS.missed_write_count.inc();
}
None
}
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use vmm_sys_util::eventfd::EventFd;
use super::*;
use crate::logger::IncMetric;
#[test]
fn test_serial_bus_read() {
let intr_evt = EventFdTrigger::new(EventFd::new(libc::EFD_NONBLOCK).unwrap());
let metrics = &METRICS;
let mut serial = SerialDevice {
serial: Serial::with_events(
intr_evt,
SerialEventsWrapper {
buffer_ready_event_fd: None,
},
SerialOut::Sink,
),
input: None::<std::io::Stdin>,
};
serial.serial.raw_input(b"abc").unwrap();
let invalid_reads_before = metrics.missed_read_count.count();
let mut v = [0x00; 2];
serial.read(0x0, 0u64, &mut v);
let invalid_reads_after = metrics.missed_read_count.count();
assert_eq!(invalid_reads_before + 1, invalid_reads_after);
let mut v = [0x00; 1];
serial.read(0x0, 0u64, &mut v);
assert_eq!(v[0], b'a');
let invalid_reads_after_2 = metrics.missed_read_count.count();
// The `invalid_read_count` metric should be the same as before the one-byte reads.
assert_eq!(invalid_reads_after_2, invalid_reads_after);
}
#[test]
fn test_is_fifo() {
// invalid file descriptors arent fifos
let invalid = -1;
assert!(!is_fifo(invalid));
// Fifos are fifos
let mut fds: [libc::c_int; 2] = [0; 2];
let rc = unsafe { libc::pipe(fds.as_mut_ptr()) };
assert!(rc == 0);
assert!(is_fifo(fds[0]));
assert!(is_fifo(fds[1]));
// Files arent fifos
let tmp_file = vmm_sys_util::tempfile::TempFile::new().unwrap();
assert!(!is_fifo(tmp_file.as_file().as_raw_fd()));
}
#[test]
fn test_serial_dev_metrics() {
let serial_metrics: SerialDeviceMetrics = SerialDeviceMetrics::new();
let serial_metrics_local: String = serde_json::to_string(&serial_metrics).unwrap();
// the 1st serialize flushes the metrics and resets values to 0 so that
// we can compare the values with local metrics.
serde_json::to_string(&METRICS).unwrap();
let serial_metrics_global: String = serde_json::to_string(&METRICS).unwrap();
assert_eq!(serial_metrics_local, serial_metrics_global);
serial_metrics.read_count.inc();
assert_eq!(serial_metrics.read_count.count(), 1);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/legacy/i8042.rs | src/vmm/src/devices/legacy/i8042.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::io;
use std::num::Wrapping;
use std::sync::{Arc, Barrier};
use log::warn;
use serde::Serialize;
use vmm_sys_util::eventfd::EventFd;
use crate::logger::{IncMetric, SharedIncMetric, error};
use crate::vstate::bus::BusDevice;
/// Errors thrown by the i8042 device.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum I8042Error {
/// i8042 internal buffer full.
InternalBufferFull,
/// Keyboard interrupt disabled by guest driver.
KbdInterruptDisabled,
/// Could not trigger keyboard interrupt: {0}.
KbdInterruptFailure(io::Error),
}
/// Metrics specific to the i8042 device.
#[derive(Debug, Serialize)]
pub(super) struct I8042DeviceMetrics {
/// Errors triggered while using the i8042 device.
error_count: SharedIncMetric,
/// Number of superfluous read intents on this i8042 device.
missed_read_count: SharedIncMetric,
/// Number of superfluous write intents on this i8042 device.
missed_write_count: SharedIncMetric,
/// Bytes read by this device.
read_count: SharedIncMetric,
/// Number of resets done by this device.
reset_count: SharedIncMetric,
/// Bytes written by this device.
write_count: SharedIncMetric,
}
impl I8042DeviceMetrics {
/// Const default construction.
const fn new() -> Self {
Self {
error_count: SharedIncMetric::new(),
missed_read_count: SharedIncMetric::new(),
missed_write_count: SharedIncMetric::new(),
read_count: SharedIncMetric::new(),
reset_count: SharedIncMetric::new(),
write_count: SharedIncMetric::new(),
}
}
}
/// Stores aggregated metrics
// Process-wide instance; written by the device handlers below and read by
// `flush_metrics` in the parent module.
pub(super) static METRICS: I8042DeviceMetrics = I8042DeviceMetrics::new();
/// Offset of the status port (port 0x64)
const OFS_STATUS: u64 = 4;
/// Offset of the data port (port 0x60)
const OFS_DATA: u64 = 0;
/// i8042 commands
/// These values are written by the guest driver to port 0x64.
const CMD_READ_CTR: u8 = 0x20; // Read control register
const CMD_WRITE_CTR: u8 = 0x60; // Write control register
const CMD_READ_OUTP: u8 = 0xD0; // Read output port
const CMD_WRITE_OUTP: u8 = 0xD1; // Write output port
const CMD_RESET_CPU: u8 = 0xFE; // Reset CPU
/// i8042 status register bits
const SB_OUT_DATA_AVAIL: u8 = 0x0001; // Data available at port 0x60
const SB_I8042_CMD_DATA: u8 = 0x0008; // i8042 expecting command parameter at port 0x60
const SB_KBD_ENABLED: u8 = 0x0010; // 1 = kbd enabled, 0 = kbd locked
/// i8042 control register bits
const CB_KBD_INT: u8 = 0x0001; // kbd interrupt enabled
const CB_POST_OK: u8 = 0x0004; // POST ok (should always be 1)
/// Key scan codes
// NOTE(review): these look like PS/2 scan-code set 2 values (0xE0-prefixed
// extended code for DEL; high byte is pushed first in `trigger_key`) —
// confirm against the guest keyboard driver's expectations.
const KEY_CTRL: u16 = 0x0014;
const KEY_ALT: u16 = 0x0011;
const KEY_DEL: u16 = 0xE071;
/// Internal i8042 buffer size, in bytes
// Keep this a power of two: push/pop index with `% BUF_SIZE` on monotonically
// wrapping `Wrapping<usize>` cursors, which is only consistent across cursor
// overflow when BUF_SIZE divides 2^64.
const BUF_SIZE: usize = 16;
/// A i8042 PS/2 controller that emulates just enough to shutdown the machine.
#[derive(Debug)]
pub struct I8042Device {
    /// CPU reset eventfd. We will set this event when the guest issues CMD_RESET_CPU.
    reset_evt: EventFd,
    /// Keyboard interrupt event (IRQ 1).
    pub kbd_interrupt_evt: EventFd,
    /// The i8042 status register.
    status: u8,
    /// The i8042 control register.
    control: u8,
    /// The i8042 output port.
    outp: u8,
    /// The last command sent to port 0x64.
    cmd: u8,
    /// The internal i8042 data buffer.
    buf: [u8; BUF_SIZE],
    // Ring-buffer head cursor: next byte to pop lives at `bhead % BUF_SIZE`.
    // Monotonically increasing; wraps via `Wrapping`.
    bhead: Wrapping<usize>,
    // Ring-buffer tail cursor: next free slot is `btail % BUF_SIZE`;
    // `btail - bhead` is the number of buffered bytes (see `buf_len`).
    btail: Wrapping<usize>,
}
impl I8042Device {
    /// Constructs an i8042 device that will signal the given event when the guest requests it.
    pub fn new(reset_evt: EventFd) -> Result<I8042Device, std::io::Error> {
        Ok(I8042Device {
            reset_evt,
            kbd_interrupt_evt: EventFd::new(libc::EFD_NONBLOCK)?,
            // POST passed and keyboard interrupt enabled — the state a guest
            // expects to find after firmware hand-off.
            control: CB_POST_OK | CB_KBD_INT,
            cmd: 0,
            outp: 0,
            status: SB_KBD_ENABLED,
            buf: [0; BUF_SIZE],
            bhead: Wrapping(0),
            btail: Wrapping(0),
        })
    }
    /// Signal a ctrl-alt-del (reset) event.
    #[inline]
    pub fn trigger_ctrl_alt_del(&mut self) -> Result<(), I8042Error> {
        // The CTRL+ALT+DEL sequence is 4 bytes in total (1 extended key + 2 normal keys).
        // Fail if we don't have room for the whole sequence.
        if BUF_SIZE - self.buf_len() < 4 {
            return Err(I8042Error::InternalBufferFull);
        }
        self.trigger_key(KEY_CTRL)?;
        self.trigger_key(KEY_ALT)?;
        self.trigger_key(KEY_DEL)?;
        Ok(())
    }
    // Fire the keyboard interrupt eventfd, unless the guest cleared the
    // "kbd interrupt enabled" bit in the control register.
    fn trigger_kbd_interrupt(&self) -> Result<(), I8042Error> {
        if (self.control & CB_KBD_INT) == 0 {
            warn!("Failed to trigger i8042 kbd interrupt (disabled by guest OS)");
            return Err(I8042Error::KbdInterruptDisabled);
        }
        self.kbd_interrupt_evt
            .write(1)
            .map_err(I8042Error::KbdInterruptFailure)
    }
    // Queue the scan code for `key` and raise the keyboard interrupt.
    // Extended keys (non-zero high byte, e.g. DEL's 0xE0 prefix) are pushed
    // MSB first, as two consecutive bytes.
    fn trigger_key(&mut self, key: u16) -> Result<(), I8042Error> {
        if key & 0xff00 != 0 {
            // Check if there is enough room in the buffer, before pushing an extended (2-byte) key.
            if BUF_SIZE - self.buf_len() < 2 {
                return Err(I8042Error::InternalBufferFull);
            }
            self.push_byte((key >> 8) as u8)?;
        }
        self.push_byte((key & 0xff) as u8)?;
        // A disabled interrupt is not an error here: the byte stays queued
        // and the guest can still pick it up by polling the status port.
        match self.trigger_kbd_interrupt() {
            Ok(_) | Err(I8042Error::KbdInterruptDisabled) => Ok(()),
            Err(err) => Err(err),
        }
    }
    // Append one byte at the tail of the ring buffer.
    #[inline]
    fn push_byte(&mut self, byte: u8) -> Result<(), I8042Error> {
        // Deliberately set before the full-buffer check below: a full buffer
        // still holds readable data, so the flag must be on either way.
        self.status |= SB_OUT_DATA_AVAIL;
        if self.buf_len() == BUF_SIZE {
            return Err(I8042Error::InternalBufferFull);
        }
        self.buf[self.btail.0 % BUF_SIZE] = byte;
        self.btail += Wrapping(1usize);
        Ok(())
    }
    // Remove and return the byte at the head of the ring buffer, if any.
    #[inline]
    fn pop_byte(&mut self) -> Option<u8> {
        if self.buf_len() == 0 {
            return None;
        }
        let res = self.buf[self.bhead.0 % BUF_SIZE];
        self.bhead += Wrapping(1usize);
        // Clear the "data available" status bit once the buffer drains.
        if self.buf_len() == 0 {
            self.status &= !SB_OUT_DATA_AVAIL;
        }
        Some(res)
    }
    // Drop all buffered bytes and clear the "data available" status bit.
    #[inline]
    fn flush_buf(&mut self) {
        self.bhead = Wrapping(0usize);
        self.btail = Wrapping(0usize);
        self.status &= !SB_OUT_DATA_AVAIL;
    }
    // Number of buffered bytes; wrapping subtraction keeps this correct
    // across cursor overflow.
    #[inline]
    fn buf_len(&self) -> usize {
        (self.btail - self.bhead).0
    }
}
impl BusDevice for I8042Device {
    /// Guest read from the device; `offset` selects the data (0x60) or
    /// status (0x64) port. Only byte-wide accesses are handled.
    fn read(&mut self, _base: u64, offset: u64, data: &mut [u8]) {
        // All our ports are byte-wide. We don't know how to handle any wider data.
        if data.len() != 1 {
            METRICS.missed_read_count.inc();
            return;
        }
        let mut read_ok = true;
        match offset {
            OFS_STATUS => data[0] = self.status,
            OFS_DATA => {
                // The guest wants to read a byte from port 0x60. For the 8042, that means the top
                // byte in the internal buffer. If the buffer is empty, the guest will get a 0.
                data[0] = self.pop_byte().unwrap_or(0);
                // Check if we still have data in the internal buffer. If so, we need to trigger
                // another interrupt, to let the guest know they need to issue another read from
                // port 0x60.
                if (self.status & SB_OUT_DATA_AVAIL) != 0
                    && let Err(I8042Error::KbdInterruptFailure(err)) = self.trigger_kbd_interrupt()
                {
                    warn!("Failed to trigger i8042 kbd interrupt {:?}", err);
                }
            }
            _ => read_ok = false,
        }
        if read_ok {
            METRICS.read_count.add(data.len() as u64);
        } else {
            METRICS.missed_read_count.inc();
        }
    }
    /// Guest write to the device. Returns `None`: this device never asks the
    /// caller to wait on a barrier.
    fn write(&mut self, _base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
        // All our ports are byte-wide. We don't know how to handle any wider data.
        if data.len() != 1 {
            METRICS.missed_write_count.inc();
            return None;
        }
        let mut write_ok = true;
        match offset {
            OFS_STATUS if data[0] == CMD_RESET_CPU => {
                // The guest wants to assert the CPU reset line. We handle that by triggering
                // our exit event fd. Meaning Firecracker will be exiting as soon as the VMM
                // thread wakes up to handle this event.
                if let Err(err) = self.reset_evt.write(1) {
                    error!("Failed to trigger i8042 reset event: {:?}", err);
                    METRICS.error_count.inc();
                }
                // Counted even if signaling the eventfd failed above.
                METRICS.reset_count.inc();
            }
            OFS_STATUS if data[0] == CMD_READ_CTR => {
                // The guest wants to read the control register.
                // Let's make sure only the control register will be available for reading from
                // the data port, for the next inb(0x60).
                self.flush_buf();
                let control = self.control;
                // Buffer is empty, push() will always succeed.
                self.push_byte(control).unwrap();
            }
            OFS_STATUS if data[0] == CMD_WRITE_CTR => {
                // The guest wants to write the control register. This is a two-step command:
                // 1. port 0x64 < CMD_WRITE_CTR
                // 2. port 0x60 < <control reg value>
                // Make sure we'll be expecting the control reg value on port 0x60 for the next
                // write.
                self.flush_buf();
                self.status |= SB_I8042_CMD_DATA;
                self.cmd = data[0];
            }
            OFS_STATUS if data[0] == CMD_READ_OUTP => {
                // The guest wants to read the output port (for lack of a better name - this is
                // just another register on the 8042, that happens to also have its bits connected
                // to some output pins of the 8042).
                self.flush_buf();
                let outp = self.outp;
                // Buffer is empty, push() will always succeed.
                self.push_byte(outp).unwrap();
            }
            OFS_STATUS if data[0] == CMD_WRITE_OUTP => {
                // Similar to writing the control register, this is a two-step command.
                // I.e. write CMD_WRITE_OUTP at port 0x64, then write the actual out port value
                // to port 0x60.
                self.status |= SB_I8042_CMD_DATA;
                self.cmd = data[0];
            }
            OFS_DATA if (self.status & SB_I8042_CMD_DATA) != 0 => {
                // The guest is writing to port 0x60. This byte can either be:
                // 1. the payload byte of a CMD_WRITE_CTR or CMD_WRITE_OUTP command, in which case
                //    the status reg bit SB_I8042_CMD_DATA will be set, or
                // 2. a direct command sent to the keyboard
                // This match arm handles the first option (when the SB_I8042_CMD_DATA bit is set).
                match self.cmd {
                    CMD_WRITE_CTR => self.control = data[0],
                    CMD_WRITE_OUTP => self.outp = data[0],
                    _ => (),
                }
                // Payload consumed; leave two-step-command mode.
                self.status &= !SB_I8042_CMD_DATA;
            }
            OFS_DATA => {
                // The guest is sending a command straight to the keyboard (so this byte is not
                // addressed to the 8042, but to the keyboard). Since we're emulating a pretty
                // dumb keyboard, we can get away with blindly ack-in anything (byte 0xFA).
                // Something along the lines of "Yeah, uhm-uhm, yeah, okay, honey, that's great."
                self.flush_buf();
                // Buffer is empty, push() will always succeed.
                self.push_byte(0xFA).unwrap();
                if let Err(I8042Error::KbdInterruptFailure(err)) = self.trigger_kbd_interrupt() {
                    warn!("Failed to trigger i8042 kbd interrupt {:?}", err);
                }
            }
            _ => {
                write_ok = false;
            }
        }
        if write_ok {
            METRICS.write_count.inc();
        } else {
            METRICS.missed_write_count.inc();
        }
        None
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Errors compare equal iff their display strings match; sufficient for
    // the assertions below.
    impl PartialEq for I8042Error {
        fn eq(&self, other: &I8042Error) -> bool {
            self.to_string() == other.to_string()
        }
    }
    #[test]
    fn test_i8042_read_write_and_event() {
        let mut i8042 = I8042Device::new(EventFd::new(libc::EFD_NONBLOCK).unwrap()).unwrap();
        let reset_evt = i8042.reset_evt.try_clone().unwrap();
        // Check if reading in a 2-length array doesn't have side effects.
        let mut data = [1, 2];
        i8042.read(0x0, 0, &mut data);
        assert_eq!(data, [1, 2]);
        i8042.read(0x0, 1, &mut data);
        assert_eq!(data, [1, 2]);
        // Check if reset works.
        // Write 1 to the reset event fd, so that read doesn't block in case the event fd
        // counter doesn't change (for 0 it blocks).
        reset_evt.write(1).unwrap();
        let mut data = [CMD_RESET_CPU];
        i8042.write(0x0, OFS_STATUS, &data);
        // Counter is 2: our priming write above plus the device's reset write.
        assert_eq!(reset_evt.read().unwrap(), 2);
        // Check if reading with offset 1 doesn't have side effects.
        i8042.read(0x0, 1, &mut data);
        assert_eq!(data[0], CMD_RESET_CPU);
        // Check invalid `write`s.
        // Snapshot the (process-global) metric first so parallel tests can't
        // skew the delta baseline.
        let before = METRICS.missed_write_count.count();
        // offset != 0.
        i8042.write(0x0, 1, &data);
        // data != CMD_RESET_CPU
        data[0] = CMD_RESET_CPU + 1;
        i8042.write(0x0, 1, &data);
        // data.len() != 1
        let data = [CMD_RESET_CPU; 2];
        i8042.write(0x0, 1, &data);
        assert_eq!(METRICS.missed_write_count.count(), before + 3);
    }
    #[test]
    fn test_i8042_commands() {
        let mut i8042 = I8042Device::new(EventFd::new(libc::EFD_NONBLOCK).unwrap()).unwrap();
        let mut data = [1];
        // Test reading/writing the control register.
        data[0] = CMD_WRITE_CTR;
        i8042.write(0x0, OFS_STATUS, &data);
        assert_ne!(i8042.status & SB_I8042_CMD_DATA, 0);
        data[0] = 0x52;
        i8042.write(0x0, OFS_DATA, &data);
        data[0] = CMD_READ_CTR;
        i8042.write(0x0, OFS_STATUS, &data);
        assert_ne!(i8042.status & SB_OUT_DATA_AVAIL, 0);
        i8042.read(0x0, OFS_DATA, &mut data);
        assert_eq!(data[0], 0x52);
        // Test reading/writing the output port.
        data[0] = CMD_WRITE_OUTP;
        i8042.write(0x0, OFS_STATUS, &data);
        assert_ne!(i8042.status & SB_I8042_CMD_DATA, 0);
        data[0] = 0x52;
        i8042.write(0x0, OFS_DATA, &data);
        data[0] = CMD_READ_OUTP;
        i8042.write(0x0, OFS_STATUS, &data);
        assert_ne!(i8042.status & SB_OUT_DATA_AVAIL, 0);
        i8042.read(0x0, OFS_DATA, &mut data);
        assert_eq!(data[0], 0x52);
        // Test kbd commands.
        // A direct keyboard command is blindly ACKed with 0xFA.
        data[0] = 0x52;
        i8042.write(0x0, OFS_DATA, &data);
        assert_ne!(i8042.status & SB_OUT_DATA_AVAIL, 0);
        i8042.read(0x0, OFS_DATA, &mut data);
        assert_eq!(data[0], 0xFA);
    }
    #[test]
    fn test_i8042_buffer() {
        let mut i8042 = I8042Device::new(EventFd::new(libc::EFD_NONBLOCK).unwrap()).unwrap();
        // Test push/pop.
        i8042.push_byte(52).unwrap();
        assert_ne!(i8042.status & SB_OUT_DATA_AVAIL, 0);
        assert_eq!(i8042.pop_byte().unwrap(), 52);
        assert_eq!(i8042.status & SB_OUT_DATA_AVAIL, 0);
        // Test empty buffer pop.
        assert!(i8042.pop_byte().is_none());
        // Test buffer full.
        for i in 0..BUF_SIZE {
            i8042.push_byte(i.try_into().unwrap()).unwrap();
            assert_eq!(i8042.buf_len(), i + 1);
        }
        assert_eq!(
            i8042.push_byte(0).unwrap_err(),
            I8042Error::InternalBufferFull
        );
    }
    #[test]
    fn test_i8042_kbd() {
        let mut i8042 = I8042Device::new(EventFd::new(libc::EFD_NONBLOCK).unwrap()).unwrap();
        // Drain one (possibly extended) key and verify both the interrupt
        // line and the scan-code byte order (MSB first for extended keys).
        fn expect_key(i8042: &mut I8042Device, key: u16) {
            let mut data = [1];
            // The interrupt line should be on.
            i8042.trigger_kbd_interrupt().unwrap();
            assert!(i8042.kbd_interrupt_evt.read().unwrap() > 1);
            // The "data available" flag should be on.
            i8042.read(0x0, OFS_STATUS, &mut data);
            let mut key_byte: u8;
            if key & 0xFF00 != 0 {
                // For extended keys, we should be able to read the MSB first.
                key_byte = ((key & 0xFF00) >> 8) as u8;
                i8042.read(0x0, OFS_DATA, &mut data);
                assert_eq!(data[0], key_byte);
                // And then do the same for the LSB.
                // The interrupt line should be on.
                i8042.trigger_kbd_interrupt().unwrap();
                assert!(i8042.kbd_interrupt_evt.read().unwrap() > 1);
                // The "data available" flag should be on.
                i8042.read(0x0, OFS_STATUS, &mut data);
            }
            key_byte = (key & 0xFF) as u8;
            i8042.read(0x0, OFS_DATA, &mut data);
            assert_eq!(data[0], key_byte);
        }
        // Test key trigger.
        i8042.trigger_key(KEY_CTRL).unwrap();
        expect_key(&mut i8042, KEY_CTRL);
        // Test extended key trigger.
        i8042.trigger_key(KEY_DEL).unwrap();
        expect_key(&mut i8042, KEY_DEL);
        // Test CTRL+ALT+DEL trigger.
        i8042.trigger_ctrl_alt_del().unwrap();
        expect_key(&mut i8042, KEY_CTRL);
        expect_key(&mut i8042, KEY_ALT);
        expect_key(&mut i8042, KEY_DEL);
        // Almost fill up the buffer, so we can test trigger failures.
        for _i in 0..BUF_SIZE - 1 {
            i8042.push_byte(1).unwrap();
        }
        // Test extended key trigger failure.
        // One free slot is not enough for a 2-byte extended key.
        assert_eq!(i8042.buf_len(), BUF_SIZE - 1);
        assert_eq!(
            i8042.trigger_key(KEY_DEL).unwrap_err(),
            I8042Error::InternalBufferFull
        );
        // Test ctrl+alt+del trigger failure.
        // Three free slots are not enough for the 4-byte sequence.
        i8042.pop_byte().unwrap();
        i8042.pop_byte().unwrap();
        assert_eq!(i8042.buf_len(), BUF_SIZE - 3);
        assert_eq!(
            i8042.trigger_ctrl_alt_del().unwrap_err(),
            I8042Error::InternalBufferFull
        );
        // Test kbd interrupt disable.
        let mut data = [1];
        data[0] = CMD_WRITE_CTR;
        i8042.write(0x0, OFS_STATUS, &data);
        data[0] = i8042.control & !CB_KBD_INT;
        i8042.write(0x0, OFS_DATA, &data);
        // Queuing still succeeds with interrupts disabled...
        i8042.trigger_key(KEY_CTRL).unwrap();
        // ...but raising the interrupt explicitly reports the disablement.
        assert_eq!(
            i8042.trigger_kbd_interrupt().unwrap_err(),
            I8042Error::KbdInterruptDisabled
        )
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/legacy/mod.rs | src/vmm/src/devices/legacy/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! Implements legacy devices (UART, RTC etc).
mod i8042;
#[cfg(target_arch = "aarch64")]
pub mod rtc_pl031;
pub mod serial;
use std::io;
use std::ops::Deref;
use serde::Serializer;
use serde::ser::SerializeMap;
use vm_superio::Trigger;
use vmm_sys_util::eventfd::EventFd;
pub use self::i8042::{I8042Device, I8042Error as I8042DeviceError};
#[cfg(target_arch = "aarch64")]
pub use self::rtc_pl031::RTCDevice;
pub use self::serial::{
IER_RDA_BIT, IER_RDA_OFFSET, SerialDevice, SerialEventsWrapper, SerialWrapper,
};
/// Wrapper for implementing the trigger functionality for `EventFd`.
///
/// The trigger is used for handling events in the legacy devices.
// Newtype over `EventFd`; the `Deref` impl below exposes the wrapped fd's API.
#[derive(Debug)]
pub struct EventFdTrigger(EventFd);
impl Trigger for EventFdTrigger {
    type E = io::Error;
    // Raising the trigger bumps the eventfd counter by one.
    fn trigger(&self) -> io::Result<()> {
        self.write(1)
    }
}
impl Deref for EventFdTrigger {
    type Target = EventFd;
    // Let callers use the wrapped `EventFd`'s methods (e.g. `write`) directly.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl EventFdTrigger {
    /// Clone an `EventFdTrigger`.
    ///
    /// # Errors
    ///
    /// Propagates the `io::Error` if duplicating the underlying eventfd fails.
    pub fn try_clone(&self) -> io::Result<Self> {
        Ok(EventFdTrigger((**self).try_clone()?))
    }
    /// Create an `EventFdTrigger`.
    pub fn new(evt: EventFd) -> Self {
        Self(evt)
    }
    /// Get the associated event fd out of an `EventFdTrigger`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying eventfd cannot be duplicated (e.g. fd limit
    /// reached).
    pub fn get_event(&self) -> EventFd {
        // Use `expect` with context rather than a bare `unwrap` so a failed
        // dup of the eventfd produces an actionable panic message.
        self.0
            .try_clone()
            .expect("failed to duplicate eventfd in EventFdTrigger::get_event")
    }
}
/// Called by METRICS.flush(), this function facilitates serialization of aggregated metrics.
pub fn flush_metrics<S: Serializer>(serializer: S) -> Result<S::Ok, S::Error> {
    // Two entries are written on x86_64 ("i8042", "uart") and three on
    // aarch64 (plus "rtc"), so pass `None` as the map length hint instead of
    // the previously incorrect `Some(1)`; serializers that honor the hint
    // would otherwise produce a malformed map.
    let mut seq = serializer.serialize_map(None)?;
    seq.serialize_entry("i8042", &i8042::METRICS)?;
    #[cfg(target_arch = "aarch64")]
    seq.serialize_entry("rtc", &rtc_pl031::METRICS)?;
    seq.serialize_entry("uart", &serial::METRICS)?;
    seq.end()
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/legacy/rtc_pl031.rs | src/vmm/src/devices/legacy/rtc_pl031.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::convert::TryInto;
use serde::Serialize;
use vm_superio::Rtc;
use vm_superio::rtc_pl031::RtcEvents;
use crate::logger::{IncMetric, SharedIncMetric, warn};
/// Metrics specific to the RTC device.
///
/// Serialized under the "rtc" key by `flush_metrics` in the parent module
/// (aarch64 builds only).
#[derive(Debug, Serialize, Default)]
pub struct RTCDeviceMetrics {
    /// Errors triggered while using the RTC device.
    pub error_count: SharedIncMetric,
    /// Number of superfluous read intents on this RTC device.
    pub missed_read_count: SharedIncMetric,
    /// Number of superfluous write intents on this RTC device.
    pub missed_write_count: SharedIncMetric,
}
impl RTCDeviceMetrics {
    /// Build a metrics block with every counter zeroed.
    ///
    /// `const` so it can back the module-level `METRICS` static (and the
    /// per-test statics in the tests below).
    pub const fn new() -> Self {
        Self {
            missed_read_count: SharedIncMetric::new(),
            missed_write_count: SharedIncMetric::new(),
            error_count: SharedIncMetric::new(),
        }
    }
}
impl RtcEvents for RTCDeviceMetrics {
    // Callback invoked by vm_superio's RTC when the guest reads an invalid
    // offset; counts both the miss and the overall error.
    fn invalid_read(&self) {
        self.missed_read_count.inc();
        self.error_count.inc();
        warn!("Guest read at invalid offset.")
    }
    // Same as above, for writes.
    fn invalid_write(&self) {
        self.missed_write_count.inc();
        self.error_count.inc();
        warn!("Guest write at invalid offset.")
    }
}
impl RtcEvents for &'static RTCDeviceMetrics {
    // Forwarding impl so a `&'static RTCDeviceMetrics` — the type actually
    // stored inside `Rtc` (see `RTCDevice`) — can serve as the event handler.
    fn invalid_read(&self) {
        RTCDeviceMetrics::invalid_read(self);
    }
    fn invalid_write(&self) {
        RTCDeviceMetrics::invalid_write(self);
    }
}
/// Stores aggregated metrics
// Process-wide instance used by the default `RTCDevice` and by the parent
// module's `flush_metrics`.
pub static METRICS: RTCDeviceMetrics = RTCDeviceMetrics::new();
/// Wrapper over vm_superio's RTC implementation.
// Newtype so bus glue (`BusDevice`, `bus_read`/`bus_write`) can be hung off
// the vm_superio type.
#[derive(Debug)]
pub struct RTCDevice(vm_superio::Rtc<&'static RTCDeviceMetrics>);
impl Default for RTCDevice {
    // The default device reports invalid accesses into the module-level
    // `METRICS`.
    fn default() -> Self {
        RTCDevice(Rtc::with_events(&METRICS))
    }
}
impl RTCDevice {
    /// Create an RTC device; identical to `RTCDevice::default()`, which wires
    /// the device to the module-level `METRICS`.
    pub fn new() -> RTCDevice {
        Self::default()
    }
}
impl std::ops::Deref for RTCDevice {
    type Target = vm_superio::Rtc<&'static RTCDeviceMetrics>;
    // Expose the wrapped vm_superio RTC's API directly.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl std::ops::DerefMut for RTCDevice {
    // Mutable companion to `Deref`; needed because the RTC's `read`/`write`
    // (called from `bus_read`/`bus_write`) take `&mut self`.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
// Bus access helpers for the AMBA PL031 RTC device.
impl RTCDevice {
    /// Dispatch a bus read to the PL031 model.
    ///
    /// Only 4-byte accesses at offsets that fit in a `u16` are forwarded;
    /// anything else is logged and counted as an error.
    pub fn bus_read(&mut self, offset: u64, data: &mut [u8]) {
        match (u16::try_from(offset), data.len()) {
            // The underlying read() expects exactly a 4-byte buffer, which
            // this arm just guaranteed, so the conversion cannot fail.
            (Ok(offset), 4) => self.read(offset, data.try_into().unwrap()),
            _ => {
                warn!(
                    "Found invalid data offset/length while trying to read from the RTC: {}, {}",
                    offset,
                    data.len()
                );
                METRICS.error_count.inc();
            }
        }
    }
    /// Dispatch a bus write to the PL031 model; same validation as `bus_read`.
    pub fn bus_write(&mut self, offset: u64, data: &[u8]) {
        match (u16::try_from(offset), data.len()) {
            (Ok(offset), 4) => self.write(offset, data.try_into().unwrap()),
            _ => {
                warn!(
                    "Found invalid data offset/length while trying to write to the RTC: {}, {}",
                    offset,
                    data.len()
                );
                METRICS.error_count.inc();
            }
        }
    }
}
#[cfg(target_arch = "aarch64")]
impl crate::vstate::bus::BusDevice for RTCDevice {
fn read(&mut self, _base: u64, offset: u64, data: &mut [u8]) {
self.bus_read(offset, data)
}
fn write(
&mut self,
_base: u64,
offset: u64,
data: &[u8],
) -> Option<std::sync::Arc<std::sync::Barrier>> {
self.bus_write(offset, data);
None
}
}
#[cfg(test)]
mod tests {
    use vm_superio::Rtc;
    use super::*;
    use crate::logger::IncMetric;
    #[test]
    fn test_rtc_device() {
        // Dedicated metrics instance so counters aren't shared with other
        // tests running in parallel.
        static TEST_RTC_DEVICE_METRICS: RTCDeviceMetrics = RTCDeviceMetrics::new();
        let mut rtc_pl031 = RTCDevice(Rtc::with_events(&TEST_RTC_DEVICE_METRICS));
        let data = [0; 4];
        // Write to the DR register. Since this is a RO register, the write
        // function should fail.
        let invalid_writes_before = TEST_RTC_DEVICE_METRICS.missed_write_count.count();
        let error_count_before = TEST_RTC_DEVICE_METRICS.error_count.count();
        rtc_pl031.bus_write(0x000, &data);
        let invalid_writes_after = TEST_RTC_DEVICE_METRICS.missed_write_count.count();
        let error_count_after = TEST_RTC_DEVICE_METRICS.error_count.count();
        assert_eq!(invalid_writes_after - invalid_writes_before, 1);
        assert_eq!(error_count_after - error_count_before, 1);
    }
    #[test]
    fn test_rtc_invalid_buf_len() {
        static TEST_RTC_INVALID_BUF_LEN_METRICS: RTCDeviceMetrics = RTCDeviceMetrics::new();
        let mut rtc_pl031 = RTCDevice(Rtc::with_events(&TEST_RTC_INVALID_BUF_LEN_METRICS));
        let write_data_good = 123u32.to_le_bytes();
        let mut data_bad = [0; 2];
        let mut read_data_good = [0; 4];
        // A valid 4-byte write at offset 0x008, then an invalid 2-byte one
        // that must be rejected without touching the device state.
        rtc_pl031.bus_write(0x008, &write_data_good);
        rtc_pl031.bus_write(0x008, &data_bad);
        rtc_pl031.bus_read(0x008, &mut read_data_good);
        rtc_pl031.bus_read(0x008, &mut data_bad);
        // The valid write stuck; the invalid read left the buffer untouched.
        assert_eq!(u32::from_le_bytes(read_data_good), 123);
        assert_eq!(u16::from_le_bytes(data_bad), 0);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/pseudo/boot_timer.rs | src/vmm/src/devices/pseudo/boot_timer.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::sync::{Arc, Barrier};
use utils::time::TimestampUs;
use crate::logger::info;
use crate::vstate::bus::BusDevice;
// Magic byte the guest writes (at offset 0) to mark the end of boot.
const MAGIC_VALUE_SIGNAL_GUEST_BOOT_COMPLETE: u8 = 123;
/// Pseudo device to record the kernel boot time.
#[derive(Debug, Clone)]
pub struct BootTimer {
    // Reference timestamp captured at device creation; `write` subtracts it
    // from "now" to compute the boot duration.
    start_ts: TimestampUs,
}
impl BusDevice for BootTimer {
    /// Guest write handler: a single magic byte at offset 0 marks boot
    /// completion and logs the elapsed wall-clock and CPU time since
    /// `start_ts`. Always returns `None` (no barrier needed).
    fn write(&mut self, _base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
        // Anything other than a one-byte write of the magic value at offset
        // zero is silently ignored.
        if offset == 0 && data.len() == 1 && data[0] == MAGIC_VALUE_SIGNAL_GUEST_BOOT_COMPLETE {
            let now = TimestampUs::default();
            let wall_us = now.time_us - self.start_ts.time_us;
            let cpu_us = now.cputime_us - self.start_ts.cputime_us;
            info!(
                "Guest-boot-time = {:>6} us {} ms, {:>6} CPU us {} CPU ms",
                wall_us,
                wall_us / 1000,
                cpu_us,
                cpu_us / 1000
            );
        }
        None
    }
    /// Reads are a no-op for this device.
    fn read(&mut self, _base: u64, _offset: u64, _data: &mut [u8]) {}
}
impl BootTimer {
    /// Create a device at a certain point in time.
    ///
    /// `start_ts` becomes the reference against which `write` computes the
    /// guest boot duration.
    pub fn new(start_ts: TimestampUs) -> BootTimer {
        BootTimer { start_ts }
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/pseudo/mod.rs | src/vmm/src/devices/pseudo/mod.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Implements Firecracker specific devices (e.g. signal when boot is completed).
mod boot_timer;
pub use self::boot_timer::BootTimer;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/pci/pci_segment.rs | src/vmm/src/devices/pci/pci_segment.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright © 2019 - 2021 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
//
use std::sync::{Arc, Mutex};
#[cfg(target_arch = "x86_64")]
use acpi_tables::{Aml, aml};
use log::info;
use pci::PciBdf;
#[cfg(target_arch = "x86_64")]
use uuid::Uuid;
use vm_allocator::AddressAllocator;
use crate::arch::{ArchVm as Vm, PCI_MMCONFIG_START, PCI_MMIO_CONFIG_SIZE_PER_SEGMENT};
#[cfg(target_arch = "x86_64")]
use crate::pci::bus::{PCI_CONFIG_IO_PORT, PCI_CONFIG_IO_PORT_SIZE};
use crate::pci::bus::{PciBus, PciConfigIo, PciConfigMmio, PciRoot, PciRootError};
use crate::vstate::bus::{BusDeviceSync, BusError};
use crate::vstate::resources::ResourceAllocator;
pub struct PciSegment {
    // PCI segment (domain) number; also indexes the MMCONFIG slice.
    pub(crate) id: u16,
    // Root bus carried by this segment.
    pub(crate) pci_bus: Arc<Mutex<PciBus>>,
    // MMIO (MMCONFIG) config-space access device.
    pub(crate) pci_config_mmio: Arc<Mutex<PciConfigMmio>>,
    // Guest-physical base of this segment's MMCONFIG window.
    pub(crate) mmio_config_address: u64,
    // NUMA proximity domain reported via the ACPI _PXM method.
    pub(crate) proximity_domain: u32,
    #[cfg(target_arch = "x86_64")]
    // Legacy port-I/O config-space access device (x86_64 only).
    pub(crate) pci_config_io: Option<Arc<Mutex<PciConfigIo>>>,
    // Bitmap of PCI devices to hotplug.
    pub(crate) pci_devices_up: u32,
    // Bitmap of PCI devices to hotunplug.
    pub(crate) pci_devices_down: u32,
    // List of allocated IRQs for each PCI slot.
    pub(crate) pci_irq_slots: [u8; 32],
    // Device memory covered by this segment
    pub(crate) start_of_mem32_area: u64,
    pub(crate) end_of_mem32_area: u64,
    pub(crate) start_of_mem64_area: u64,
    pub(crate) end_of_mem64_area: u64,
}
impl std::fmt::Debug for PciSegment {
    // Hand-rolled Debug: the bus/config-space handles (`pci_bus`,
    // `pci_config_mmio`, `pci_config_io`) are deliberately omitted —
    // presumably because they don't implement `Debug`; confirm before
    // switching to `#[derive(Debug)]`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("PciSegment")
            .field("id", &self.id)
            .field("mmio_config_address", &self.mmio_config_address)
            .field("proximity_domain", &self.proximity_domain)
            .field("pci_devices_up", &self.pci_devices_up)
            .field("pci_devices_down", &self.pci_devices_down)
            .field("pci_irq_slots", &self.pci_irq_slots)
            .field("start_of_mem32_area", &self.start_of_mem32_area)
            .field("end_of_mem32_area", &self.end_of_mem32_area)
            .field("start_of_mem64_area", &self.start_of_mem64_area)
            .field("end_of_mem64_area", &self.end_of_mem64_area)
            .finish()
    }
}
impl PciSegment {
    // Arch-independent construction: create the root bus, register the
    // segment's MMCONFIG window on the VM's MMIO bus, and record the
    // 32/64-bit device memory apertures handed out by the resource allocator.
    fn build(id: u16, vm: &Arc<Vm>, pci_irq_slots: &[u8; 32]) -> Result<PciSegment, BusError> {
        let pci_root = PciRoot::new(None);
        let pci_bus = Arc::new(Mutex::new(PciBus::new(pci_root, vm.clone())));
        let pci_config_mmio = Arc::new(Mutex::new(PciConfigMmio::new(Arc::clone(&pci_bus))));
        // Each segment owns a fixed-size slice of the MMCONFIG area, indexed
        // by segment id.
        let mmio_config_address = PCI_MMCONFIG_START + PCI_MMIO_CONFIG_SIZE_PER_SEGMENT * id as u64;
        vm.common.mmio_bus.insert(
            Arc::clone(&pci_config_mmio) as Arc<dyn BusDeviceSync>,
            mmio_config_address,
            PCI_MMIO_CONFIG_SIZE_PER_SEGMENT,
        )?;
        let resource_allocator = vm.resource_allocator();
        let start_of_mem32_area = resource_allocator.mmio32_memory.base();
        let end_of_mem32_area = resource_allocator.mmio32_memory.end();
        let start_of_mem64_area = resource_allocator.mmio64_memory.base();
        let end_of_mem64_area = resource_allocator.mmio64_memory.end();
        let segment = PciSegment {
            id,
            pci_bus,
            pci_config_mmio,
            mmio_config_address,
            proximity_domain: 0,
            pci_devices_up: 0,
            pci_devices_down: 0,
            #[cfg(target_arch = "x86_64")]
            pci_config_io: None,
            start_of_mem32_area,
            end_of_mem32_area,
            start_of_mem64_area,
            end_of_mem64_area,
            pci_irq_slots: *pci_irq_slots,
        };
        Ok(segment)
    }
    // x86_64 construction: on top of `build()`, also register the legacy
    // port-I/O config-space device on the VM's PIO bus.
    #[cfg(target_arch = "x86_64")]
    pub(crate) fn new(
        id: u16,
        vm: &Arc<Vm>,
        pci_irq_slots: &[u8; 32],
    ) -> Result<PciSegment, BusError> {
        // NOTE(review): this import looks unused within the function body —
        // confirm whether it can be removed.
        use crate::Vm;
        let mut segment = Self::build(id, vm, pci_irq_slots)?;
        let pci_config_io = Arc::new(Mutex::new(PciConfigIo::new(Arc::clone(&segment.pci_bus))));
        vm.pio_bus.insert(
            pci_config_io.clone(),
            PCI_CONFIG_IO_PORT,
            PCI_CONFIG_IO_PORT_SIZE,
        )?;
        segment.pci_config_io = Some(pci_config_io);
        info!(
            "pci: adding PCI segment: id={:#x}, PCI MMIO config address: {:#x}, mem32 area: \
             [{:#x}-{:#x}], mem64 area: [{:#x}-{:#x}] IO area: [{PCI_CONFIG_IO_PORT:#x}-{:#x}]",
            segment.id,
            segment.mmio_config_address,
            segment.start_of_mem32_area,
            segment.end_of_mem32_area,
            segment.start_of_mem64_area,
            segment.end_of_mem64_area,
            PCI_CONFIG_IO_PORT + PCI_CONFIG_IO_PORT_SIZE - 1
        );
        Ok(segment)
    }
    // aarch64 construction: no legacy port-I/O; just build and log.
    #[cfg(target_arch = "aarch64")]
    pub(crate) fn new(
        id: u16,
        vm: &Arc<Vm>,
        pci_irq_slots: &[u8; 32],
    ) -> Result<PciSegment, BusError> {
        let segment = Self::build(id, vm, pci_irq_slots)?;
        info!(
            "pci: adding PCI segment: id={:#x}, PCI MMIO config address: {:#x}, mem32 area: \
             [{:#x}-{:#x}], mem64 area: [{:#x}-{:#x}]",
            segment.id,
            segment.mmio_config_address,
            segment.start_of_mem32_area,
            segment.end_of_mem32_area,
            segment.start_of_mem64_area,
            segment.end_of_mem64_area,
        );
        Ok(segment)
    }
    // Allocate the BDF for the next device on this segment: bus 0,
    // function 0, device id handed out by the root bus.
    pub(crate) fn next_device_bdf(&self) -> Result<PciBdf, PciRootError> {
        Ok(PciBdf::new(
            self.id,
            0,
            self.pci_bus
                .lock()
                .unwrap()
                .next_device_id()?
                .try_into()
                .unwrap(),
            0,
        ))
    }
}
#[cfg(target_arch = "x86_64")]
struct PciDevSlot {
device_id: u8,
}
#[cfg(target_arch = "x86_64")]
impl Aml for PciDevSlot {
fn append_aml_bytes(&self, v: &mut Vec<u8>) -> Result<(), aml::AmlError> {
let sun = self.device_id;
let adr: u32 = (self.device_id as u32) << 16;
aml::Device::new(
format!("S{:03}", self.device_id).as_str().try_into()?,
vec![
&aml::Name::new("_SUN".try_into()?, &sun)?,
&aml::Name::new("_ADR".try_into()?, &adr)?,
&aml::Method::new(
"_EJ0".try_into()?,
1,
true,
vec![&aml::MethodCall::new(
"\\_SB_.PHPR.PCEJ".try_into()?,
vec![&aml::Path::new("_SUN")?, &aml::Path::new("_SEG")?],
)],
),
],
)
.append_aml_bytes(v)
}
}
#[cfg(target_arch = "x86_64")]
struct PciDevSlotNotify {
device_id: u8,
}
#[cfg(target_arch = "x86_64")]
impl Aml for PciDevSlotNotify {
fn append_aml_bytes(&self, v: &mut Vec<u8>) -> Result<(), aml::AmlError> {
let device_id_mask: u32 = 1 << self.device_id;
let object = aml::Path::new(&format!("S{:03}", self.device_id))?;
aml::And::new(&aml::Local(0), &aml::Arg(0), &device_id_mask).append_aml_bytes(v)?;
aml::If::new(
&aml::Equal::new(&aml::Local(0), &device_id_mask),
vec![&aml::Notify::new(&object, &aml::Arg(1))],
)
.append_aml_bytes(v)
}
}
#[cfg(target_arch = "x86_64")]
struct PciDevSlotMethods {}
#[cfg(target_arch = "x86_64")]
impl Aml for PciDevSlotMethods {
fn append_aml_bytes(&self, v: &mut Vec<u8>) -> Result<(), aml::AmlError> {
let mut device_notifies = Vec::new();
for device_id in 0..32 {
device_notifies.push(PciDevSlotNotify { device_id });
}
let mut device_notifies_refs: Vec<&dyn Aml> = Vec::new();
for device_notify in device_notifies.iter() {
device_notifies_refs.push(device_notify);
}
aml::Method::new("DVNT".try_into()?, 2, true, device_notifies_refs).append_aml_bytes(v)?;
aml::Method::new(
"PCNT".try_into()?,
0,
true,
vec![
&aml::Acquire::new("\\_SB_.PHPR.BLCK".try_into()?, 0xffff),
&aml::Store::new(
&aml::Path::new("\\_SB_.PHPR.PSEG")?,
&aml::Path::new("_SEG")?,
),
&aml::MethodCall::new(
"DVNT".try_into()?,
vec![&aml::Path::new("\\_SB_.PHPR.PCIU")?, &aml::ONE],
),
&aml::MethodCall::new(
"DVNT".try_into()?,
vec![&aml::Path::new("\\_SB_.PHPR.PCID")?, &3usize],
),
&aml::Release::new("\\_SB_.PHPR.BLCK".try_into()?),
],
)
.append_aml_bytes(v)
}
}
#[cfg(target_arch = "x86_64")]
struct PciDsmMethod {}
#[cfg(target_arch = "x86_64")]
impl Aml for PciDsmMethod {
fn append_aml_bytes(&self, v: &mut Vec<u8>) -> Result<(), aml::AmlError> {
// Refer to ACPI spec v6.3 Ch 9.1.1 and PCI Firmware spec v3.3 Ch 4.6.1
// _DSM (Device Specific Method), the following is the implementation in ASL.
// Method (_DSM, 4, NotSerialized) // _DSM: Device-Specific Method
// {
// If ((Arg0 == ToUUID ("e5c937d0-3553-4d7a-9117-ea4d19c3434d") /* Device Labeling
// Interface */)) {
// If ((Arg2 == Zero))
// {
// Return (Buffer (One) { 0x21 })
// }
// If ((Arg2 == 0x05))
// {
// Return (Zero)
// }
// }
//
// Return (Buffer (One) { 0x00 })
// }
//
// As per ACPI v6.3 Ch 19.6.142, the UUID is required to be in mixed endian:
// Among the fields of a UUID:
// {d1 (8 digits)} - {d2 (4 digits)} - {d3 (4 digits)} - {d4 (16 digits)}
// d1 ~ d3 need to be little endian, d4 be big endian.
// See https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding .
let uuid = Uuid::parse_str("E5C937D0-3553-4D7A-9117-EA4D19C3434D").unwrap();
let (uuid_d1, uuid_d2, uuid_d3, uuid_d4) = uuid.as_fields();
let mut uuid_buf = vec![];
uuid_buf.extend(uuid_d1.to_le_bytes());
uuid_buf.extend(uuid_d2.to_le_bytes());
uuid_buf.extend(uuid_d3.to_le_bytes());
uuid_buf.extend(uuid_d4);
aml::Method::new(
"_DSM".try_into()?,
4,
false,
vec![
&aml::If::new(
&aml::Equal::new(&aml::Arg(0), &aml::Buffer::new(uuid_buf)),
vec![
&aml::If::new(
&aml::Equal::new(&aml::Arg(2), &aml::ZERO),
vec![&aml::Return::new(&aml::Buffer::new(vec![0x21]))],
),
&aml::If::new(
&aml::Equal::new(&aml::Arg(2), &0x05u8),
vec![&aml::Return::new(&aml::ZERO)],
),
],
),
&aml::Return::new(&aml::Buffer::new(vec![0])),
],
)
.append_aml_bytes(v)
}
}
#[cfg(target_arch = "x86_64")]
impl Aml for PciSegment {
    // Emits the ACPI DSDT `Device` node for this PCI segment (named `_SB_.PCxx`),
    // containing identification objects, the current resource settings (_CRS),
    // one hotplug-capable device object per slot, and the interrupt routing
    // table (_PRT).
    fn append_aml_bytes(&self, v: &mut Vec<u8>) -> Result<(), aml::AmlError> {
        let mut pci_dsdt_inner_data: Vec<&dyn Aml> = Vec::new();
        // _HID PNP0A08 identifies a PCI Express root bridge; _CID PNP0A03 keeps
        // compatibility with drivers that only know plain PCI host bridges.
        let hid = aml::Name::new("_HID".try_into()?, &aml::EisaName::new("PNP0A08")?)?;
        pci_dsdt_inner_data.push(&hid);
        let cid = aml::Name::new("_CID".try_into()?, &aml::EisaName::new("PNP0A03")?)?;
        pci_dsdt_inner_data.push(&cid);
        let adr = aml::Name::new("_ADR".try_into()?, &aml::ZERO)?;
        pci_dsdt_inner_data.push(&adr);
        // _SEG: PCI segment group number of this host bridge.
        let seg = aml::Name::new("_SEG".try_into()?, &self.id)?;
        pci_dsdt_inner_data.push(&seg);
        let uid = aml::Name::new("_UID".try_into()?, &aml::ZERO)?;
        pci_dsdt_inner_data.push(&uid);
        // _CCA = One: DMA on this bus is reported as cache coherent.
        let cca = aml::Name::new("_CCA".try_into()?, &aml::ONE)?;
        pci_dsdt_inner_data.push(&cca);
        // SUPP: scratch object conventionally used by _OSC/_DSM machinery.
        let supp = aml::Name::new("SUPP".try_into()?, &aml::ZERO)?;
        pci_dsdt_inner_data.push(&supp);
        // _PXM: NUMA proximity domain this segment belongs to.
        let proximity_domain = self.proximity_domain;
        let pxm_return = aml::Return::new(&proximity_domain);
        let pxm = aml::Method::new("_PXM".try_into()?, 0, false, vec![&pxm_return]);
        pci_dsdt_inner_data.push(&pxm);
        let pci_dsm = PciDsmMethod {};
        pci_dsdt_inner_data.push(&pci_dsm);
        // Segment 0 additionally owns the legacy resources: the 0xcf8 config-access
        // I/O port and the two legacy I/O port windows. All other segments expose
        // only the bus number, their ECAM (MMIO config) window and the 32/64-bit
        // MMIO device windows.
        #[allow(clippy::if_same_then_else)]
        let crs = if self.id == 0 {
            aml::Name::new(
                "_CRS".try_into()?,
                &aml::ResourceTemplate::new(vec![
                    &aml::AddressSpace::new_bus_number(0x0u16, 0x0u16)?,
                    &aml::Io::new(0xcf8, 0xcf8, 1, 0x8),
                    &aml::Memory32Fixed::new(
                        true,
                        self.mmio_config_address.try_into().unwrap(),
                        PCI_MMIO_CONFIG_SIZE_PER_SEGMENT.try_into().unwrap(),
                    ),
                    &aml::AddressSpace::new_memory(
                        aml::AddressSpaceCacheable::NotCacheable,
                        true,
                        self.start_of_mem32_area,
                        self.end_of_mem32_area,
                    )?,
                    &aml::AddressSpace::new_memory(
                        aml::AddressSpaceCacheable::NotCacheable,
                        true,
                        self.start_of_mem64_area,
                        self.end_of_mem64_area,
                    )?,
                    &aml::AddressSpace::new_io(0u16, 0x0cf7u16)?,
                    &aml::AddressSpace::new_io(0x0d00u16, 0xffffu16)?,
                ]),
            )?
        } else {
            aml::Name::new(
                "_CRS".try_into()?,
                &aml::ResourceTemplate::new(vec![
                    &aml::AddressSpace::new_bus_number(0x0u16, 0x0u16)?,
                    &aml::Memory32Fixed::new(
                        true,
                        self.mmio_config_address.try_into().unwrap(),
                        PCI_MMIO_CONFIG_SIZE_PER_SEGMENT.try_into().unwrap(),
                    ),
                    &aml::AddressSpace::new_memory(
                        aml::AddressSpaceCacheable::NotCacheable,
                        true,
                        self.start_of_mem32_area,
                        self.end_of_mem32_area,
                    )?,
                    &aml::AddressSpace::new_memory(
                        aml::AddressSpaceCacheable::NotCacheable,
                        true,
                        self.start_of_mem64_area,
                        self.end_of_mem64_area,
                    )?,
                ]),
            )?
        };
        pci_dsdt_inner_data.push(&crs);
        // One device object per possible slot (32 devices on the single bus).
        let mut pci_devices = Vec::new();
        for device_id in 0..32 {
            let pci_device = PciDevSlot { device_id };
            pci_devices.push(pci_device);
        }
        for pci_device in pci_devices.iter() {
            pci_dsdt_inner_data.push(pci_device);
        }
        let pci_device_methods = PciDevSlotMethods {};
        pci_dsdt_inner_data.push(&pci_device_methods);
        // Build PCI routing table, listing IRQs assigned to PCI devices.
        // _PRT address encoding: high word carries the 5-bit device number,
        // low word 0xffff means "any function".
        let prt_package_list: Vec<(u32, u32)> = self
            .pci_irq_slots
            .iter()
            .enumerate()
            .map(|(i, irq)| {
                (
                    ((((u32::try_from(i).unwrap()) & 0x1fu32) << 16) | 0xffffu32),
                    *irq as u32,
                )
            })
            .collect();
        // Each _PRT entry is Package { address, pin (0 = INTA), source (0 = use
        // global interrupt number), source index (the GSI itself) }.
        let prt_package_list: Vec<aml::Package> = prt_package_list
            .iter()
            .map(|(bdf, irq)| aml::Package::new(vec![bdf, &0u8, &0u8, irq]))
            .collect();
        let prt_package_list: Vec<&dyn Aml> = prt_package_list
            .iter()
            .map(|item| item as &dyn Aml)
            .collect();
        let prt = aml::Name::new("_PRT".try_into()?, &aml::Package::new(prt_package_list))?;
        pci_dsdt_inner_data.push(&prt);
        aml::Device::new(
            format!("_SB_.PC{:02X}", self.id).as_str().try_into()?,
            pci_dsdt_inner_data,
        )
        .append_aml_bytes(v)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::arch;
    use crate::builder::tests::default_vmm;
    use crate::utils::u64_to_usize;
    // A fresh segment 0 must get the arch-defined 32/64-bit MMIO windows, the
    // ECAM base address, and zeroed hotplug counters / IRQ slots.
    #[test]
    fn test_pci_segment_build() {
        let vmm = default_vmm();
        let pci_irq_slots = &[0u8; 32];
        let pci_segment = PciSegment::new(0, &vmm.vm, pci_irq_slots).unwrap();
        assert_eq!(pci_segment.id, 0);
        assert_eq!(
            pci_segment.start_of_mem32_area,
            arch::MEM_32BIT_DEVICES_START
        );
        assert_eq!(
            pci_segment.end_of_mem32_area,
            arch::MEM_32BIT_DEVICES_START + arch::MEM_32BIT_DEVICES_SIZE - 1
        );
        assert_eq!(
            pci_segment.start_of_mem64_area,
            arch::MEM_64BIT_DEVICES_START
        );
        assert_eq!(
            pci_segment.end_of_mem64_area,
            arch::MEM_64BIT_DEVICES_START + arch::MEM_64BIT_DEVICES_SIZE - 1
        );
        assert_eq!(pci_segment.mmio_config_address, arch::PCI_MMCONFIG_START);
        assert_eq!(pci_segment.proximity_domain, 0);
        assert_eq!(pci_segment.pci_devices_up, 0);
        assert_eq!(pci_segment.pci_devices_down, 0);
        assert_eq!(pci_segment.pci_irq_slots, [0u8; 32]);
    }
    // Creating a segment registers the config-access I/O port on the PIO bus:
    // reads inside the port range succeed, reads past it fail.
    #[cfg(target_arch = "x86_64")]
    #[test]
    fn test_io_bus() {
        let vmm = default_vmm();
        let pci_irq_slots = &[0u8; 32];
        let pci_segment = PciSegment::new(0, &vmm.vm, pci_irq_slots).unwrap();
        let mut data = [0u8; u64_to_usize(PCI_CONFIG_IO_PORT_SIZE)];
        vmm.vm.pio_bus.read(PCI_CONFIG_IO_PORT, &mut data).unwrap();
        vmm.vm
            .pio_bus
            .read(PCI_CONFIG_IO_PORT + PCI_CONFIG_IO_PORT_SIZE, &mut data)
            .unwrap_err();
    }
    // Same check for the ECAM window on the MMIO bus.
    #[test]
    fn test_mmio_bus() {
        let vmm = default_vmm();
        let pci_irq_slots = &[0u8; 32];
        let pci_segment = PciSegment::new(0, &vmm.vm, pci_irq_slots).unwrap();
        let mut data = [0u8; u64_to_usize(PCI_MMIO_CONFIG_SIZE_PER_SEGMENT)];
        vmm.vm
            .common
            .mmio_bus
            .read(pci_segment.mmio_config_address, &mut data)
            .unwrap();
        vmm.vm
            .common
            .mmio_bus
            .read(
                pci_segment.mmio_config_address + PCI_MMIO_CONFIG_SIZE_PER_SEGMENT,
                &mut data,
            )
            .unwrap_err();
    }
    // BDF allocation hands out sequential device ids on bus 0 and errors out
    // once all 32 device slots are taken.
    #[test]
    fn test_next_device_bdf() {
        let vmm = default_vmm();
        let pci_irq_slots = &[0u8; 32];
        let pci_segment = PciSegment::new(0, &vmm.vm, pci_irq_slots).unwrap();
        // Start checking from device id 1, since 0 is allocated to the Root port.
        for dev_id in 1..32 {
            let bdf = pci_segment.next_device_bdf().unwrap();
            // In our case we have a single Segment with id 0, which has
            // a single bus with id 0. Also, each device of ours has a
            // single function.
            assert_eq!(bdf, PciBdf::new(0, 0, dev_id, 0));
        }
        // We can only have 32 devices on a segment
        pci_segment.next_device_bdf().unwrap_err();
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/pci/mod.rs | src/vmm/src/devices/pci/mod.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
pub mod pci_segment;
pub use pci_segment::*;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vstate/vm.rs | src/vmm/src/vstate/vm.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::collections::HashMap;
use std::fs::OpenOptions;
use std::io::Write;
use std::path::Path;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, Mutex, MutexGuard};
#[cfg(target_arch = "x86_64")]
use kvm_bindings::KVM_IRQCHIP_IOAPIC;
use kvm_bindings::{
KVM_IRQ_ROUTING_IRQCHIP, KVM_IRQ_ROUTING_MSI, KVM_MSI_VALID_DEVID, KvmIrqRouting,
kvm_irq_routing_entry, kvm_userspace_memory_region,
};
use kvm_ioctls::VmFd;
use log::debug;
use serde::{Deserialize, Serialize};
use vmm_sys_util::errno;
use vmm_sys_util::eventfd::EventFd;
pub use crate::arch::{ArchVm as Vm, ArchVmError, VmState};
use crate::arch::{GSI_MSI_END, host_page_size};
use crate::logger::info;
use crate::pci::{DeviceRelocation, DeviceRelocationError, PciDevice};
use crate::persist::CreateSnapshotError;
use crate::vmm_config::snapshot::SnapshotType;
use crate::vstate::bus::Bus;
use crate::vstate::interrupts::{InterruptError, MsixVector, MsixVectorConfig, MsixVectorGroup};
use crate::vstate::memory::{
GuestMemory, GuestMemoryExtension, GuestMemoryMmap, GuestMemoryRegion, GuestMemoryState,
GuestRegionMmap, GuestRegionMmapExt, MemoryError,
};
use crate::vstate::resources::ResourceAllocator;
use crate::vstate::vcpu::VcpuError;
use crate::{DirtyBitmap, Vcpu, mem_size_mib};
#[derive(Debug, Serialize, Deserialize)]
/// A struct representing an interrupt line used by some device of the microVM
pub struct RoutingEntry {
    /// Raw KVM routing entry (IRQCHIP or MSI flavor) for this GSI.
    entry: kvm_irq_routing_entry,
    /// Whether the line is currently masked; masked entries are skipped when
    /// the GSI routing table is pushed to KVM (see `Vm::set_gsi_routes`).
    masked: bool,
}
/// Architecture independent parts of a VM.
#[derive(Debug)]
pub struct VmCommon {
    /// The KVM file descriptor used to access this Vm.
    pub fd: VmFd,
    /// Maximum number of memory slots KVM supports, queried at creation time.
    max_memslots: u32,
    /// The guest memory of this Vm.
    pub guest_memory: GuestMemoryMmap,
    /// Monotonically increasing allocator for KVM memslot ids; only ever
    /// incremented (see `Vm::next_kvm_slot`).
    next_kvm_slot: AtomicU32,
    /// Interrupts used by Vm's devices
    pub interrupts: Mutex<HashMap<u32, RoutingEntry>>,
    /// Allocator for VM resources
    pub resource_allocator: Mutex<ResourceAllocator>,
    /// MMIO bus
    pub mmio_bus: Arc<Bus>,
}
/// Errors associated with the wrappers over KVM ioctls.
/// Needs `rustfmt::skip` to make multiline comments work
#[rustfmt::skip]
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum VmError {
    /// Cannot set the memory regions: {0}
    SetUserMemoryRegion(kvm_ioctls::Error),
    /// Failed to create VM: {0}
    CreateVm(kvm_ioctls::Error),
    /// Failed to get KVM's dirty log: {0}
    GetDirtyLog(kvm_ioctls::Error),
    /// {0}
    Arch(#[from] ArchVmError),
    /// Error during eventfd operations: {0}
    EventFd(std::io::Error),
    /// Failed to create vcpu: {0}
    CreateVcpu(VcpuError),
    /// The number of configured slots is bigger than the maximum reported by KVM: {0}
    NotEnoughMemorySlots(u32),
    /// Failed to add a memory region: {0}
    InsertRegion(#[from] vm_memory::GuestRegionCollectionError),
    /// Error calling mincore: {0}
    Mincore(vmm_sys_util::errno::Error),
    /// ResourceAllocator error: {0}
    ResourceAllocator(#[from] vm_allocator::Error),
    /// MemoryError error: {0}
    MemoryError(#[from] MemoryError),
}
/// Contains Vm functions that are usable across CPU architectures
impl Vm {
    /// Create a KVM VM
    pub fn create_common(kvm: &crate::vstate::kvm::Kvm) -> Result<VmCommon, VmError> {
        // It is known that KVM_CREATE_VM occasionally fails with EINTR on heavily loaded machines
        // with many VMs.
        //
        // The behavior itself that KVM_CREATE_VM can return EINTR is intentional. This is because
        // the KVM_CREATE_VM path includes mm_take_all_locks() that is CPU intensive and all CPU
        // intensive syscalls should check for pending signals and return EINTR immediately to allow
        // userland to remain interactive.
        // https://lists.nongnu.org/archive/html/qemu-devel/2014-01/msg01740.html
        //
        // However, it is empirically confirmed that, even though there is no pending signal,
        // KVM_CREATE_VM returns EINTR.
        // https://lore.kernel.org/qemu-devel/8735e0s1zw.wl-maz@kernel.org/
        //
        // To mitigate it, QEMU does an infinite retry on EINTR that greatly improves reliabiliy:
        // - https://github.com/qemu/qemu/commit/94ccff133820552a859c0fb95e33a539e0b90a75
        // - https://github.com/qemu/qemu/commit/bbde13cd14ad4eec18529ce0bf5876058464e124
        //
        // Similarly, we do retries up to 5 times. Although Firecracker clients are also able to
        // retry, they have to start Firecracker from scratch. Doing retries in Firecracker makes
        // recovery faster and improves reliability.
        const MAX_ATTEMPTS: u32 = 5;
        let mut attempt = 1;
        let fd = loop {
            match kvm.fd.create_vm() {
                Ok(fd) => break fd,
                Err(e) if e.errno() == libc::EINTR && attempt < MAX_ATTEMPTS => {
                    info!("Attempt #{attempt} of KVM_CREATE_VM returned EINTR");
                    // Exponential backoff (1us, 2us, 4us, and 8us => 15us in total)
                    std::thread::sleep(std::time::Duration::from_micros(2u64.pow(attempt - 1)));
                }
                Err(e) => return Err(VmError::CreateVm(e)),
            }
            attempt += 1;
        };
        Ok(VmCommon {
            fd,
            max_memslots: kvm.max_nr_memslots(),
            guest_memory: GuestMemoryMmap::default(),
            next_kvm_slot: AtomicU32::new(0),
            interrupts: Mutex::new(HashMap::with_capacity(GSI_MSI_END as usize + 1)),
            resource_allocator: Mutex::new(ResourceAllocator::new()),
            mmio_bus: Arc::new(Bus::new()),
        })
    }
    /// Creates the specified number of [`Vcpu`]s.
    ///
    /// The returned [`EventFd`] is written to whenever any of the vcpus exit.
    pub fn create_vcpus(&mut self, vcpu_count: u8) -> Result<(Vec<Vcpu>, EventFd), VmError> {
        self.arch_pre_create_vcpus(vcpu_count)?;
        // All vcpus share clones of a single exit eventfd, so one listener can
        // observe any of them exiting.
        let exit_evt = EventFd::new(libc::EFD_NONBLOCK).map_err(VmError::EventFd)?;
        let mut vcpus = Vec::with_capacity(vcpu_count as usize);
        for cpu_idx in 0..vcpu_count {
            let exit_evt = exit_evt.try_clone().map_err(VmError::EventFd)?;
            let vcpu = Vcpu::new(cpu_idx, self, exit_evt).map_err(VmError::CreateVcpu)?;
            vcpus.push(vcpu);
        }
        self.arch_post_create_vcpus(vcpu_count)?;
        Ok((vcpus, exit_evt))
    }
    /// Reserves the next `slot_cnt` contiguous kvm slot ids and returns the first one
    pub fn next_kvm_slot(&self, slot_cnt: u32) -> Option<u32> {
        // Relaxed is sufficient: the counter is only ever incremented and no
        // other memory depends on its ordering.
        let next = self
            .common
            .next_kvm_slot
            .fetch_add(slot_cnt, Ordering::Relaxed);
        // NOTE(review): only the *first* slot id of the reserved range is
        // checked against `max_memslots`; a range that starts below the limit
        // but extends past it is still handed out — confirm callers tolerate
        // the subsequent KVM_SET_USER_MEMORY_REGION failure in that case.
        if self.common.max_memslots <= next {
            None
        } else {
            Some(next)
        }
    }
    /// Registers a single memslot with KVM via KVM_SET_USER_MEMORY_REGION.
    pub(crate) fn set_user_memory_region(
        &self,
        region: kvm_userspace_memory_region,
    ) -> Result<(), VmError> {
        // SAFETY: Safe because the fd is a valid KVM file descriptor.
        unsafe {
            self.fd()
                .set_user_memory_region(region)
                .map_err(VmError::SetUserMemoryRegion)
        }
    }
    // Inserts `region` into the guest memory map and mirrors its memslots into
    // KVM: plugged slots become KVM user memory regions, unplugged slots are
    // access-protected so stray guest-memory accesses fault.
    fn register_memory_region(&mut self, region: Arc<GuestRegionMmapExt>) -> Result<(), VmError> {
        let new_guest_memory = self
            .common
            .guest_memory
            .insert_region(Arc::clone(&region))?;
        region
            .slots()
            .try_for_each(|(ref slot, plugged)| match plugged {
                // if the slot is plugged, add it to kvm user memory regions
                true => self.set_user_memory_region(slot.into()),
                // if the slot is not plugged, protect accesses to it
                false => slot.protect(true).map_err(VmError::MemoryError),
            })?;
        // Only commit the new memory map once all slots were set up successfully.
        self.common.guest_memory = new_guest_memory;
        Ok(())
    }
    /// Register a list of new memory regions to this [`Vm`].
    pub fn register_dram_memory_regions(
        &mut self,
        regions: Vec<GuestRegionMmap>,
    ) -> Result<(), VmError> {
        for region in regions {
            // DRAM regions occupy exactly one KVM slot each.
            let next_slot = self
                .next_kvm_slot(1)
                .ok_or(VmError::NotEnoughMemorySlots(self.common.max_memslots))?;
            let arcd_region =
                Arc::new(GuestRegionMmapExt::dram_from_mmap_region(region, next_slot));
            self.register_memory_region(arcd_region)?
        }
        Ok(())
    }
    /// Register a new hotpluggable region to this [`Vm`].
    ///
    /// The region is split into `region.len() / slot_size` KVM slots so that
    /// individual chunks can be (un)plugged independently.
    pub fn register_hotpluggable_memory_region(
        &mut self,
        region: GuestRegionMmap,
        slot_size: usize,
    ) -> Result<(), VmError> {
        // caller should ensure the slot size divides the region length.
        assert!(region.len().is_multiple_of(slot_size as u64));
        let slot_cnt = (region.len() / (slot_size as u64))
            .try_into()
            .map_err(|_| VmError::NotEnoughMemorySlots(self.common.max_memslots))?;
        let slot_from = self
            .next_kvm_slot(slot_cnt)
            .ok_or(VmError::NotEnoughMemorySlots(self.common.max_memslots))?;
        let arcd_region = Arc::new(GuestRegionMmapExt::hotpluggable_from_mmap_region(
            region, slot_from, slot_size,
        ));
        self.register_memory_region(arcd_region)
    }
    /// Register a list of new memory regions to this [`Vm`].
    ///
    /// Note: regions and state.regions need to be in the same order.
    pub fn restore_memory_regions(
        &mut self,
        regions: Vec<GuestRegionMmap>,
        state: &GuestMemoryState,
    ) -> Result<(), VmError> {
        for (region, state) in regions.into_iter().zip(state.regions.iter()) {
            // One KVM slot per entry in the saved plugged-bitmap.
            let slot_cnt = state
                .plugged
                .len()
                .try_into()
                .map_err(|_| VmError::NotEnoughMemorySlots(self.common.max_memslots))?;
            let next_slot = self
                .next_kvm_slot(slot_cnt)
                .ok_or(VmError::NotEnoughMemorySlots(self.common.max_memslots))?;
            let arcd_region = Arc::new(GuestRegionMmapExt::from_state(region, state, next_slot)?);
            self.register_memory_region(arcd_region)?
        }
        Ok(())
    }
    /// Gets a reference to the kvm file descriptor owned by this VM.
    pub fn fd(&self) -> &VmFd {
        &self.common.fd
    }
    /// Gets a reference to this [`Vm`]'s [`GuestMemoryMmap`] object
    pub fn guest_memory(&self) -> &GuestMemoryMmap {
        &self.common.guest_memory
    }
    /// Gets a mutable reference to this [`Vm`]'s [`ResourceAllocator`] object
    pub fn resource_allocator(&self) -> MutexGuard<'_, ResourceAllocator> {
        self.common
            .resource_allocator
            .lock()
            .expect("Poisoned lock")
    }
    /// Resets the KVM dirty bitmap for each of the guest's memory regions.
    pub fn reset_dirty_bitmap(&self) {
        // Fetching the dirty log also clears KVM's internal bitmap, so the
        // result is deliberately discarded here.
        self.guest_memory()
            .iter()
            .flat_map(|region| region.plugged_slots())
            .for_each(|mem_slot| {
                let _ = self.fd().get_dirty_log(mem_slot.slot, mem_slot.slice.len());
            });
    }
    /// Retrieves the KVM dirty bitmap for each of the guest's memory regions.
    pub fn get_dirty_bitmap(&self) -> Result<DirtyBitmap, VmError> {
        self.guest_memory()
            .iter()
            .flat_map(|region| region.plugged_slots())
            .map(|mem_slot| {
                let bitmap = match mem_slot.slice.bitmap() {
                    // Dirty page tracking enabled: ask KVM for the dirty log.
                    Some(_) => self
                        .fd()
                        .get_dirty_log(mem_slot.slot, mem_slot.slice.len())
                        .map_err(VmError::GetDirtyLog)?,
                    // No tracking: overapproximate via mincore(2) (see below).
                    None => mincore_bitmap(
                        mem_slot.slice.ptr_guard_mut().as_ptr(),
                        mem_slot.slice.len(),
                    )?,
                };
                Ok((mem_slot.slot, bitmap))
            })
            .collect()
    }
    /// Takes a snapshot of the virtual machine running inside the given [`Vmm`] and saves it to
    /// `mem_file_path`.
    ///
    /// If `snapshot_type` is [`SnapshotType::Diff`], and `mem_file_path` exists and is a snapshot
    /// file of matching size, then the diff snapshot will be directly merged into the existing
    /// snapshot. Otherwise, existing files are simply overwritten.
    pub(crate) fn snapshot_memory_to_file(
        &self,
        mem_file_path: &Path,
        snapshot_type: SnapshotType,
    ) -> Result<(), CreateSnapshotError> {
        use self::CreateSnapshotError::*;
        // Need to check this here, as we create the file in the line below
        let file_existed = mem_file_path.exists();
        let mut file = OpenOptions::new()
            .write(true)
            .create(true)
            .truncate(false)
            .open(mem_file_path)
            .map_err(|err| MemoryBackingFile("open", err))?;
        // Determine what size our total memory area is.
        let mem_size_mib = mem_size_mib(self.guest_memory());
        let expected_size = mem_size_mib * 1024 * 1024;
        if file_existed {
            let file_size = file
                .metadata()
                .map_err(|e| MemoryBackingFile("get_metadata", e))?
                .len();
            // Here we only truncate the file if the size mismatches.
            // - For full snapshots, the entire file's contents will be overwritten anyway. We have
            //   to avoid truncating here to deal with the edge case where it represents the
            //   snapshot file from which this very microVM was loaded (as modifying the memory file
            //   would be reflected in the mmap of the file, meaning a truncate operation would zero
            //   out guest memory, and thus corrupt the VM).
            // - For diff snapshots, we want to merge the diff layer directly into the file.
            if file_size != expected_size {
                file.set_len(0)
                    .map_err(|err| MemoryBackingFile("truncate", err))?;
            }
        }
        // Set the length of the file to the full size of the memory area.
        file.set_len(expected_size)
            .map_err(|e| MemoryBackingFile("set_length", e))?;
        match snapshot_type {
            SnapshotType::Diff => {
                let dirty_bitmap = self.get_dirty_bitmap()?;
                self.guest_memory().dump_dirty(&mut file, &dirty_bitmap)?;
            }
            SnapshotType::Full => {
                self.guest_memory().dump(&mut file)?;
                // A full dump captures everything; start dirty tracking afresh.
                self.reset_dirty_bitmap();
                self.guest_memory().reset_dirty();
            }
        };
        file.flush()
            .map_err(|err| MemoryBackingFile("flush", err))?;
        file.sync_all()
            .map_err(|err| MemoryBackingFile("sync_all", err))
    }
    /// Register a device IRQ
    ///
    /// Wires `fd` to `gsi` via an irqfd and records an IRQCHIP routing entry
    /// for it (unmasked).
    pub fn register_irq(&self, fd: &EventFd, gsi: u32) -> Result<(), errno::Error> {
        self.common.fd.register_irqfd(fd, gsi)?;
        let mut entry = kvm_irq_routing_entry {
            gsi,
            type_: KVM_IRQ_ROUTING_IRQCHIP,
            ..Default::default()
        };
        // Pick the right irqchip id per architecture (IOAPIC on x86_64,
        // chip 0 on aarch64).
        #[cfg(target_arch = "x86_64")]
        {
            entry.u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC;
        }
        #[cfg(target_arch = "aarch64")]
        {
            entry.u.irqchip.irqchip = 0;
        }
        entry.u.irqchip.pin = gsi;
        self.common
            .interrupts
            .lock()
            .expect("Poisoned lock")
            .insert(
                gsi,
                RoutingEntry {
                    entry,
                    masked: false,
                },
            );
        Ok(())
    }
    /// Register an MSI device interrupt
    ///
    /// Records (or replaces) the MSI routing entry for `route.gsi`; the entry
    /// is only pushed to KVM on the next [`Vm::set_gsi_routes`] call.
    pub fn register_msi(
        &self,
        route: &MsixVector,
        masked: bool,
        config: MsixVectorConfig,
    ) -> Result<(), errno::Error> {
        let mut entry = kvm_irq_routing_entry {
            gsi: route.gsi,
            type_: KVM_IRQ_ROUTING_MSI,
            ..Default::default()
        };
        entry.u.msi.address_lo = config.low_addr;
        entry.u.msi.address_hi = config.high_addr;
        entry.u.msi.data = config.data;
        if self.common.fd.check_extension(kvm_ioctls::Cap::MsiDevid) {
            // According to KVM documentation:
            // https://docs.kernel.org/virt/kvm/api.html#kvm-set-gsi-routing
            //
            // if the capability is set, we need to set the flag and provide a valid unique device
            // ID. "For PCI, this is usually a BDF identifier in the lower 16 bits".
            //
            // The layout of `config.devid` is:
            //
            // |---- 16 bits ----|-- 8 bits --|-- 5 bits --|-- 3 bits --|
            // |     segment     |     bus    |   device   |  function  |
            //
            // For the time being, we are using a single PCI segment and a single bus per segment
            // so just passing config.devid should be fine.
            entry.flags = KVM_MSI_VALID_DEVID;
            entry.u.msi.__bindgen_anon_1.devid = config.devid;
        }
        self.common
            .interrupts
            .lock()
            .expect("Poisoned lock")
            .insert(route.gsi, RoutingEntry { entry, masked });
        Ok(())
    }
    /// Create a group of MSI-X interrupts
    ///
    /// Allocates `count` fresh GSIs and wraps each in an (initially disabled)
    /// [`MsixVector`].
    pub fn create_msix_group(vm: Arc<Vm>, count: u16) -> Result<MsixVectorGroup, InterruptError> {
        debug!("Creating new MSI group with {count} vectors");
        let mut vectors = Vec::with_capacity(count as usize);
        for gsi in vm
            .resource_allocator()
            .allocate_gsi_msi(count as u32)?
            .iter()
        {
            vectors.push(MsixVector::new(*gsi, false)?);
        }
        Ok(MsixVectorGroup { vm, vectors })
    }
    /// Set GSI routes to KVM
    ///
    /// Pushes all currently *unmasked* routing entries to KVM in one
    /// KVM_SET_GSI_ROUTING call; masked entries are omitted entirely.
    pub fn set_gsi_routes(&self) -> Result<(), InterruptError> {
        let entries = self.common.interrupts.lock().expect("Poisoned lock");
        let mut routes = KvmIrqRouting::new(0)?;
        for entry in entries.values() {
            if entry.masked {
                continue;
            }
            routes.push(entry.entry)?;
        }
        self.common.fd.set_gsi_routing(&routes)?;
        Ok(())
    }
}
/// Use `mincore(2)` to overapproximate the dirty bitmap for the given memslot. To be used
/// if a diff snapshot is requested, but dirty page tracking wasn't enabled.
///
/// Returns one `u64` per 64 pages, KVM-dirty-log style (bit i of word w set
/// means page `w * 64 + i` is resident, and thus treated as dirty).
fn mincore_bitmap(addr: *mut u8, len: usize) -> Result<Vec<u64>, VmError> {
    // TODO: Once Host 5.10 goes out of support, we can make this more robust and work on
    // swap-enabled systems, by doing mlock2(MLOCK_ONFAULT)/munlock() in this function (to
    // force swapped-out pages to get paged in, so that mincore will consider them incore).
    // However, on AMD (m6a/m7a) 5.10, doing so introduces a 100%/30ms regression to snapshot
    // creation, even if swap is disabled, so currently it cannot be done.
    // Mincore always works at PAGE_SIZE granularity, even if the VMA we are dealing with
    // is a hugetlbfs VMA (e.g. to report a single hugepage as "present", mincore will
    // give us 512 4k markers with the lowest bit set).
    let page_size = host_page_size();
    let mut mincore_bitmap = vec![0u8; len / page_size];
    let mut bitmap = vec![0u64; (len / page_size).div_ceil(64)];
    // SAFETY: The safety invariants of GuestRegionMmap ensure that region.as_ptr() is a valid
    // userspace mapping of size region.len() bytes. The bitmap has exactly one byte for each
    // page in this userspace mapping. Note that mincore does not operate on bitmaps like
    // KVM_MEM_LOG_DIRTY_PAGES, but rather it uses 8 bits per page (e.g. 1 byte), setting the
    // least significant bit to 1 if the page corresponding to a byte is in core (available in
    // the page cache and resolvable via just a minor page fault).
    let r = unsafe { libc::mincore(addr.cast(), len, mincore_bitmap.as_mut_ptr()) };
    if r != 0 {
        return Err(VmError::Mincore(vmm_sys_util::errno::Error::last()));
    }
    // Compress the 1-byte-per-page mincore output into a packed 1-bit-per-page
    // bitmap matching KVM's dirty log layout.
    for (page_idx, b) in mincore_bitmap.iter().enumerate() {
        bitmap[page_idx / 64] |= (*b as u64 & 0x1) << (page_idx as u64 % 64);
    }
    Ok(bitmap)
}
impl DeviceRelocation for Vm {
    // BAR reprogramming by the guest is not supported: any attempt to move a
    // PCI device's BAR is rejected.
    fn move_bar(
        &self,
        _old_base: u64,
        _new_base: u64,
        _len: u64,
        _pci_dev: &mut dyn PciDevice,
    ) -> Result<(), DeviceRelocationError> {
        Err(DeviceRelocationError::NotSupported)
    }
}
#[cfg(test)]
pub(crate) mod tests {
use std::sync::atomic::Ordering;
use vm_memory::GuestAddress;
use vm_memory::mmap::MmapRegionBuilder;
use super::*;
use crate::snapshot::Persist;
#[cfg(target_arch = "x86_64")]
use crate::snapshot::Snapshot;
use crate::test_utils::single_region_mem_raw;
use crate::utils::mib_to_bytes;
use crate::vstate::kvm::Kvm;
use crate::vstate::memory::GuestRegionMmap;
// Auxiliary function being used throughout the tests.
pub(crate) fn setup_vm() -> (Kvm, Vm) {
let kvm = Kvm::new(vec![]).expect("Cannot create Kvm");
let vm = Vm::new(&kvm).expect("Cannot create new vm");
(kvm, vm)
}
// Auxiliary function being used throughout the tests.
pub(crate) fn setup_vm_with_memory(mem_size: usize) -> (Kvm, Vm) {
let (kvm, mut vm) = setup_vm();
let gm = single_region_mem_raw(mem_size);
vm.register_dram_memory_regions(gm).unwrap();
(kvm, vm)
}
#[test]
fn test_new() {
// Testing with a valid /dev/kvm descriptor.
let kvm = Kvm::new(vec![]).expect("Cannot create Kvm");
Vm::new(&kvm).unwrap();
}
#[test]
fn test_register_memory_regions() {
let (_, mut vm) = setup_vm();
// Trying to set a memory region with a size that is not a multiple of GUEST_PAGE_SIZE
// will result in error.
let gm = single_region_mem_raw(0x10);
let res = vm.register_dram_memory_regions(gm);
assert_eq!(
res.unwrap_err().to_string(),
"Cannot set the memory regions: Invalid argument (os error 22)"
);
let gm = single_region_mem_raw(0x1000);
let res = vm.register_dram_memory_regions(gm);
res.unwrap();
}
#[test]
fn test_too_many_regions() {
let (kvm, mut vm) = setup_vm();
let max_nr_regions = kvm.max_nr_memslots();
// SAFETY: valid mmap parameters
let ptr = unsafe {
libc::mmap(
std::ptr::null_mut(),
0x1000,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
-1,
0,
)
};
assert_ne!(ptr, libc::MAP_FAILED);
for i in 0..=max_nr_regions {
// SAFETY: we assert above that the ptr is valid, and the size matches what we passed to
// mmap
let region = unsafe {
MmapRegionBuilder::new(0x1000)
.with_raw_mmap_pointer(ptr.cast())
.build()
.unwrap()
};
let region = GuestRegionMmap::new(region, GuestAddress(i as u64 * 0x1000)).unwrap();
let res = vm.register_dram_memory_regions(vec![region]);
if max_nr_regions <= i {
assert!(
matches!(res, Err(VmError::NotEnoughMemorySlots(v)) if v == max_nr_regions),
"{:?} at iteration {}",
res,
i
);
} else {
res.unwrap_or_else(|_| {
panic!(
"to be able to insert more regions in iteration {i} - max_nr_memslots: \
{max_nr_regions} - num_regions: {}",
vm.guest_memory().num_regions()
)
});
}
}
}
#[test]
fn test_create_vcpus() {
let vcpu_count = 2;
let (_, mut vm) = setup_vm_with_memory(mib_to_bytes(128));
let (vcpu_vec, _) = vm.create_vcpus(vcpu_count).unwrap();
assert_eq!(vcpu_vec.len(), vcpu_count as usize);
}
fn enable_irqchip(vm: &mut Vm) {
#[cfg(target_arch = "x86_64")]
vm.setup_irqchip().unwrap();
#[cfg(target_arch = "aarch64")]
vm.setup_irqchip(1).unwrap();
}
fn create_msix_group(vm: &Arc<Vm>) -> MsixVectorGroup {
Vm::create_msix_group(vm.clone(), 4).unwrap()
}
#[test]
fn test_msi_vector_group_new() {
let (_, vm) = setup_vm_with_memory(mib_to_bytes(128));
let vm = Arc::new(vm);
let msix_group = create_msix_group(&vm);
assert_eq!(msix_group.num_vectors(), 4);
}
#[test]
fn test_msi_vector_group_enable_disable() {
let (_, mut vm) = setup_vm_with_memory(mib_to_bytes(128));
enable_irqchip(&mut vm);
let vm = Arc::new(vm);
let msix_group = create_msix_group(&vm);
// Initially all vectors are disabled
for route in &msix_group.vectors {
assert!(!route.enabled.load(Ordering::Acquire))
}
// Enable works
msix_group.enable().unwrap();
for route in &msix_group.vectors {
assert!(route.enabled.load(Ordering::Acquire));
}
// Enabling an enabled group doesn't error out
msix_group.enable().unwrap();
// Disable works
msix_group.disable().unwrap();
for route in &msix_group.vectors {
assert!(!route.enabled.load(Ordering::Acquire))
}
// Disabling a disabled group doesn't error out
}
#[test]
fn test_msi_vector_group_trigger() {
let (_, mut vm) = setup_vm_with_memory(mib_to_bytes(128));
enable_irqchip(&mut vm);
let vm = Arc::new(vm);
let msix_group = create_msix_group(&vm);
// We can now trigger all vectors
for i in 0..4 {
msix_group.trigger(i).unwrap()
}
// We can't trigger an invalid vector
msix_group.trigger(4).unwrap_err();
}
#[test]
fn test_msi_vector_group_notifier() {
let (_, vm) = setup_vm_with_memory(mib_to_bytes(128));
let vm = Arc::new(vm);
let msix_group = create_msix_group(&vm);
for i in 0..4 {
assert!(msix_group.notifier(i).is_some());
}
assert!(msix_group.notifier(4).is_none());
}
#[test]
fn test_msi_vector_group_update_invalid_vector() {
let (_, mut vm) = setup_vm_with_memory(mib_to_bytes(128));
enable_irqchip(&mut vm);
let vm = Arc::new(vm);
let msix_group = create_msix_group(&vm);
let config = MsixVectorConfig {
high_addr: 0x42,
low_addr: 0x12,
data: 0x12,
devid: 0xafa,
};
msix_group.update(0, config, true, true).unwrap();
msix_group.update(4, config, true, true).unwrap_err();
}
#[test]
fn test_msi_vector_group_update() {
let (_, mut vm) = setup_vm_with_memory(mib_to_bytes(128));
enable_irqchip(&mut vm);
let vm = Arc::new(vm);
assert!(vm.common.interrupts.lock().unwrap().is_empty());
let msix_group = create_msix_group(&vm);
// Set some configuration for the vectors. Initially all are masked
let mut config = MsixVectorConfig {
high_addr: 0x42,
low_addr: 0x13,
data: 0x12,
devid: 0xafa,
};
for i in 0..4 {
config.data = 0x12 * i;
msix_group.update(i as usize, config, true, false).unwrap();
}
// All vectors should be disabled
for vector in &msix_group.vectors {
assert!(!vector.enabled.load(Ordering::Acquire));
}
for i in 0..4 {
let gsi = crate::arch::GSI_MSI_START + i;
let interrupts = vm.common.interrupts.lock().unwrap();
let kvm_route = interrupts.get(&gsi).unwrap();
assert!(kvm_route.masked);
assert_eq!(kvm_route.entry.gsi, gsi);
assert_eq!(kvm_route.entry.type_, KVM_IRQ_ROUTING_MSI);
// SAFETY: because we know we setup MSI routes.
unsafe {
assert_eq!(kvm_route.entry.u.msi.address_hi, 0x42);
assert_eq!(kvm_route.entry.u.msi.address_lo, 0x13);
assert_eq!(kvm_route.entry.u.msi.data, 0x12 * i);
}
}
// Simply enabling the vectors should not update the registered IRQ routes
msix_group.enable().unwrap();
for i in 0..4 {
let gsi = crate::arch::GSI_MSI_START + i;
let interrupts = vm.common.interrupts.lock().unwrap();
let kvm_route = interrupts.get(&gsi).unwrap();
assert!(kvm_route.masked);
assert_eq!(kvm_route.entry.gsi, gsi);
assert_eq!(kvm_route.entry.type_, KVM_IRQ_ROUTING_MSI);
// SAFETY: because we know we setup MSI routes.
unsafe {
assert_eq!(kvm_route.entry.u.msi.address_hi, 0x42);
assert_eq!(kvm_route.entry.u.msi.address_lo, 0x13);
assert_eq!(kvm_route.entry.u.msi.data, 0x12 * i);
}
}
// Updating the config of a vector should enable its route (and only its route)
config.data = 0;
msix_group.update(0, config, false, true).unwrap();
for i in 0..4 {
let gsi = crate::arch::GSI_MSI_START + i;
let interrupts = vm.common.interrupts.lock().unwrap();
let kvm_route = interrupts.get(&gsi).unwrap();
assert_eq!(kvm_route.masked, i != 0);
assert_eq!(kvm_route.entry.gsi, gsi);
assert_eq!(kvm_route.entry.type_, KVM_IRQ_ROUTING_MSI);
// SAFETY: because we know we setup MSI routes.
unsafe {
assert_eq!(kvm_route.entry.u.msi.address_hi, 0x42);
assert_eq!(kvm_route.entry.u.msi.address_lo, 0x13);
assert_eq!(kvm_route.entry.u.msi.data, 0x12 * i);
}
}
}
#[test]
fn test_msi_vector_group_persistence() {
let (_, mut vm) = setup_vm_with_memory(mib_to_bytes(128));
enable_irqchip(&mut vm);
let vm = Arc::new(vm);
let msix_group = create_msix_group(&vm);
msix_group.enable().unwrap();
let state = msix_group.save();
let restored_group = MsixVectorGroup::restore(vm, &state).unwrap();
assert_eq!(msix_group.num_vectors(), restored_group.num_vectors());
// Even if an MSI group is enabled, we don't save it as such. During restoration, the PCI
// transport will make sure the correct config is set for the vectors and enable them
// accordingly.
for (id, vector) in msix_group.vectors.iter().enumerate() {
let new_vector = &restored_group.vectors[id];
assert_eq!(vector.gsi, new_vector.gsi);
assert!(!new_vector.enabled.load(Ordering::Acquire));
}
}
#[cfg(target_arch = "x86_64")]
#[test]
fn test_restore_state_resource_allocator() {
use vm_allocator::AllocPolicy;
let mut snapshot_data = vec![0u8; 10000];
let (_, mut vm) = setup_vm_with_memory(0x1000);
vm.setup_irqchip().unwrap();
// Allocate a GSI and some memory and make sure they are still allocated after restore
let (gsi, range) = {
let mut resource_allocator = vm.resource_allocator();
let gsi = resource_allocator.allocate_gsi_msi(1).unwrap()[0];
let range = resource_allocator
.allocate_32bit_mmio_memory(1024, 1024, AllocPolicy::FirstMatch)
.unwrap();
(gsi, range)
};
let state = vm.save_state().unwrap();
Snapshot::new(state)
.save(&mut snapshot_data.as_mut_slice())
.unwrap();
let restored_state: VmState = Snapshot::load_without_crc_check(snapshot_data.as_slice())
.unwrap()
.data;
vm.restore_state(&restored_state).unwrap();
let mut resource_allocator = vm.resource_allocator();
let gsi_new = resource_allocator.allocate_gsi_msi(1).unwrap()[0];
assert_eq!(gsi + 1, gsi_new);
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vstate/interrupts.rs | src/vmm/src/vstate/interrupts.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use kvm_ioctls::VmFd;
use vmm_sys_util::eventfd::EventFd;
use crate::Vm;
use crate::logger::{IncMetric, METRICS};
use crate::snapshot::Persist;
#[derive(Debug, thiserror::Error, displaydoc::Display)]
/// Errors related with Firecracker interrupts
// NOTE: via `displaydoc::Display`, the `///` comment on each variant *is* its runtime
// `Display` message, so editing these doc comments changes user-visible error text.
pub enum InterruptError {
    /// Error allocating resources: {0}
    Allocator(#[from] vm_allocator::Error),
    /// IO error: {0}
    Io(#[from] std::io::Error),
    /// FamStruct error: {0}
    FamStruct(#[from] vmm_sys_util::fam::Error),
    /// KVM error: {0}
    Kvm(#[from] kvm_ioctls::Error),
    /// Invalid vector index: {0}
    InvalidVectorIndex(usize),
}
/// Configuration data for an MSI-X interrupt.
#[derive(Copy, Clone, Debug, Default)]
pub struct MsixVectorConfig {
    /// High 32 bits of the address to which the message signaled interrupt is delivered.
    pub high_addr: u32,
    /// Low 32 bits of the address to which the message signaled interrupt is delivered.
    pub low_addr: u32,
    /// Data written when delivering the message signaled interrupt.
    pub data: u32,
    /// Unique ID of the device delivering the message signaled interrupt.
    pub devid: u32,
}
/// Type that describes an allocated interrupt
#[derive(Debug)]
pub struct MsixVector {
    /// GSI used for this vector
    pub gsi: u32,
    /// EventFd used for this vector; writing to it triggers the interrupt (see
    /// `MsixVectorGroup::trigger`).
    pub event_fd: EventFd,
    /// Flag determining whether the vector is enabled, i.e. whether `event_fd` is currently
    /// registered with KVM as an irqfd.
    pub enabled: AtomicBool,
}
// Previously these methods were split over two back-to-back `impl MsixVector` blocks for no
// apparent reason; they are merged into a single one here.
impl MsixVector {
    /// Create a new [`MsixVector`] for the given GSI.
    ///
    /// The backing [`EventFd`] is created non-blocking.
    ///
    /// # Errors
    ///
    /// Returns [`InterruptError::Io`] if the eventfd cannot be allocated.
    pub fn new(gsi: u32, enabled: bool) -> Result<MsixVector, InterruptError> {
        Ok(MsixVector {
            gsi,
            event_fd: EventFd::new(libc::EFD_NONBLOCK)?,
            enabled: AtomicBool::new(enabled),
        })
    }

    /// Enable vector, registering its eventfd with KVM as an irqfd.
    ///
    /// No-op if the vector is already enabled.
    pub fn enable(&self, vmfd: &VmFd) -> Result<(), InterruptError> {
        if !self.enabled.load(Ordering::Acquire) {
            vmfd.register_irqfd(&self.event_fd, self.gsi)?;
            self.enabled.store(true, Ordering::Release);
        }
        Ok(())
    }

    /// Disable vector, unregistering its eventfd from KVM.
    ///
    /// No-op if the vector is already disabled.
    pub fn disable(&self, vmfd: &VmFd) -> Result<(), InterruptError> {
        if self.enabled.load(Ordering::Acquire) {
            vmfd.unregister_irqfd(&self.event_fd, self.gsi)?;
            self.enabled.store(false, Ordering::Release);
        }
        Ok(())
    }
}
#[derive(Debug)]
/// MSI interrupts created for a VirtIO device
pub struct MsixVectorGroup {
    /// Reference to the Vm object, which we'll need for interacting with the underlying KVM Vm
    /// file descriptor
    pub vm: Arc<Vm>,
    /// A list of all the MSI-X vectors. At most `u16::MAX` entries; see
    /// `MsixVectorGroup::num_vectors`, which relies on this bound.
    pub vectors: Vec<MsixVector>,
}
impl MsixVectorGroup {
    /// Returns the number of vectors in this group
    pub fn num_vectors(&self) -> u16 {
        // It is safe to unwrap here. We are creating `MsixVectorGroup` objects through the
        // `Vm::create_msix_group` where the argument for the number of `vectors` is a `u16`.
        u16::try_from(self.vectors.len()).unwrap()
    }
    /// Enable the MSI-X vector group, registering every vector's irqfd with KVM.
    pub fn enable(&self) -> Result<(), InterruptError> {
        for route in &self.vectors {
            route.enable(&self.vm.common.fd)?;
        }
        Ok(())
    }
    /// Disable the MSI-X vector group, unregistering every vector's irqfd from KVM.
    pub fn disable(&self) -> Result<(), InterruptError> {
        for route in &self.vectors {
            route.disable(&self.vm.common.fd)?;
        }
        Ok(())
    }
    /// Trigger an interrupt for a vector in the group by writing to its eventfd.
    ///
    /// Returns [`InterruptError::InvalidVectorIndex`] if `index` is out of range.
    pub fn trigger(&self, index: usize) -> Result<(), InterruptError> {
        self.notifier(index)
            .ok_or(InterruptError::InvalidVectorIndex(index))?
            .write(1)?;
        METRICS.interrupts.triggers.inc();
        Ok(())
    }
    /// Get a reference to the underlying `EventFd` used to trigger interrupts for a vector in the
    /// group, or `None` if `index` is out of range.
    pub fn notifier(&self, index: usize) -> Option<&EventFd> {
        self.vectors.get(index).map(|route| &route.event_fd)
    }
    /// Update the MSI-X configuration for a vector in the group
    ///
    /// # Arguments
    ///
    /// * `index` - Index of the vector inside the group.
    /// * `msi_config` - New MSI-X address/data configuration for the vector.
    /// * `masked` - Whether the guest has masked this vector.
    /// * `set_gsi` - Whether to (re)commit the GSI routing table to KVM.
    pub fn update(
        &self,
        index: usize,
        msi_config: MsixVectorConfig,
        masked: bool,
        set_gsi: bool,
    ) -> Result<(), InterruptError> {
        if let Some(vector) = self.vectors.get(index) {
            METRICS.interrupts.config_updates.inc();
            // When an interrupt is masked the GSI will not be passed to KVM through
            // KVM_SET_GSI_ROUTING. So, call [`disable()`] to unregister the interrupt file
            // descriptor before passing the interrupt routes to KVM
            if masked {
                vector.disable(&self.vm.common.fd)?;
            }
            self.vm.register_msi(vector, masked, msi_config)?;
            if set_gsi {
                self.vm
                    .set_gsi_routes()
                    .map_err(|err| std::io::Error::other(format!("MSI-X update: {err}")))?
            }
            // Assign KVM_IRQFD after KVM_SET_GSI_ROUTING to avoid
            // panic on kernel which does not have commit a80ced6ea514
            // (KVM: SVM: fix panic on out-of-bounds guest IRQ).
            if !masked {
                vector.enable(&self.vm.common.fd)?;
            }
            return Ok(());
        }
        Err(InterruptError::InvalidVectorIndex(index))
    }
}
impl<'a> Persist<'a> for MsixVectorGroup {
    type State = Vec<u32>;
    type ConstructorArgs = Arc<Vm>;
    type Error = InterruptError;

    fn save(&self) -> Self::State {
        // Only the GSIs are persisted. The "enabled" flag is deliberately not part of the
        // state: PCI devices keep the MSI-X configuration and re-enable vectors on the
        // restore path if they were enabled before the snapshot.
        self.vectors.iter().map(|vector| vector.gsi).collect()
    }

    fn restore(
        constructor_args: Self::ConstructorArgs,
        state: &Self::State,
    ) -> std::result::Result<Self, Self::Error> {
        // Every vector comes back disabled; the PCI transport re-enables them as needed.
        let vectors = state
            .iter()
            .map(|&gsi| MsixVector::new(gsi, false))
            .collect::<Result<Vec<_>, _>>()?;
        Ok(MsixVectorGroup {
            vm: constructor_args,
            vectors,
        })
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vstate/kvm.rs | src/vmm/src/vstate/kvm.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use kvm_bindings::KVM_API_VERSION;
use kvm_ioctls::Kvm as KvmFd;
use serde::{Deserialize, Serialize};
pub use crate::arch::{Kvm, KvmArchError};
use crate::cpu_config::templates::KvmCapability;
/// Errors associated with the wrappers over KVM ioctls.
/// Needs `rustfmt::skip` to make multiline comments work
#[rustfmt::skip]
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum KvmError {
    // NOTE: via `displaydoc::Display`, each variant's doc comment is also its runtime
    // `Display` message.
    /// The host kernel reports an invalid KVM API version: {0}
    ApiVersion(i32),
    /// Missing KVM capabilities: {0:#x?}
    Capabilities(u32),
    /** Error creating KVM object: {0} Make sure the user launching the firecracker process is \
    configured on the /dev/kvm file's ACL. */
    Kvm(kvm_ioctls::Error),
    /// Architecture specific error: {0}
    ArchError(#[from] KvmArchError)
}
impl Kvm {
    /// Create `Kvm` struct.
    ///
    /// Opens `/dev/kvm`, validates the KVM API version, checks that all required
    /// capabilities (defaults plus template modifiers) are present, then performs
    /// arch-specific initialization.
    pub fn new(kvm_cap_modifiers: Vec<KvmCapability>) -> Result<Self, KvmError> {
        let kvm_fd = KvmFd::new().map_err(KvmError::Kvm)?;
        // Check that KVM has the correct version.
        // Safe to cast because this is a constant.
        #[allow(clippy::cast_possible_wrap)]
        if kvm_fd.get_api_version() != KVM_API_VERSION as i32 {
            return Err(KvmError::ApiVersion(kvm_fd.get_api_version()));
        }
        let total_caps = Self::combine_capabilities(&kvm_cap_modifiers);
        // Check that all desired capabilities are supported.
        Self::check_capabilities(&kvm_fd, &total_caps).map_err(KvmError::Capabilities)?;
        Ok(Kvm::init_arch(kvm_fd, kvm_cap_modifiers)?)
    }
    /// Merges `DEFAULT_CAPABILITIES` with the CPU-template modifiers.
    ///
    /// `Add` appends the capability if absent; `Remove` drops it. `swap_remove` may
    /// reorder the list, which is fine because the result is treated as a set.
    fn combine_capabilities(kvm_cap_modifiers: &[KvmCapability]) -> Vec<u32> {
        let mut total_caps = Self::DEFAULT_CAPABILITIES.to_vec();
        for modifier in kvm_cap_modifiers.iter() {
            match modifier {
                KvmCapability::Add(cap) => {
                    if !total_caps.contains(cap) {
                        total_caps.push(*cap);
                    }
                }
                KvmCapability::Remove(cap) => {
                    if let Some(pos) = total_caps.iter().position(|c| c == cap) {
                        total_caps.swap_remove(pos);
                    }
                }
            }
        }
        total_caps
    }
    /// Checks each capability via `KVM_CHECK_EXTENSION`; returns the first missing one.
    fn check_capabilities(kvm_fd: &KvmFd, capabilities: &[u32]) -> Result<(), u32> {
        for cap in capabilities {
            // If capability is not supported kernel will return 0.
            if kvm_fd.check_extension_raw(u64::from(*cap)) == 0 {
                return Err(*cap);
            }
        }
        Ok(())
    }
    /// Saves and returns the Kvm state.
    pub fn save_state(&self) -> KvmState {
        KvmState {
            kvm_cap_modifiers: self.kvm_cap_modifiers.clone(),
        }
    }
    /// Returns the maximal number of memslots allowed in a [`Vm`]
    pub fn max_nr_memslots(&self) -> u32 {
        self.fd
            .get_nr_memslots()
            .try_into()
            // Fixed: the message used to talk about "vcpus" although this queries memslots.
            .expect("Number of memslots reported by KVM exceeds u32::MAX")
    }
}
/// Structure holding serializable, non-arch-specific KVM state (currently only the
/// capability modifiers coming from the CPU template; see `Kvm::save_state`).
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct KvmState {
    /// Additional capabilities that were specified in cpu template.
    pub kvm_cap_modifiers: Vec<KvmCapability>,
}
#[cfg(test)]
pub(crate) mod tests {
    use super::*;

    #[test]
    fn test_combine_capabilities() {
        // Default caps for x86_64 and aarch64 both have KVM_CAP_IOEVENTFD and don't have
        // KVM_CAP_IOMMU caps.
        let modifiers = [
            KvmCapability::Add(kvm_bindings::KVM_CAP_IOMMU),
            KvmCapability::Remove(kvm_bindings::KVM_CAP_IOEVENTFD),
        ];
        let combined = Kvm::combine_capabilities(&modifiers);
        let has_cap = |cap: &u32| combined.contains(cap);
        assert!(has_cap(&kvm_bindings::KVM_CAP_IOMMU));
        assert!(!has_cap(&kvm_bindings::KVM_CAP_IOEVENTFD));
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vstate/vcpu.rs | src/vmm/src/vstate/vcpu.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::os::fd::AsRawFd;
use std::sync::atomic::{Ordering, fence};
use std::sync::mpsc::{Receiver, Sender, TryRecvError, channel};
use std::sync::{Arc, Barrier};
use std::{fmt, io, thread};
use kvm_bindings::{KVM_SYSTEM_EVENT_RESET, KVM_SYSTEM_EVENT_SHUTDOWN};
use kvm_ioctls::{VcpuExit, VcpuFd};
use libc::{c_int, c_void, siginfo_t};
use log::{error, info, warn};
use vmm_sys_util::errno;
use vmm_sys_util::eventfd::EventFd;
use crate::FcExitCode;
pub use crate::arch::{KvmVcpu, KvmVcpuConfigureError, KvmVcpuError, Peripherals, VcpuState};
use crate::cpu_config::templates::{CpuConfiguration, GuestConfigError};
#[cfg(feature = "gdb")]
use crate::gdb::target::{GdbTargetError, get_raw_tid};
use crate::logger::{IncMetric, METRICS};
use crate::seccomp::{BpfProgram, BpfProgramRef};
use crate::utils::signal::{Killable, register_signal_handler, sigrtmin};
use crate::utils::sm::StateMachine;
use crate::vstate::bus::Bus;
use crate::vstate::vm::Vm;
/// Signal number (SIGRTMIN) used to kick Vcpus.
// Offset added to `sigrtmin()`; used both when registering the kick handler
// (`Vcpu::register_kick_signal_handler`) and when kicking (`VcpuHandle::send_event`).
pub const VCPU_RTSIG_OFFSET: i32 = 0;
/// Errors associated with the wrappers over KVM ioctls.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
// NOTE: via `displaydoc::Display`, each variant's doc comment is also its runtime
// `Display` message.
pub enum VcpuError {
    /// Error creating vcpu config: {0}
    VcpuConfig(GuestConfigError),
    /// Received error signaling kvm exit: {0}
    FaultyKvmExit(String),
    /// Failed to signal vcpu: {0}
    SignalVcpu(vmm_sys_util::errno::Error),
    /// Unexpected kvm exit received: {0}
    UnhandledKvmExit(String),
    /// Failed to run action on vcpu: {0}
    VcpuResponse(KvmVcpuError),
    /// Cannot spawn a new vCPU thread: {0}
    VcpuSpawn(io::Error),
    /// Vcpu not present in TLS
    VcpuTlsNotPresent,
    /// Error with gdb request sent
    #[cfg(feature = "gdb")]
    GdbRequest(GdbTargetError),
}
/// Encapsulates configuration parameters for the guest vCPUs.
#[derive(Debug)]
pub struct VcpuConfig {
    /// Number of guest VCPUs.
    pub vcpu_count: u8,
    /// Enable simultaneous multithreading in the CPUID configuration.
    pub smt: bool,
    /// Configuration for vCPU
    pub cpu_config: CpuConfiguration,
}
/// Error type for [`Vcpu::start_threaded`].
#[derive(Debug, thiserror::Error, displaydoc::Display)]
// NOTE: variant doc comments double as `Display` messages (displaydoc).
pub enum StartThreadedError {
    /// Failed to spawn vCPU thread: {0}
    Spawn(std::io::Error),
    /// Failed to clone kvm Vcpu fd: {0}
    CopyFd(CopyKvmFdError),
}
/// Error type for [`Vcpu::copy_kvm_vcpu_fd`].
#[derive(Debug, thiserror::Error, displaydoc::Display)]
// NOTE: variant doc comments double as `Display` messages (displaydoc).
pub enum CopyKvmFdError {
    /// Error with libc dup of kvm Vcpu fd
    DupError(#[from] std::io::Error),
    /// Error creating the Vcpu from the duplicated Vcpu fd
    CreateVcpuError(#[from] kvm_ioctls::Error),
}
/// A wrapper around creating and using a vcpu.
#[derive(Debug)]
pub struct Vcpu {
    /// Access to kvm-arch specific functionality.
    pub kvm_vcpu: KvmVcpu,
    /// File descriptor for vcpu to trigger exit event on vmm.
    exit_evt: EventFd,
    /// Debugger emitter for gdb events
    #[cfg(feature = "gdb")]
    gdb_event: Option<Sender<usize>>,
    /// The receiving end of events channel owned by the vcpu side.
    event_receiver: Receiver<VcpuEvent>,
    /// The transmitting end of the events channel which will be given to the handler.
    // `Option` so that `start_threaded` can `take()` it exactly once.
    event_sender: Option<Sender<VcpuEvent>>,
    /// The receiving end of the responses channel which will be given to the handler.
    // `Option` so that `start_threaded` can `take()` it exactly once.
    response_receiver: Option<Receiver<VcpuResponse>>,
    /// The transmitting end of the responses channel owned by the vcpu side.
    response_sender: Sender<VcpuResponse>,
}
impl Vcpu {
    /// Registers a signal handler which kicks the vcpu running on the current thread, if there is
    /// one.
    fn register_kick_signal_handler(&mut self) {
        extern "C" fn handle_signal(_: c_int, _: *mut siginfo_t, _: *mut c_void) {
            // We write to the immediate_exit from other thread, so make sure the read in the
            // KVM_RUN sees the up to date value
            fence(Ordering::Acquire);
        }
        register_signal_handler(sigrtmin() + VCPU_RTSIG_OFFSET, handle_signal)
            .expect("Failed to register vcpu signal handler");
    }
    /// Constructs a new VCPU for `vm`.
    ///
    /// # Arguments
    ///
    /// * `index` - Represents the 0-based CPU index between [0, max vcpus).
    /// * `vm` - The vm to which this vcpu will get attached.
    /// * `exit_evt` - An `EventFd` that will be written into when this vcpu exits.
    ///
    /// # Errors
    ///
    /// Returns an error if the arch-specific vCPU cannot be created.
    pub fn new(index: u8, vm: &Vm, exit_evt: EventFd) -> Result<Self, VcpuError> {
        let (event_sender, event_receiver) = channel();
        let (response_sender, response_receiver) = channel();
        // Propagate the failure instead of the previous `unwrap()`: this function already
        // returns a `Result`, so panicking here would needlessly crash the VMM.
        let kvm_vcpu = KvmVcpu::new(index, vm).map_err(VcpuError::VcpuResponse)?;
        Ok(Vcpu {
            exit_evt,
            event_receiver,
            event_sender: Some(event_sender),
            response_receiver: Some(response_receiver),
            response_sender,
            #[cfg(feature = "gdb")]
            gdb_event: None,
            kvm_vcpu,
        })
    }
    /// Sets a MMIO bus for this vcpu.
    pub fn set_mmio_bus(&mut self, mmio_bus: Arc<Bus>) {
        self.kvm_vcpu.peripherals.mmio_bus = Some(mmio_bus);
    }
    /// Attaches the fields required for debugging
    #[cfg(feature = "gdb")]
    pub fn attach_debug_info(&mut self, gdb_event: Sender<usize>) {
        self.gdb_event = Some(gdb_event);
    }
    /// Obtains a copy of the VcpuFd
    pub fn copy_kvm_vcpu_fd(&self, vm: &Vm) -> Result<VcpuFd, CopyKvmFdError> {
        // SAFETY: We own this fd so it is considered safe to clone
        let r = unsafe { libc::dup(self.kvm_vcpu.fd.as_raw_fd()) };
        if r < 0 {
            return Err(std::io::Error::last_os_error().into());
        }
        // SAFETY: We assert this is a valid fd by checking the result from the dup
        unsafe { Ok(vm.fd().create_vcpu_from_rawfd(r)?) }
    }
    /// Moves the vcpu to its own thread and constructs a VcpuHandle.
    /// The handle can be used to control the remote vcpu.
    pub fn start_threaded(
        mut self,
        vm: &Vm,
        seccomp_filter: Arc<BpfProgram>,
        barrier: Arc<Barrier>,
    ) -> Result<VcpuHandle, StartThreadedError> {
        // Both channel endpoints are `Some` until the vCPU is started; a second call panics.
        let event_sender = self.event_sender.take().expect("vCPU already started");
        let response_receiver = self.response_receiver.take().unwrap();
        let vcpu_fd = self
            .copy_kvm_vcpu_fd(vm)
            .map_err(StartThreadedError::CopyFd)?;
        let vcpu_thread = thread::Builder::new()
            .name(format!("fc_vcpu {}", self.kvm_vcpu.index))
            .spawn(move || {
                let filter = &*seccomp_filter;
                self.register_kick_signal_handler();
                // Synchronization to make sure thread local data is initialized.
                barrier.wait();
                self.run(filter);
            })
            .map_err(StartThreadedError::Spawn)?;
        Ok(VcpuHandle::new(
            event_sender,
            response_receiver,
            vcpu_fd,
            vcpu_thread,
        ))
    }
    /// Main loop of the vCPU thread.
    ///
    /// Runs the vCPU in KVM context in a loop. Handles KVM_EXITs then goes back in.
    /// Note that the state of the VCPU and associated VM must be setup first for this to do
    /// anything useful.
    pub fn run(&mut self, seccomp_filter: BpfProgramRef) {
        // Load seccomp filters for this vCPU thread.
        // Execution panics if filters cannot be loaded, use --no-seccomp if skipping filters
        // altogether is the desired behaviour.
        if let Err(err) = crate::seccomp::apply_filter(seccomp_filter) {
            panic!(
                "Failed to set the requested seccomp filters on vCPU {}: Error: {}",
                self.kvm_vcpu.index, err
            );
        }
        // Start running the machine state in the `Paused` state.
        StateMachine::run(self, Self::paused);
    }
    // This is the main loop of the `Running` state.
    fn running(&mut self) -> StateMachine<Self> {
        // This loop is here just for optimizing the emulation path.
        // No point in ticking the state machine if there are no external events.
        loop {
            match self.run_emulation() {
                // Emulation ran successfully, continue.
                Ok(VcpuEmulation::Handled) => (),
                // Emulation was interrupted, check external events.
                Ok(VcpuEmulation::Interrupted) => break,
                // If the guest was rebooted or halted:
                // - vCPU0 will always exit out of `KVM_RUN` with KVM_EXIT_SHUTDOWN or KVM_EXIT_HLT.
                // - the other vCPUs won't ever exit out of `KVM_RUN`, but they won't consume CPU.
                // So we pause vCPU0 and send a signal to the emulation thread to stop the VMM.
                Ok(VcpuEmulation::Stopped) => return self.exit(FcExitCode::Ok),
                // If the emulation requests a pause lets do this
                #[cfg(feature = "gdb")]
                Ok(VcpuEmulation::Paused) => {
                    #[cfg(target_arch = "x86_64")]
                    self.kvm_vcpu.kvmclock_ctrl();
                    return StateMachine::next(Self::paused);
                }
                // Emulation errors lead to vCPU exit.
                Err(_) => return self.exit(FcExitCode::GenericError),
            }
        }
        // By default don't change state.
        let mut state = StateMachine::next(Self::running);
        // Break this emulation loop on any transition request/external event.
        match self.event_receiver.try_recv() {
            // Running ---- Pause ----> Paused
            Ok(VcpuEvent::Pause) => {
                // Nothing special to do.
                self.response_sender
                    .send(VcpuResponse::Paused)
                    .expect("vcpu channel unexpectedly closed");
                #[cfg(target_arch = "x86_64")]
                self.kvm_vcpu.kvmclock_ctrl();
                // Move to 'paused' state.
                state = StateMachine::next(Self::paused);
            }
            Ok(VcpuEvent::Resume) => {
                self.response_sender
                    .send(VcpuResponse::Resumed)
                    .expect("vcpu channel unexpectedly closed");
            }
            // SaveState cannot be performed on a running Vcpu.
            Ok(VcpuEvent::SaveState) => {
                self.response_sender
                    .send(VcpuResponse::NotAllowed(String::from(
                        "save/restore unavailable while running",
                    )))
                    .expect("vcpu channel unexpectedly closed");
            }
            // DumpCpuConfig cannot be performed on a running Vcpu.
            Ok(VcpuEvent::DumpCpuConfig) => {
                self.response_sender
                    .send(VcpuResponse::NotAllowed(String::from(
                        "cpu config dump is unavailable while running",
                    )))
                    .expect("vcpu channel unexpectedly closed");
            }
            Ok(VcpuEvent::Finish) => return StateMachine::finish(),
            // Unhandled exit of the other end.
            Err(TryRecvError::Disconnected) => {
                // Move to 'exited' state.
                state = self.exit(FcExitCode::GenericError);
            }
            // All other events or lack thereof have no effect on current 'running' state.
            Err(TryRecvError::Empty) => (),
        }
        state
    }
    // This is the main loop of the `Paused` state.
    fn paused(&mut self) -> StateMachine<Self> {
        match self.event_receiver.recv() {
            // Paused ---- Resume ----> Running
            Ok(VcpuEvent::Resume) => {
                if self.kvm_vcpu.fd.get_kvm_run().immediate_exit == 1u8 {
                    warn!(
                        "Received a VcpuEvent::Resume message with immediate_exit enabled. \
                         immediate_exit was disabled before proceeding"
                    );
                    self.kvm_vcpu.fd.set_kvm_immediate_exit(0);
                }
                self.response_sender
                    .send(VcpuResponse::Resumed)
                    .expect("vcpu channel unexpectedly closed");
                // Move to 'running' state.
                StateMachine::next(Self::running)
            }
            Ok(VcpuEvent::Pause) => {
                self.response_sender
                    .send(VcpuResponse::Paused)
                    .expect("vcpu channel unexpectedly closed");
                StateMachine::next(Self::paused)
            }
            Ok(VcpuEvent::SaveState) => {
                // Save vcpu state.
                self.kvm_vcpu
                    .save_state()
                    .map(|vcpu_state| {
                        self.response_sender
                            .send(VcpuResponse::SavedState(Box::new(vcpu_state)))
                            .expect("vcpu channel unexpectedly closed");
                    })
                    .unwrap_or_else(|err| {
                        self.response_sender
                            .send(VcpuResponse::Error(VcpuError::VcpuResponse(err)))
                            .expect("vcpu channel unexpectedly closed");
                    });
                StateMachine::next(Self::paused)
            }
            Ok(VcpuEvent::DumpCpuConfig) => {
                self.kvm_vcpu
                    .dump_cpu_config()
                    .map(|cpu_config| {
                        self.response_sender
                            .send(VcpuResponse::DumpedCpuConfig(Box::new(cpu_config)))
                            .expect("vcpu channel unexpectedly closed");
                    })
                    .unwrap_or_else(|err| {
                        self.response_sender
                            .send(VcpuResponse::Error(VcpuError::VcpuResponse(err)))
                            .expect("vcpu channel unexpectedly closed");
                    });
                StateMachine::next(Self::paused)
            }
            Ok(VcpuEvent::Finish) => StateMachine::finish(),
            // Unhandled exit of the other end.
            Err(_) => {
                // Move to 'exited' state.
                self.exit(FcExitCode::GenericError)
            }
        }
    }
    // Transition to the exited state and finish on command.
    fn exit(&mut self, exit_code: FcExitCode) -> StateMachine<Self> {
        // To avoid cycles, all teardown paths take the following route:
        //   +------------------------+----------------------------+------------------------+
        //   |        Vmm             |           Action           |           Vcpu         |
        //   +------------------------+----------------------------+------------------------+
        // 1 |                        |                            | vcpu.exit(exit_code)   |
        // 2 |                        |                            | vcpu.exit_evt.write(1) |
        // 3 |                        | <--- EventFd::exit_evt --- |                        |
        // 4 | vmm.stop()             |                            |                        |
        // 5 |                        | --- VcpuEvent::Finish ---> |                        |
        // 6 |                        |                            | StateMachine::finish() |
        // 7 | VcpuHandle::join()     |                            |                        |
        // 8 | vmm.shutdown_exit_code becomes Some(exit_code) breaking the main event loop  |
        //   +------------------------+----------------------------+------------------------+
        // Vcpu initiated teardown starts from `fn Vcpu::exit()` (step 1).
        // Vmm initiated teardown starts from `pub fn Vmm::stop()` (step 4).
        // Once `vmm.shutdown_exit_code` becomes `Some(exit_code)`, it is the upper layer's
        // responsibility to break main event loop and propagate the exit code value.
        // Signal Vmm of Vcpu exit.
        if let Err(err) = self.exit_evt.write(1) {
            METRICS.vcpu.failures.inc();
            error!("Failed signaling vcpu exit event: {}", err);
        }
        // From this state we only accept going to finished.
        loop {
            self.response_sender
                .send(VcpuResponse::Exited(exit_code))
                .expect("vcpu channel unexpectedly closed");
            // Wait for and only accept 'VcpuEvent::Finish'.
            if let Ok(VcpuEvent::Finish) = self.event_receiver.recv() {
                break;
            }
        }
        StateMachine::finish()
    }
    /// Runs the vCPU in KVM context and handles the kvm exit reason.
    ///
    /// Returns error or enum specifying whether emulation was handled or interrupted.
    pub fn run_emulation(&mut self) -> Result<VcpuEmulation, VcpuError> {
        if self.kvm_vcpu.fd.get_kvm_run().immediate_exit == 1u8 {
            warn!("Requested a vCPU run with immediate_exit enabled. The operation was skipped");
            self.kvm_vcpu.fd.set_kvm_immediate_exit(0);
            return Ok(VcpuEmulation::Interrupted);
        }
        match self.kvm_vcpu.fd.run() {
            Err(ref err) if err.errno() == libc::EINTR => {
                self.kvm_vcpu.fd.set_kvm_immediate_exit(0);
                // Notify that this KVM_RUN was interrupted.
                Ok(VcpuEmulation::Interrupted)
            }
            #[cfg(feature = "gdb")]
            Ok(VcpuExit::Debug(_)) => {
                if let Some(gdb_event) = &self.gdb_event {
                    gdb_event
                        .send(get_raw_tid(self.kvm_vcpu.index.into()))
                        .expect("Unable to notify gdb event");
                }
                Ok(VcpuEmulation::Paused)
            }
            emulation_result => handle_kvm_exit(&mut self.kvm_vcpu.peripherals, emulation_result),
        }
    }
}
/// Handle the return value of a call to [`VcpuFd::run`] and update our emulation accordingly
fn handle_kvm_exit(
    peripherals: &mut Peripherals,
    emulation_result: Result<VcpuExit, errno::Error>,
) -> Result<VcpuEmulation, VcpuError> {
    match emulation_result {
        Ok(run) => match run {
            VcpuExit::MmioRead(addr, data) => {
                if let Some(mmio_bus) = &peripherals.mmio_bus {
                    let _metric = METRICS.vcpu.exit_mmio_read_agg.record_latency_metrics();
                    if let Err(err) = mmio_bus.read(addr, data) {
                        warn!("Invalid MMIO read @ {addr:#x}:{:#x}: {err}", data.len());
                    }
                    METRICS.vcpu.exit_mmio_read.inc();
                }
                Ok(VcpuEmulation::Handled)
            }
            VcpuExit::MmioWrite(addr, data) => {
                if let Some(mmio_bus) = &peripherals.mmio_bus {
                    let _metric = METRICS.vcpu.exit_mmio_write_agg.record_latency_metrics();
                    if let Err(err) = mmio_bus.write(addr, data) {
                        // Fixed copy-paste bug: this branch used to log "Invalid MMIO read".
                        warn!("Invalid MMIO write @ {addr:#x}:{:#x}: {err}", data.len());
                    }
                    METRICS.vcpu.exit_mmio_write.inc();
                }
                Ok(VcpuEmulation::Handled)
            }
            VcpuExit::Hlt => {
                info!("Received KVM_EXIT_HLT signal");
                Ok(VcpuEmulation::Stopped)
            }
            VcpuExit::Shutdown => {
                info!("Received KVM_EXIT_SHUTDOWN signal");
                Ok(VcpuEmulation::Stopped)
            }
            // Documentation specifies that below kvm exits are considered
            // errors.
            VcpuExit::FailEntry(hardware_entry_failure_reason, cpu) => {
                // Hardware entry failure.
                METRICS.vcpu.failures.inc();
                error!(
                    "Received KVM_EXIT_FAIL_ENTRY signal: {} on cpu {}",
                    hardware_entry_failure_reason, cpu
                );
                Err(VcpuError::FaultyKvmExit(format!(
                    "{:?}",
                    VcpuExit::FailEntry(hardware_entry_failure_reason, cpu)
                )))
            }
            VcpuExit::InternalError => {
                // Failure from the Linux KVM subsystem rather than from the hardware.
                METRICS.vcpu.failures.inc();
                error!("Received KVM_EXIT_INTERNAL_ERROR signal");
                Err(VcpuError::FaultyKvmExit(format!(
                    "{:?}",
                    VcpuExit::InternalError
                )))
            }
            VcpuExit::SystemEvent(event_type, event_flags) => match event_type {
                KVM_SYSTEM_EVENT_RESET | KVM_SYSTEM_EVENT_SHUTDOWN => {
                    info!(
                        "Received KVM_SYSTEM_EVENT: type: {}, event: {:?}",
                        event_type, event_flags
                    );
                    Ok(VcpuEmulation::Stopped)
                }
                _ => {
                    METRICS.vcpu.failures.inc();
                    error!(
                        "Received KVM_SYSTEM_EVENT signal type: {}, flag: {:?}",
                        event_type, event_flags
                    );
                    Err(VcpuError::FaultyKvmExit(format!(
                        "{:?}",
                        VcpuExit::SystemEvent(event_type, event_flags)
                    )))
                }
            },
            arch_specific_reason => {
                // run specific architecture emulation.
                peripherals.run_arch_emulation(arch_specific_reason)
            }
        },
        // KVM_RUN failed with a raw errno; classify it. (Stale comment about an `unwrap`
        // on `raw_os_error` removed — `err.errno()` cannot fail.)
        Err(ref err) => match err.errno() {
            libc::EAGAIN => Ok(VcpuEmulation::Handled),
            libc::ENOSYS => {
                METRICS.vcpu.failures.inc();
                error!("Received ENOSYS error because KVM failed to emulate an instruction.");
                Err(VcpuError::FaultyKvmExit(
                    "Received ENOSYS error because KVM failed to emulate an instruction."
                        .to_string(),
                ))
            }
            _ => {
                METRICS.vcpu.failures.inc();
                error!("Failure during vcpu run: {}", err);
                Err(VcpuError::FaultyKvmExit(format!("{}", err)))
            }
        },
    }
}
/// List of events that the Vcpu can receive.
// Sent by `VcpuHandle::send_event`; handled by the vCPU state machine in
// `Vcpu::running` / `Vcpu::paused`.
#[derive(Debug, Clone)]
pub enum VcpuEvent {
    /// The vCPU thread will end when receiving this message.
    Finish,
    /// Pause the Vcpu.
    Pause,
    /// Event to resume the Vcpu.
    Resume,
    /// Event to save the state of a paused Vcpu.
    SaveState,
    /// Event to dump CPU configuration of a paused Vcpu.
    DumpCpuConfig,
}
/// List of responses that the Vcpu reports.
// NOTE: `Debug` is implemented manually below; the `SavedState`/`DumpedCpuConfig`
// payloads are omitted from its output (presumably to keep logs readable).
pub enum VcpuResponse {
    /// Requested action encountered an error.
    Error(VcpuError),
    /// Vcpu is stopped.
    Exited(FcExitCode),
    /// Requested action not allowed.
    NotAllowed(String),
    /// Vcpu is paused.
    Paused,
    /// Vcpu is resumed.
    Resumed,
    /// Vcpu state is saved.
    SavedState(Box<VcpuState>),
    /// Vcpu is in the state where CPU config is dumped.
    DumpedCpuConfig(Box<CpuConfiguration>),
}
impl fmt::Debug for VcpuResponse {
    // Manual `Debug`: payload-free variants print just their name; `SavedState` and
    // `DumpedCpuConfig` intentionally omit their payloads.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use crate::VcpuResponse::*;
        match self {
            Paused => f.write_str("VcpuResponse::Paused"),
            Resumed => f.write_str("VcpuResponse::Resumed"),
            Exited(code) => write!(f, "VcpuResponse::Exited({:?})", code),
            SavedState(_) => f.write_str("VcpuResponse::SavedState"),
            Error(err) => write!(f, "VcpuResponse::Error({:?})", err),
            NotAllowed(reason) => write!(f, "VcpuResponse::NotAllowed({})", reason),
            DumpedCpuConfig(_) => f.write_str("VcpuResponse::DumpedCpuConfig"),
        }
    }
}
/// Wrapper over Vcpu that hides the underlying interactions with the Vcpu thread.
#[derive(Debug)]
pub struct VcpuHandle {
    event_sender: Sender<VcpuEvent>,
    response_receiver: Receiver<VcpuResponse>,
    /// Copy of the vCPU's `VcpuFd`, used by `send_event` to request an immediate exit
    /// out of `KVM_RUN`.
    pub vcpu_fd: VcpuFd,
    // Rust JoinHandles have to be wrapped in Option if you ever plan on 'join()'ing them.
    // We want to be able to join these threads in tests.
    vcpu_thread: Option<thread::JoinHandle<()>>,
}
/// Error type for [`VcpuHandle::send_event`].
// Wraps the errno produced when `kill()`ing the vCPU thread fails.
#[derive(Debug, derive_more::From, thiserror::Error)]
#[error("Failed to signal vCPU: {0}")]
pub struct VcpuSendEventError(pub vmm_sys_util::errno::Error);
impl VcpuHandle {
    /// Creates a new [`VcpuHandle`].
    ///
    /// # Arguments
    /// + `event_sender`: [`Sender`] to communicate [`VcpuEvent`] to control the vcpu.
    /// + `response_receiver`: [`Receiver`] from which the vcpu's responses can be read.
    /// + `vcpu_fd`: copy of the vcpu's [`VcpuFd`], used to request immediate exits.
    /// + `vcpu_thread`: A [`JoinHandle`] for the vcpu thread.
    pub fn new(
        event_sender: Sender<VcpuEvent>,
        response_receiver: Receiver<VcpuResponse>,
        vcpu_fd: VcpuFd,
        vcpu_thread: thread::JoinHandle<()>,
    ) -> Self {
        Self {
            event_sender,
            response_receiver,
            vcpu_fd,
            vcpu_thread: Some(vcpu_thread),
        }
    }
    /// Sends event to vCPU.
    ///
    /// # Errors
    ///
    /// When [`vmm_sys_util::linux::signal::Killable::kill`] errors.
    pub fn send_event(&mut self, event: VcpuEvent) -> Result<(), VcpuSendEventError> {
        // Use expect() to crash if the other thread closed this channel.
        self.event_sender
            .send(event)
            .expect("event sender channel closed on vcpu end.");
        // Kick the vcpu so it picks up the message.
        // Add a fence to ensure the write is visible to the vcpu thread
        // (paired with the Acquire fence in the kick signal handler).
        self.vcpu_fd.set_kvm_immediate_exit(1);
        fence(Ordering::Release);
        self.vcpu_thread
            .as_ref()
            // Safe to unwrap since constructor make this 'Some'.
            .unwrap()
            .kill(sigrtmin() + VCPU_RTSIG_OFFSET)?;
        Ok(())
    }
    /// Returns a reference to the [`Receiver`] from which the vcpu's responses can be read.
    pub fn response_receiver(&self) -> &Receiver<VcpuResponse> {
        &self.response_receiver
    }
}
// Wait for the Vcpu thread to finish execution
impl Drop for VcpuHandle {
    fn drop(&mut self) {
        // We assume that by the time a VcpuHandle is dropped, other code has run to
        // get the state machine loop to finish so the thread is ready to join.
        // The strategy of avoiding more complex messaging protocols during the Drop
        // helps avoid cycles which were preventing a truly clean shutdown.
        //
        // If the code hangs at this point, that means that a Finish event was not
        // sent by Vmm.
        // `vcpu_thread` is always `Some` until drop (set in `VcpuHandle::new`), so the
        // first unwrap cannot fail; the second propagates a vCPU thread panic.
        self.vcpu_thread.take().unwrap().join().unwrap();
    }
}
/// Vcpu emulation state.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum VcpuEmulation {
    /// The KVM exit was handled; emulation can continue immediately.
    Handled,
    /// `KVM_RUN` was interrupted (EINTR / immediate_exit); check external events.
    Interrupted,
    /// The guest halted or shut down; the vCPU should stop.
    Stopped,
    /// Pause request (GDB debug exit).
    #[cfg(feature = "gdb")]
    Paused,
}
#[cfg(test)]
pub(crate) mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
#[cfg(target_arch = "x86_64")]
use std::collections::BTreeMap;
use std::sync::atomic::Ordering;
use std::sync::{Arc, Barrier, Mutex};
use linux_loader::loader::KernelLoader;
use vmm_sys_util::errno;
use super::*;
use crate::RECV_TIMEOUT_SEC;
use crate::arch::{BootProtocol, EntryPoint};
use crate::seccomp::get_empty_filters;
use crate::utils::mib_to_bytes;
use crate::utils::signal::validate_signal_num;
use crate::vstate::bus::BusDevice;
use crate::vstate::kvm::Kvm;
use crate::vstate::memory::{GuestAddress, GuestMemoryMmap};
use crate::vstate::vcpu::VcpuError as EmulationError;
use crate::vstate::vm::Vm;
use crate::vstate::vm::tests::setup_vm_with_memory;
/// No-op bus device used as an MMIO target in tests.
struct DummyDevice;
impl BusDevice for DummyDevice {
    // Reads leave the buffer untouched.
    fn read(&mut self, _base: u64, _offset: u64, _data: &mut [u8]) {}
    // Writes are discarded; no barrier is returned.
    fn write(&mut self, _base: u64, _offset: u64, _data: &[u8]) -> Option<Arc<Barrier>> {
        None
    }
}
#[test]
fn test_handle_kvm_exit() {
    let (_, _, mut vcpu) = setup_vcpu(0x1000);
    // Guest halt / shutdown exits translate to `VcpuEmulation::Stopped`.
    let res = handle_kvm_exit(&mut vcpu.kvm_vcpu.peripherals, Ok(VcpuExit::Hlt));
    assert_eq!(res.unwrap(), VcpuEmulation::Stopped);
    let res = handle_kvm_exit(&mut vcpu.kvm_vcpu.peripherals, Ok(VcpuExit::Shutdown));
    assert_eq!(res.unwrap(), VcpuEmulation::Stopped);
    // FailEntry and InternalError are hard errors.
    let res = handle_kvm_exit(
        &mut vcpu.kvm_vcpu.peripherals,
        Ok(VcpuExit::FailEntry(0, 0)),
    );
    assert_eq!(
        format!("{:?}", res.unwrap_err()),
        format!(
            "{:?}",
            EmulationError::FaultyKvmExit("FailEntry(0, 0)".to_string())
        )
    );
    let res = handle_kvm_exit(&mut vcpu.kvm_vcpu.peripherals, Ok(VcpuExit::InternalError));
    assert_eq!(
        format!("{:?}", res.unwrap_err()),
        format!(
            "{:?}",
            EmulationError::FaultyKvmExit("InternalError".to_string())
        )
    );
    // System events: SHUTDOWN (1) and RESET (2) stop the vCPU; anything else is an error.
    let res = handle_kvm_exit(
        &mut vcpu.kvm_vcpu.peripherals,
        Ok(VcpuExit::SystemEvent(2, &[])),
    );
    assert_eq!(res.unwrap(), VcpuEmulation::Stopped);
    let res = handle_kvm_exit(
        &mut vcpu.kvm_vcpu.peripherals,
        Ok(VcpuExit::SystemEvent(1, &[])),
    );
    assert_eq!(res.unwrap(), VcpuEmulation::Stopped);
    let res = handle_kvm_exit(
        &mut vcpu.kvm_vcpu.peripherals,
        Ok(VcpuExit::SystemEvent(3, &[])),
    );
    assert_eq!(
        format!("{:?}", res.unwrap_err()),
        format!(
            "{:?}",
            EmulationError::FaultyKvmExit("SystemEvent(3, [])".to_string())
        )
    );
    // Check what happens with an unhandled exit reason.
    let res = handle_kvm_exit(&mut vcpu.kvm_vcpu.peripherals, Ok(VcpuExit::Unknown));
    assert_eq!(
        res.unwrap_err().to_string(),
        "Unexpected kvm exit received: Unknown".to_string()
    );
    // Raw errno results: EAGAIN is benign, ENOSYS and anything else are faults.
    let res = handle_kvm_exit(
        &mut vcpu.kvm_vcpu.peripherals,
        Err(errno::Error::new(libc::EAGAIN)),
    );
    assert_eq!(res.unwrap(), VcpuEmulation::Handled);
    let res = handle_kvm_exit(
        &mut vcpu.kvm_vcpu.peripherals,
        Err(errno::Error::new(libc::ENOSYS)),
    );
    assert_eq!(
        format!("{:?}", res.unwrap_err()),
        format!(
            "{:?}",
            EmulationError::FaultyKvmExit(
                "Received ENOSYS error because KVM failed to emulate an instruction."
                    .to_string()
            )
        )
    );
    let res = handle_kvm_exit(
        &mut vcpu.kvm_vcpu.peripherals,
        Err(errno::Error::new(libc::EINVAL)),
    );
    assert_eq!(
        format!("{:?}", res.unwrap_err()),
        format!(
            "{:?}",
            EmulationError::FaultyKvmExit("Invalid argument (os error 22)".to_string())
        )
    );
    // MMIO exits are dispatched through the vCPU's MMIO bus (here, a no-op dummy device).
    let bus = Arc::new(Bus::new());
    let dummy = Arc::new(Mutex::new(DummyDevice));
    bus.insert(dummy, 0x10, 0x10).unwrap();
    vcpu.set_mmio_bus(bus);
    let addr = 0x10;
    let res = handle_kvm_exit(
        &mut vcpu.kvm_vcpu.peripherals,
        Ok(VcpuExit::MmioRead(addr, &mut [0, 0, 0, 0])),
    );
    assert_eq!(res.unwrap(), VcpuEmulation::Handled);
    let res = handle_kvm_exit(
        &mut vcpu.kvm_vcpu.peripherals,
        Ok(VcpuExit::MmioWrite(addr, &[0, 0, 0, 0])),
    );
    assert_eq!(res.unwrap(), VcpuEmulation::Handled);
}
impl PartialEq for VcpuResponse {
fn eq(&self, other: &Self) -> bool {
use crate::VcpuResponse::*;
// Guard match with no wildcard to make sure we catch new enum variants.
match self {
Paused | Resumed | Exited(_) => (),
Error(_) | NotAllowed(_) | SavedState(_) | DumpedCpuConfig(_) => (),
};
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vstate/memory.rs | src/vmm/src/vstate/memory.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::fs::File;
use std::io::SeekFrom;
use std::ops::Deref;
use std::sync::{Arc, Mutex};
use bitvec::vec::BitVec;
use kvm_bindings::{KVM_MEM_LOG_DIRTY_PAGES, kvm_userspace_memory_region};
use log::error;
use serde::{Deserialize, Serialize};
pub use vm_memory::bitmap::{AtomicBitmap, BS, Bitmap, BitmapSlice};
pub use vm_memory::mmap::MmapRegionBuilder;
use vm_memory::mmap::{MmapRegionError, NewBitmap};
pub use vm_memory::{
Address, ByteValued, Bytes, FileOffset, GuestAddress, GuestMemory, GuestMemoryRegion,
GuestUsize, MemoryRegionAddress, MmapRegion, address,
};
use vm_memory::{GuestMemoryError, GuestMemoryRegionBytes, VolatileSlice, WriteVolatile};
use vmm_sys_util::errno;
use crate::utils::{get_page_size, u64_to_usize};
use crate::vmm_config::machine_config::HugePageConfig;
use crate::vstate::vm::VmError;
use crate::{DirtyBitmap, Vm};
/// A guest memory region backed by an mmap; the optional [`AtomicBitmap`] tracks dirty pages.
pub type GuestRegionMmap = vm_memory::GuestRegionMmap<Option<AtomicBitmap>>;
/// Collection of [`GuestRegionMmapExt`] regions describing all of guest memory.
pub type GuestMemoryMmap = vm_memory::GuestRegionCollection<GuestRegionMmapExt>;
/// A raw mmap region with an optional dirty-page bitmap.
pub type GuestMmapRegion = vm_memory::MmapRegion<Option<AtomicBitmap>>;
/// Errors associated with guest memory management (creation, restore, dump).
///
/// NOTE: `displaydoc::Display` turns each *variant's* doc comment into its
/// `Display` message, so the variant docs below are runtime error text.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum MemoryError {
    /// Cannot fetch system's page size: {0}
    PageSize(errno::Error),
    /// Cannot dump memory: {0}
    WriteMemory(GuestMemoryError),
    /// Cannot create mmap region: {0}
    MmapRegionError(MmapRegionError),
    /// Cannot create guest memory
    VmMemoryError,
    /// Cannot create memfd: {0}
    Memfd(memfd::Error),
    /// Cannot resize memfd file: {0}
    MemfdSetLen(std::io::Error),
    /// Total sum of memory regions exceeds largest possible file offset
    OffsetTooLarge,
    /// Cannot retrieve snapshot file metadata: {0}
    FileMetadata(std::io::Error),
    /// Memory region is not aligned
    Unaligned,
    /// Error protecting memory slot: {0}
    Mprotect(std::io::Error),
}
/// Type of the guest region
#[derive(Copy, Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub enum GuestRegionType {
    /// Guest DRAM: always present and always plugged.
    Dram,
    /// Hotpluggable memory: slots are (un)plugged into KVM at runtime.
    Hotpluggable,
}
/// An extension to GuestMemoryRegion that can be split into multiple KVM slots of
/// the same slot_size, and stores the type of region, and the starting KVM slot number.
#[derive(Debug)]
pub struct GuestRegionMmapExt {
    /// the wrapped GuestRegionMmap
    pub inner: GuestRegionMmap,
    /// the type of region
    pub region_type: GuestRegionType,
    /// the starting KVM slot number assigned to this region
    pub slot_from: u32,
    /// the size, in bytes, of each slot of this region
    pub slot_size: usize,
    /// a bitvec indicating whether slot `i` is plugged into KVM (1) or not (0);
    /// guarded by a mutex since hotplug requests mutate it at runtime
    pub plugged: Mutex<BitVec>,
}
/// A guest memory slot, which is a slice of a guest memory region
/// corresponding to exactly one KVM memory slot.
#[derive(Debug)]
pub struct GuestMemorySlot<'a> {
    /// KVM memory slot number
    pub(crate) slot: u32,
    /// Start guest address of the slot
    pub(crate) guest_addr: GuestAddress,
    /// Corresponding slice in host memory
    pub(crate) slice: VolatileSlice<'a, BS<'a, Option<AtomicBitmap>>>,
}
impl From<&GuestMemorySlot<'_>> for kvm_userspace_memory_region {
    /// Builds the KVM memory-slot descriptor for a guest memory slot,
    /// enabling dirty-page logging iff the slot carries a dirty bitmap.
    fn from(mem_slot: &GuestMemorySlot) -> Self {
        let mut flags = 0;
        if mem_slot.slice.bitmap().is_some() {
            flags = KVM_MEM_LOG_DIRTY_PAGES;
        }
        let userspace_addr = mem_slot.slice.ptr_guard().as_ptr() as u64;
        kvm_userspace_memory_region {
            flags,
            slot: mem_slot.slot,
            guest_phys_addr: mem_slot.guest_addr.raw_value(),
            memory_size: mem_slot.slice.len() as u64,
            userspace_addr,
        }
    }
}
impl<'a> GuestMemorySlot<'a> {
    /// Dumps the dirty pages in this slot onto the writer.
    ///
    /// A page is written if it is marked dirty either in KVM's bitmap
    /// (`kvm_bitmap`, one bit per page, 64 pages per `u64` word) or in
    /// Firecracker's internal bitmap attached to the slice. Clean pages are
    /// skipped by seeking the writer forward, so the writer's offset stays
    /// aligned with guest-physical offsets while consecutive dirty pages
    /// are written in single batched calls.
    pub(crate) fn dump_dirty<T: WriteVolatile + std::io::Seek>(
        &self,
        writer: &mut T,
        kvm_bitmap: &[u64],
        page_size: usize,
    ) -> Result<(), GuestMemoryError> {
        let firecracker_bitmap = self.slice.bitmap();
        // Length in bytes of the current run of dirty pages.
        let mut write_size = 0;
        // Length in bytes of the current run of clean pages.
        let mut skip_size = 0;
        // Byte offset into the slot where the current dirty run begins.
        let mut dirty_batch_start = 0;
        for (i, v) in kvm_bitmap.iter().enumerate() {
            for j in 0..64 {
                let is_kvm_page_dirty = ((v >> j) & 1u64) != 0u64;
                let page_offset = ((i * 64) + j) * page_size;
                let is_firecracker_page_dirty = firecracker_bitmap.dirty_at(page_offset);
                if is_kvm_page_dirty || is_firecracker_page_dirty {
                    // We are at the start of a new batch of dirty pages.
                    if skip_size > 0 {
                        // Seek forward over the unmodified pages.
                        writer
                            .seek(SeekFrom::Current(skip_size.try_into().unwrap()))
                            .unwrap();
                        dirty_batch_start = page_offset;
                        skip_size = 0;
                    }
                    write_size += page_size;
                } else {
                    // We are at the end of a batch of dirty pages.
                    if write_size > 0 {
                        // Dump the dirty pages.
                        let slice = &self.slice.subslice(dirty_batch_start, write_size)?;
                        writer.write_all_volatile(slice)?;
                        write_size = 0;
                    }
                    skip_size += page_size;
                }
            }
        }
        // Flush a dirty run that extends to the very end of the slot.
        if write_size > 0 {
            writer.write_all_volatile(&self.slice.subslice(dirty_batch_start, write_size)?)?;
        }
        Ok(())
    }

    /// Makes the slot host memory PROT_NONE (true) or PROT_READ|PROT_WRITE (false).
    ///
    /// Used when (un)plugging the slot: unplugged slots are made inaccessible
    /// so stray accesses fault instead of touching stale memory.
    pub(crate) fn protect(&self, protected: bool) -> Result<(), MemoryError> {
        let prot = if protected {
            libc::PROT_NONE
        } else {
            libc::PROT_READ | libc::PROT_WRITE
        };
        // SAFETY: Parameters refer to an existing host memory region
        let ret = unsafe {
            libc::mprotect(
                self.slice.ptr_guard_mut().as_ptr().cast(),
                self.slice.len(),
                prot,
            )
        };
        if ret != 0 {
            Err(MemoryError::Mprotect(std::io::Error::last_os_error()))
        } else {
            Ok(())
        }
    }
}
/// Returns `true` iff `addr` lies in the half-open interval `[start, start + len)`.
///
/// Returns `false` when `start + len` would overflow the 64-bit guest
/// address space.
fn addr_in_range(addr: GuestAddress, start: GuestAddress, len: usize) -> bool {
    start
        .checked_add(len as u64)
        .map_or(false, |end| start <= addr && addr < end)
}
impl GuestRegionMmapExt {
/// Adds a DRAM region which only contains a single plugged slot
pub(crate) fn dram_from_mmap_region(region: GuestRegionMmap, slot: u32) -> Self {
let slot_size = u64_to_usize(region.len());
GuestRegionMmapExt {
inner: region,
region_type: GuestRegionType::Dram,
slot_from: slot,
slot_size,
plugged: Mutex::new(BitVec::repeat(true, 1)),
}
}
/// Adds an hotpluggable region which can contain multiple slots and is initially unplugged
pub(crate) fn hotpluggable_from_mmap_region(
region: GuestRegionMmap,
slot_from: u32,
slot_size: usize,
) -> Self {
let slot_cnt = (u64_to_usize(region.len())) / slot_size;
GuestRegionMmapExt {
inner: region,
region_type: GuestRegionType::Hotpluggable,
slot_from,
slot_size,
plugged: Mutex::new(BitVec::repeat(false, slot_cnt)),
}
}
pub(crate) fn from_state(
region: GuestRegionMmap,
state: &GuestMemoryRegionState,
slot_from: u32,
) -> Result<Self, MemoryError> {
let slot_cnt = state.plugged.len();
let slot_size = u64_to_usize(region.len())
.checked_div(slot_cnt)
.ok_or(MemoryError::Unaligned)?;
Ok(GuestRegionMmapExt {
inner: region,
slot_size,
region_type: state.region_type,
slot_from,
plugged: Mutex::new(BitVec::from_iter(state.plugged.iter())),
})
}
pub(crate) fn slot_cnt(&self) -> u32 {
u32::try_from(u64_to_usize(self.len()) / self.slot_size).unwrap()
}
pub(crate) fn mem_slot(&self, slot: u32) -> GuestMemorySlot<'_> {
assert!(slot >= self.slot_from && slot < self.slot_from + self.slot_cnt());
let offset = ((slot - self.slot_from) as u64) * (self.slot_size as u64);
GuestMemorySlot {
slot,
guest_addr: self.start_addr().unchecked_add(offset),
slice: self
.inner
.get_slice(MemoryRegionAddress(offset), self.slot_size)
.expect("slot range should be valid"),
}
}
/// Returns a snapshot of the slots and their state at the time of calling
///
/// Note: to avoid TOCTOU races use only within VMM thread.
pub(crate) fn slots(&self) -> impl Iterator<Item = (GuestMemorySlot<'_>, bool)> {
self.plugged
.lock()
.unwrap()
.iter()
.enumerate()
.map(|(i, b)| {
(
self.mem_slot(self.slot_from + u32::try_from(i).unwrap()),
*b,
)
})
.collect::<Vec<_>>()
.into_iter()
}
/// Returns a snapshot of the plugged slots at the time of calling
///
/// Note: to avoid TOCTOU races use only within VMM thread.
pub(crate) fn plugged_slots(&self) -> impl Iterator<Item = GuestMemorySlot<'_>> {
self.slots()
.filter(|(_, plugged)| *plugged)
.map(|(slot, _)| slot)
}
pub(crate) fn slots_intersecting_range(
&self,
from: GuestAddress,
len: usize,
) -> impl Iterator<Item = GuestMemorySlot<'_>> {
self.slots().map(|(slot, _)| slot).filter(move |slot| {
if let Some(slot_end) = slot.guest_addr.checked_add(slot.slice.len() as u64) {
addr_in_range(slot.guest_addr, from, len) || addr_in_range(slot_end, from, len)
} else {
false
}
})
}
/// (un)plug a slot from an Hotpluggable memory region
pub(crate) fn update_slot(
&self,
vm: &Vm,
mem_slot: &GuestMemorySlot<'_>,
plug: bool,
) -> Result<(), VmError> {
// This function can only be called on hotpluggable regions!
assert!(self.region_type == GuestRegionType::Hotpluggable);
let mut bitmap_guard = self.plugged.lock().unwrap();
let prev = bitmap_guard.replace((mem_slot.slot - self.slot_from) as usize, plug);
// do not do anything if the state is what we're trying to set
if prev == plug {
return Ok(());
}
let mut kvm_region = kvm_userspace_memory_region::from(mem_slot);
if plug {
// make it accessible _before_ adding it to KVM
mem_slot.protect(false)?;
vm.set_user_memory_region(kvm_region)?;
} else {
// to remove it we need to pass a size of zero
kvm_region.memory_size = 0;
vm.set_user_memory_region(kvm_region)?;
// make it protected _after_ removing it from KVM
mem_slot.protect(true)?;
}
Ok(())
}
pub(crate) fn discard_range(
&self,
caddr: MemoryRegionAddress,
len: usize,
) -> Result<(), GuestMemoryError> {
let phys_address = self.get_host_address(caddr)?;
match (self.inner.file_offset(), self.inner.flags()) {
// If and only if we are resuming from a snapshot file, we have a file and it's mapped
// private
(Some(_), flags) if flags & libc::MAP_PRIVATE != 0 => {
// Mmap a new anonymous region over the present one in order to create a hole
// with zero pages.
// This workaround is (only) needed after resuming from a snapshot file because the
// guest memory is mmaped from file as private. In this case, MADV_DONTNEED on the
// file only drops any anonymous pages in range, but subsequent accesses would read
// whatever page is stored on the backing file. Mmapping anonymous pages ensures
// it's zeroed.
// SAFETY: The address and length are known to be valid.
let ret = unsafe {
libc::mmap(
phys_address.cast(),
len,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_FIXED | libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
-1,
0,
)
};
if ret == libc::MAP_FAILED {
let os_error = std::io::Error::last_os_error();
error!("discard_range: mmap failed: {:?}", os_error);
Err(GuestMemoryError::IOError(os_error))
} else {
Ok(())
}
}
// Match either the case of an anonymous mapping, or the case
// of a shared file mapping.
// TODO: madvise(MADV_DONTNEED) doesn't actually work with memfd
// (or in general MAP_SHARED of a fd). In those cases we should use
// fallocate64(FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE).
// We keep falling to the madvise branch to keep the previous behaviour.
_ => {
// Madvise the region in order to mark it as not used.
// SAFETY: The address and length are known to be valid.
let ret = unsafe { libc::madvise(phys_address.cast(), len, libc::MADV_DONTNEED) };
if ret < 0 {
let os_error = std::io::Error::last_os_error();
error!("discard_range: madvise failed: {:?}", os_error);
Err(GuestMemoryError::IOError(os_error))
} else {
Ok(())
}
}
}
}
}
impl Deref for GuestRegionMmapExt {
    type Target = MmapRegion<Option<AtomicBitmap>>;

    // Deref to the underlying mmap region so callers can use `MmapRegion`
    // accessors (len, flags, file_offset, ...) directly.
    fn deref(&self) -> &MmapRegion<Option<AtomicBitmap>> {
        &self.inner
    }
}
// Use the default byte-access implementations supplied by the trait.
impl GuestMemoryRegionBytes for GuestRegionMmapExt {}
#[allow(clippy::cast_possible_wrap)]
#[allow(clippy::cast_possible_truncation)]
// Pure delegation to the wrapped `GuestRegionMmap`; the extension type adds
// no address-space behaviour of its own.
impl GuestMemoryRegion for GuestRegionMmapExt {
    type B = Option<AtomicBitmap>;

    fn len(&self) -> GuestUsize {
        self.inner.len()
    }

    fn start_addr(&self) -> GuestAddress {
        self.inner.start_addr()
    }

    fn bitmap(&self) -> BS<'_, Self::B> {
        self.inner.bitmap()
    }

    fn get_host_address(
        &self,
        addr: MemoryRegionAddress,
    ) -> vm_memory::guest_memory::Result<*mut u8> {
        self.inner.get_host_address(addr)
    }

    fn file_offset(&self) -> Option<&FileOffset> {
        self.inner.file_offset()
    }

    fn get_slice(
        &self,
        offset: MemoryRegionAddress,
        count: usize,
    ) -> vm_memory::guest_memory::Result<VolatileSlice<'_, BS<'_, Self::B>>> {
        self.inner.get_slice(offset, count)
    }
}
/// Creates a `Vec` of [`GuestRegionMmap`]s with the given configuration.
///
/// When `file` is given, regions are mapped from it back-to-back, each at
/// the file offset equal to the total size of the regions preceding it.
/// Fails with [`MemoryError::OffsetTooLarge`] if the accumulated offset
/// overflows, or would reach `i64::MAX` (the largest valid file offset).
pub fn create(
    regions: impl Iterator<Item = (GuestAddress, usize)>,
    mmap_flags: libc::c_int,
    file: Option<File>,
    track_dirty_pages: bool,
) -> Result<Vec<GuestRegionMmap>, MemoryError> {
    let file = file.map(Arc::new);
    let mut offset: u64 = 0;
    let mut out = Vec::new();
    for (start, size) in regions {
        // Attach a dirty-page bitmap only when tracking is requested.
        let bitmap = track_dirty_pages.then(|| AtomicBitmap::with_len(size));
        let mut builder = MmapRegionBuilder::new_with_bitmap(size, bitmap)
            .with_mmap_prot(libc::PROT_READ | libc::PROT_WRITE)
            .with_mmap_flags(libc::MAP_NORESERVE | mmap_flags);
        if let Some(ref file) = file {
            builder = builder.with_file_offset(FileOffset::from_arc(Arc::clone(file), offset));
        }
        // Advance the running file offset, rejecting overflow or offsets
        // that cannot be represented as a positive i64.
        offset = offset
            .checked_add(size as u64)
            .filter(|&new_off| new_off < i64::MAX as u64)
            .ok_or(MemoryError::OffsetTooLarge)?;
        let mmap = builder.build().map_err(MemoryError::MmapRegionError)?;
        let region = GuestRegionMmap::new(mmap, start).ok_or(MemoryError::VmMemoryError)?;
        out.push(region);
    }
    Ok(out)
}
/// Creates guest memory regions backed by a sealed memfd sized to the sum
/// of all region sizes (in bytes).
pub fn memfd_backed(
    regions: &[(GuestAddress, usize)],
    track_dirty_pages: bool,
    huge_pages: HugePageConfig,
) -> Result<Vec<GuestRegionMmap>, MemoryError> {
    // The memfd must cover every region, mapped back-to-back.
    let total_size: u64 = regions.iter().map(|&(_, size)| size as u64).sum();
    let memfd_file = create_memfd(total_size, huge_pages.into())?.into_file();
    let flags = libc::MAP_SHARED | huge_pages.mmap_flags();
    create(regions.iter().copied(), flags, Some(memfd_file), track_dirty_pages)
}
/// Creates anonymous (RAM-backed, private) guest memory regions from the
/// given `(start, size)` descriptions.
pub fn anonymous(
    regions: impl Iterator<Item = (GuestAddress, usize)>,
    track_dirty_pages: bool,
    huge_pages: HugePageConfig,
) -> Result<Vec<GuestRegionMmap>, MemoryError> {
    let flags = libc::MAP_PRIVATE | libc::MAP_ANONYMOUS | huge_pages.mmap_flags();
    create(regions, flags, None, track_dirty_pages)
}
/// Creates guest memory regions mapped (privately) from a snapshot `file`
/// containing the memory contents.
///
/// Fails with [`MemoryError::OffsetTooLarge`] if the summed region sizes
/// overflow, or exceed the file's length (mmapping past EOF would trigger
/// SIGBUS on access to pages beyond the end of the mapped file).
pub fn snapshot_file(
    file: File,
    regions: impl Iterator<Item = (GuestAddress, usize)>,
    track_dirty_pages: bool,
) -> Result<Vec<GuestRegionMmap>, MemoryError> {
    let regions: Vec<_> = regions.collect();
    // Sum region sizes with overflow detection.
    let mut memory_size: u64 = 0;
    for &(_, size) in &regions {
        memory_size = memory_size
            .checked_add(size as u64)
            .ok_or(MemoryError::OffsetTooLarge)?;
    }
    let file_size = file.metadata().map_err(MemoryError::FileMetadata)?.len();
    // Ensure we do not mmap beyond EOF.
    if memory_size > file_size {
        return Err(MemoryError::OffsetTooLarge);
    }
    create(regions.into_iter(), libc::MAP_PRIVATE, Some(file), track_dirty_pages)
}
/// Defines the interface for snapshotting memory.
pub trait GuestMemoryExtension
where
    Self: Sized,
{
    /// Describes GuestMemoryMmap through a GuestMemoryState struct.
    fn describe(&self) -> GuestMemoryState;

    /// Marks the given memory range as dirty in the internal bitmaps.
    fn mark_dirty(&self, addr: GuestAddress, len: usize);

    /// Dumps all contents of GuestMemoryMmap to a writer.
    fn dump<T: WriteVolatile + std::io::Seek>(&self, writer: &mut T) -> Result<(), MemoryError>;

    /// Dumps all pages of GuestMemoryMmap present in `dirty_bitmap` to a writer.
    fn dump_dirty<T: WriteVolatile + std::io::Seek>(
        &self,
        writer: &mut T,
        dirty_bitmap: &DirtyBitmap,
    ) -> Result<(), MemoryError>;

    /// Resets all the memory region bitmaps.
    fn reset_dirty(&self);

    /// Stores the (KVM) dirty bitmap into the internal bitmaps.
    fn store_dirty_bitmap(&self, dirty_bitmap: &DirtyBitmap, page_size: usize);

    /// Applies a function to each region in the memory range
    /// `[addr, addr + range_len)`, stopping at the first error.
    fn try_for_each_region_in_range<F>(
        &self,
        addr: GuestAddress,
        range_len: usize,
        f: F,
    ) -> Result<(), GuestMemoryError>
    where
        F: FnMut(&GuestRegionMmapExt, MemoryRegionAddress, usize) -> Result<(), GuestMemoryError>;

    /// Discards a memory range, freeing up memory pages.
    fn discard_range(&self, addr: GuestAddress, range_len: usize) -> Result<(), GuestMemoryError>;
}
/// State of a guest memory region saved to file/buffer.
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct GuestMemoryRegionState {
    // This should have been named `base_guest_addr` since it's _guest_ addr, but for
    // backward compatibility we have to keep this name. At least this comment should help.
    /// Base GuestAddress.
    pub base_address: u64,
    /// Region size.
    pub size: usize,
    /// Region type
    pub region_type: GuestRegionType,
    /// Plugged/unplugged status of each slot; its length determines the
    /// number of slots the region is split into on restore.
    pub plugged: Vec<bool>,
}
/// Describes guest memory regions and their snapshot file mappings.
#[derive(Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct GuestMemoryState {
    /// List of regions, in guest-physical order.
    pub regions: Vec<GuestMemoryRegionState>,
}
impl GuestMemoryState {
    /// Turns this [`GuestMemoryState`] into a description of guest memory regions as understood
    /// by the creation functions of [`GuestMemoryExtension`]
    pub fn regions(&self) -> impl Iterator<Item = (GuestAddress, usize)> + '_ {
        self.regions
            .iter()
            .map(|region| (GuestAddress(region.base_address), region.size))
    }
}
impl GuestMemoryExtension for GuestMemoryMmap {
    /// Describes GuestMemoryMmap through a GuestMemoryState struct.
    fn describe(&self) -> GuestMemoryState {
        let mut guest_memory_state = GuestMemoryState::default();
        self.iter().for_each(|region| {
            guest_memory_state.regions.push(GuestMemoryRegionState {
                base_address: region.start_addr().0,
                size: u64_to_usize(region.len()),
                region_type: region.region_type,
                // Materialize the per-slot plugged bitmap as plain bools for serialization.
                plugged: region.plugged.lock().unwrap().iter().by_vals().collect(),
            });
        });
        guest_memory_state
    }

    /// Mark memory range as dirty
    fn mark_dirty(&self, addr: GuestAddress, len: usize) {
        // ignore invalid ranges using .flatten()
        for slice in self.get_slices(addr, len).flatten() {
            slice.bitmap().mark_dirty(0, slice.len());
        }
    }

    /// Dumps all contents of GuestMemoryMmap to a writer.
    ///
    /// Unplugged slots are skipped by seeking the writer forward, so every
    /// slot lands at the file offset matching its guest-physical offset.
    fn dump<T: WriteVolatile + std::io::Seek>(&self, writer: &mut T) -> Result<(), MemoryError> {
        self.iter()
            .flat_map(|region| region.slots())
            .try_for_each(|(mem_slot, plugged)| {
                if !plugged {
                    let ilen = i64::try_from(mem_slot.slice.len()).unwrap();
                    writer.seek(SeekFrom::Current(ilen)).unwrap();
                } else {
                    writer.write_all_volatile(&mem_slot.slice)?;
                }
                Ok(())
            })
            .map_err(MemoryError::WriteMemory)
    }

    /// Dumps all pages of GuestMemoryMmap present in `dirty_bitmap` to a writer.
    ///
    /// On success the internal dirty bitmaps are reset; on failure the KVM
    /// bitmap is folded into the internal one so no dirty information is
    /// lost for a later retry.
    fn dump_dirty<T: WriteVolatile + std::io::Seek>(
        &self,
        writer: &mut T,
        dirty_bitmap: &DirtyBitmap,
    ) -> Result<(), MemoryError> {
        let page_size = get_page_size().map_err(MemoryError::PageSize)?;
        let write_result =
            self.iter()
                .flat_map(|region| region.slots())
                .try_for_each(|(mem_slot, plugged)| {
                    if !plugged {
                        // Unplugged slots have no content: seek past them.
                        let ilen = i64::try_from(mem_slot.slice.len()).unwrap();
                        writer.seek(SeekFrom::Current(ilen)).unwrap();
                    } else {
                        let kvm_bitmap = dirty_bitmap.get(&mem_slot.slot).unwrap();
                        mem_slot.dump_dirty(writer, kvm_bitmap, page_size)?;
                    }
                    Ok(())
                });
        if write_result.is_err() {
            self.store_dirty_bitmap(dirty_bitmap, page_size);
        } else {
            self.reset_dirty();
        }
        write_result.map_err(MemoryError::WriteMemory)
    }

    /// Resets all the memory region bitmaps
    fn reset_dirty(&self) {
        self.iter().for_each(|region| {
            if let Some(bitmap) = (**region).bitmap() {
                bitmap.reset();
            }
        })
    }

    /// Stores the KVM dirty bitmap into the internal (Firecracker) bitmaps.
    fn store_dirty_bitmap(&self, dirty_bitmap: &DirtyBitmap, page_size: usize) {
        self.iter()
            .flat_map(|region| region.plugged_slots())
            .for_each(|mem_slot| {
                let kvm_bitmap = dirty_bitmap.get(&mem_slot.slot).unwrap();
                let firecracker_bitmap = mem_slot.slice.bitmap();
                // One bit per page, 64 pages per u64 word.
                for (i, v) in kvm_bitmap.iter().enumerate() {
                    for j in 0..64 {
                        let is_kvm_page_dirty = ((v >> j) & 1u64) != 0u64;
                        if is_kvm_page_dirty {
                            let page_offset = ((i * 64) + j) * page_size;
                            firecracker_bitmap.mark_dirty(page_offset, 1)
                        }
                    }
                }
            });
    }

    fn try_for_each_region_in_range<F>(
        &self,
        addr: GuestAddress,
        range_len: usize,
        mut f: F,
    ) -> Result<(), GuestMemoryError>
    where
        F: FnMut(&GuestRegionMmapExt, MemoryRegionAddress, usize) -> Result<(), GuestMemoryError>,
    {
        let mut cur = addr;
        let mut remaining = range_len;
        // iterate over all adjacent consecutive regions in range
        while let Some(region) = self.find_region(cur) {
            let start = region.to_region_addr(cur).unwrap();
            let len = std::cmp::min(
                // remaining bytes inside the region
                u64_to_usize(region.len() - start.raw_value()),
                // remaining bytes to discard
                remaining,
            );
            f(region, start, len)?;
            remaining -= len;
            if remaining == 0 {
                return Ok(());
            }
            cur = cur
                .checked_add(len as u64)
                .ok_or(GuestMemoryError::GuestAddressOverflow)?;
        }
        // if we exit the loop because we didn't find a region, return an error
        Err(GuestMemoryError::InvalidGuestAddress(cur))
    }

    fn discard_range(&self, addr: GuestAddress, range_len: usize) -> Result<(), GuestMemoryError> {
        self.try_for_each_region_in_range(addr, range_len, |region, start, len| {
            region.discard_range(start, len)
        })
    }
}
/// Creates a memfd of `mem_size` bytes (optionally hugetlb-backed) and
/// seals it against any further resizing or sealing changes.
fn create_memfd(
    mem_size: u64,
    hugetlb_size: Option<memfd::HugetlbSize>,
) -> Result<memfd::Memfd, MemoryError> {
    // Create the memfd, allowing seals to be added later.
    let mem_file = memfd::MemfdOptions::default()
        .hugetlb(hugetlb_size)
        .allow_sealing(true)
        .create("guest_mem")
        .map_err(MemoryError::Memfd)?;

    // Resize to guest mem size.
    mem_file
        .as_file()
        .set_len(mem_size)
        .map_err(MemoryError::MemfdSetLen)?;

    // Forbid shrinking and growing, then seal the seal set itself.
    // `SealSeal` must come last so the preceding seals can still be added.
    for seal in [
        memfd::FileSeal::SealShrink,
        memfd::FileSeal::SealGrow,
        memfd::FileSeal::SealSeal,
    ] {
        mem_file.add_seal(seal).map_err(MemoryError::Memfd)?;
    }

    Ok(mem_file)
}
/// Test utilities
pub mod test_utils {
    use super::*;

    /// Converts a vec of [`GuestRegionMmap`] into a [`GuestMemoryMmap`],
    /// wrapping each region as single-slot DRAM with sequential dummy
    /// slot numbers starting at 0.
    pub fn into_region_ext(regions: Vec<GuestRegionMmap>) -> GuestMemoryMmap {
        let ext_regions: Vec<_> = regions
            .into_iter()
            .enumerate()
            .map(|(slot, region)| {
                GuestRegionMmapExt::dram_from_mmap_region(region, u32::try_from(slot).unwrap())
            })
            .collect();
        GuestMemoryMmap::from_regions(ext_regions).unwrap()
    }
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use std::collections::HashMap;
use std::io::{Read, Seek, Write};
use vmm_sys_util::tempfile::TempFile;
use super::*;
use crate::snapshot::Snapshot;
use crate::test_utils::single_region_mem;
use crate::utils::{get_page_size, mib_to_bytes};
use crate::vstate::memory::test_utils::into_region_ext;
#[test]
fn test_anonymous() {
for dirty_page_tracking in [true, false] {
let region_size = 0x10000;
let regions = vec![
(GuestAddress(0x0), region_size),
(GuestAddress(0x10000), region_size),
(GuestAddress(0x20000), region_size),
(GuestAddress(0x30000), region_size),
];
let guest_memory = anonymous(
regions.into_iter(),
dirty_page_tracking,
HugePageConfig::None,
)
.unwrap();
guest_memory.iter().for_each(|region| {
assert_eq!(region.bitmap().is_some(), dirty_page_tracking);
});
}
}
#[test]
fn test_snapshot_file_success() {
for dirty_page_tracking in [true, false] {
let page_size = 0x1000;
let mut file = TempFile::new().unwrap().into_file();
file.set_len(page_size as u64).unwrap();
file.write_all(&vec![0x42u8; page_size]).unwrap();
let regions = vec![(GuestAddress(0), page_size)];
let guest_regions =
snapshot_file(file, regions.into_iter(), dirty_page_tracking).unwrap();
assert_eq!(guest_regions.len(), 1);
guest_regions.iter().for_each(|region| {
assert_eq!(region.bitmap().is_some(), dirty_page_tracking);
});
}
}
#[test]
fn test_snapshot_file_multiple_regions() {
let page_size = 0x1000;
let total_size = 3 * page_size;
let mut file = TempFile::new().unwrap().into_file();
file.set_len(total_size as u64).unwrap();
file.write_all(&vec![0x42u8; total_size]).unwrap();
let regions = vec![
(GuestAddress(0), page_size),
(GuestAddress(0x10000), page_size),
(GuestAddress(0x20000), page_size),
];
let guest_regions = snapshot_file(file, regions.into_iter(), false).unwrap();
assert_eq!(guest_regions.len(), 3);
}
#[test]
fn test_snapshot_file_offset_too_large() {
let page_size = 0x1000;
let mut file = TempFile::new().unwrap().into_file();
file.set_len(page_size as u64).unwrap();
file.write_all(&vec![0x42u8; page_size]).unwrap();
let regions = vec![(GuestAddress(0), 2 * page_size)];
let result = snapshot_file(file, regions.into_iter(), false);
assert!(matches!(result.unwrap_err(), MemoryError::OffsetTooLarge));
}
#[test]
fn test_mark_dirty() {
let page_size = get_page_size().unwrap();
let region_size = page_size * 3;
let regions = vec![
(GuestAddress(0), region_size), // pages 0-2
(GuestAddress(region_size as u64), region_size), // pages 3-5
(GuestAddress(region_size as u64 * 2), region_size), // pages 6-8
];
let guest_memory =
into_region_ext(anonymous(regions.into_iter(), true, HugePageConfig::None).unwrap());
let dirty_map = [
// page 0: not dirty
(0, page_size, false),
// pages 1-2: dirty range in one region
(page_size, page_size * 2, true),
// page 3: not dirty
(page_size * 3, page_size, false),
// pages 4-7: dirty range across 2 regions,
(page_size * 4, page_size * 4, true),
// page 8: not dirty
(page_size * 8, page_size, false),
];
// Mark dirty memory
for (addr, len, dirty) in &dirty_map {
if *dirty {
guest_memory.mark_dirty(GuestAddress(*addr as u64), *len);
}
}
// Check that the dirty memory was set correctly
for (addr, len, dirty) in &dirty_map {
for slice in guest_memory
.get_slices(GuestAddress(*addr as u64), *len)
.flatten()
{
for i in 0..slice.len() {
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vstate/resources.rs | src/vmm/src/vstate/resources.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::convert::Infallible;
use serde::{Deserialize, Serialize};
pub use vm_allocator::AllocPolicy;
use vm_allocator::{AddressAllocator, IdAllocator};
use crate::arch;
use crate::snapshot::Persist;
/// Helper function to allocate `count` ids from an id allocator.
///
/// Allocation is all-or-nothing: if any allocation fails, every id already
/// taken is returned to the allocator before the error is propagated.
fn allocate_many_ids(
    id_allocator: &mut IdAllocator,
    count: u32,
) -> Result<Vec<u32>, vm_allocator::Error> {
    let mut ids = Vec::with_capacity(count as usize);
    for _ in 0..count {
        let id = match id_allocator.allocate_id() {
            Ok(id) => id,
            Err(err) => {
                // Roll back. Unwrapping is fine: these ids were just
                // allocated, so freeing them cannot fail.
                for allocated in ids {
                    id_allocator.free_id(allocated).unwrap();
                }
                return Err(err);
            }
        };
        ids.push(id);
    }
    Ok(ids)
}
/// A resource manager for (de)allocating interrupt lines (GSIs) and guest memory
///
/// At the moment, we support:
///
/// * GSIs for legacy x86_64 devices
/// * GSIs for MMIO devices
/// * Memory allocations in the MMIO address space
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResourceAllocator {
    /// Allocator for legacy device interrupt lines
    pub gsi_legacy_allocator: IdAllocator,
    /// Allocator for PCI device GSIs
    pub gsi_msi_allocator: IdAllocator,
    /// Allocator for memory in the 32-bit MMIO address space
    pub mmio32_memory: AddressAllocator,
    /// Allocator for memory in the 64-bit MMIO address space
    pub mmio64_memory: AddressAllocator,
    /// Allocator for memory after the 64-bit MMIO address space
    pub past_mmio64_memory: AddressAllocator,
    /// Memory allocator for system data
    pub system_memory: AddressAllocator,
}
impl Default for ResourceAllocator {
fn default() -> Self {
ResourceAllocator::new()
}
}
impl ResourceAllocator {
    /// Create a new resource allocator for Firecracker devices
    ///
    /// All allocators are initialized over the architecture-defined GSI and
    /// address ranges from the `arch` module.
    pub fn new() -> Self {
        // It is fine for us to unwrap the following since we know we are passing valid ranges for
        // all allocators
        Self {
            gsi_legacy_allocator: IdAllocator::new(arch::GSI_LEGACY_START, arch::GSI_LEGACY_END)
                .unwrap(),
            gsi_msi_allocator: IdAllocator::new(arch::GSI_MSI_START, arch::GSI_MSI_END).unwrap(),
            mmio32_memory: AddressAllocator::new(
                arch::MEM_32BIT_DEVICES_START,
                arch::MEM_32BIT_DEVICES_SIZE,
            )
            .unwrap(),
            mmio64_memory: AddressAllocator::new(
                arch::MEM_64BIT_DEVICES_START,
                arch::MEM_64BIT_DEVICES_SIZE,
            )
            .unwrap(),
            past_mmio64_memory: AddressAllocator::new(
                arch::FIRST_ADDR_PAST_64BITS_MMIO,
                arch::PAST_64BITS_MMIO_SIZE,
            )
            .unwrap(),
            system_memory: AddressAllocator::new(arch::SYSTEM_MEM_START, arch::SYSTEM_MEM_SIZE)
                .unwrap(),
        }
    }

    /// Allocate a number of legacy GSIs
    ///
    /// Allocation is all-or-nothing: on failure no GSI remains allocated.
    ///
    /// # Arguments
    ///
    /// * `gsi_count` - The number of legacy GSIs to allocate
    pub fn allocate_gsi_legacy(&mut self, gsi_count: u32) -> Result<Vec<u32>, vm_allocator::Error> {
        allocate_many_ids(&mut self.gsi_legacy_allocator, gsi_count)
    }

    /// Allocate a number of GSIs for MSI
    ///
    /// Allocation is all-or-nothing: on failure no GSI remains allocated.
    ///
    /// # Arguments
    ///
    /// * `gsi_count` - The number of GSIs to allocate
    pub fn allocate_gsi_msi(&mut self, gsi_count: u32) -> Result<Vec<u32>, vm_allocator::Error> {
        allocate_many_ids(&mut self.gsi_msi_allocator, gsi_count)
    }

    /// Allocate a memory range in 32-bit MMIO address space
    ///
    /// If it succeeds, it returns the first address of the allocated range
    ///
    /// # Arguments
    ///
    /// * `size` - The size in bytes of the memory to allocate
    /// * `alignment` - The alignment of the address of the first byte
    /// * `policy` - A [`vm_allocator::AllocPolicy`] variant for determining the allocation policy
    pub fn allocate_32bit_mmio_memory(
        &mut self,
        size: u64,
        alignment: u64,
        policy: AllocPolicy,
    ) -> Result<u64, vm_allocator::Error> {
        Ok(self
            .mmio32_memory
            .allocate(size, alignment, policy)?
            .start())
    }

    /// Allocate a memory range in 64-bit MMIO address space
    ///
    /// If it succeeds, it returns the first address of the allocated range
    ///
    /// # Arguments
    ///
    /// * `size` - The size in bytes of the memory to allocate
    /// * `alignment` - The alignment of the address of the first byte
    /// * `policy` - A [`vm_allocator::AllocPolicy`] variant for determining the allocation policy
    pub fn allocate_64bit_mmio_memory(
        &mut self,
        size: u64,
        alignment: u64,
        policy: AllocPolicy,
    ) -> Result<u64, vm_allocator::Error> {
        Ok(self
            .mmio64_memory
            .allocate(size, alignment, policy)?
            .start())
    }

    /// Allocate a memory range for system data
    ///
    /// If it succeeds, it returns the first address of the allocated range
    ///
    /// # Arguments
    ///
    /// * `size` - The size in bytes of the memory to allocate
    /// * `alignment` - The alignment of the address of the first byte
    /// * `policy` - A [`vm_allocator::AllocPolicy`] variant for determining the allocation policy
    pub fn allocate_system_memory(
        &mut self,
        size: u64,
        alignment: u64,
        policy: AllocPolicy,
    ) -> Result<u64, vm_allocator::Error> {
        Ok(self
            .system_memory
            .allocate(size, alignment, policy)?
            .start())
    }
}
impl<'a> Persist<'a> for ResourceAllocator {
type State = ResourceAllocator;
type ConstructorArgs = ();
type Error = Infallible;
fn save(&self) -> Self::State {
self.clone()
}
fn restore(
_constructor_args: Self::ConstructorArgs,
state: &Self::State,
) -> Result<Self, Self::Error> {
Ok(state.clone())
}
}
#[cfg(test)]
mod tests {
use vm_allocator::AllocPolicy;
use super::ResourceAllocator;
use crate::arch::{self, GSI_LEGACY_NUM, GSI_LEGACY_START, GSI_MSI_NUM, GSI_MSI_START};
use crate::snapshot::{Persist, Snapshot};
#[test]
fn test_allocate_irq() {
let mut allocator = ResourceAllocator::new();
// asking for 0 IRQs should return us an empty vector
assert_eq!(allocator.allocate_gsi_legacy(0), Ok(vec![]));
// We cannot allocate more GSIs than available
assert_eq!(
allocator.allocate_gsi_legacy(GSI_LEGACY_NUM + 1),
Err(vm_allocator::Error::ResourceNotAvailable)
);
// But allocating all of them at once should work
assert_eq!(
allocator.allocate_gsi_legacy(GSI_LEGACY_NUM),
Ok((arch::GSI_LEGACY_START..=arch::GSI_LEGACY_END).collect::<Vec<_>>())
);
// And now we ran out of GSIs
assert_eq!(
allocator.allocate_gsi_legacy(1),
Err(vm_allocator::Error::ResourceNotAvailable)
);
// But we should be able to ask for 0 GSIs
assert_eq!(allocator.allocate_gsi_legacy(0), Ok(vec![]));
let mut allocator = ResourceAllocator::new();
// We should be able to allocate 1 GSI
assert_eq!(
allocator.allocate_gsi_legacy(1),
Ok(vec![arch::GSI_LEGACY_START])
);
// We can't allocate MAX_IRQS any more
assert_eq!(
allocator.allocate_gsi_legacy(GSI_LEGACY_NUM),
Err(vm_allocator::Error::ResourceNotAvailable)
);
// We can allocate another one and it should be the second available
assert_eq!(
allocator.allocate_gsi_legacy(1),
Ok(vec![arch::GSI_LEGACY_START + 1])
);
// Let's allocate the rest in a loop
for i in arch::GSI_LEGACY_START + 2..=arch::GSI_LEGACY_END {
assert_eq!(allocator.allocate_gsi_legacy(1), Ok(vec![i]));
}
}
#[test]
fn test_allocate_gsi() {
let mut allocator = ResourceAllocator::new();
// asking for 0 IRQs should return us an empty vector
assert_eq!(allocator.allocate_gsi_msi(0), Ok(vec![]));
// We cannot allocate more GSIs than available
assert_eq!(
allocator.allocate_gsi_msi(GSI_MSI_NUM + 1),
Err(vm_allocator::Error::ResourceNotAvailable)
);
// But allocating all of them at once should work
assert_eq!(
allocator.allocate_gsi_msi(GSI_MSI_NUM),
Ok((arch::GSI_MSI_START..=arch::GSI_MSI_END).collect::<Vec<_>>())
);
// And now we ran out of GSIs
assert_eq!(
allocator.allocate_gsi_msi(1),
Err(vm_allocator::Error::ResourceNotAvailable)
);
// But we should be able to ask for 0 GSIs
assert_eq!(allocator.allocate_gsi_msi(0), Ok(vec![]));
let mut allocator = ResourceAllocator::new();
// We should be able to allocate 1 GSI
assert_eq!(allocator.allocate_gsi_msi(1), Ok(vec![arch::GSI_MSI_START]));
// We can't allocate MAX_IRQS any more
assert_eq!(
allocator.allocate_gsi_msi(GSI_MSI_NUM),
Err(vm_allocator::Error::ResourceNotAvailable)
);
// We can allocate another one and it should be the second available
assert_eq!(
allocator.allocate_gsi_msi(1),
Ok(vec![arch::GSI_MSI_START + 1])
);
// Let's allocate the rest in a loop
for i in arch::GSI_MSI_START + 2..=arch::GSI_MSI_END {
assert_eq!(allocator.allocate_gsi_msi(1), Ok(vec![i]));
}
}
fn clone_allocator(allocator: &ResourceAllocator) -> ResourceAllocator {
let mut buf = vec![0u8; 1024];
Snapshot::new(allocator.save())
.save(&mut buf.as_mut_slice())
.unwrap();
let restored_state: ResourceAllocator = Snapshot::load_without_crc_check(buf.as_slice())
.unwrap()
.data;
ResourceAllocator::restore((), &restored_state).unwrap()
}
#[test]
fn test_save_restore() {
let mut allocator0 = ResourceAllocator::new();
let irq_0 = allocator0.allocate_gsi_legacy(1).unwrap()[0];
assert_eq!(irq_0, GSI_LEGACY_START);
let gsi_0 = allocator0.allocate_gsi_msi(1).unwrap()[0];
assert_eq!(gsi_0, GSI_MSI_START);
let mut allocator1 = clone_allocator(&allocator0);
let irq_1 = allocator1.allocate_gsi_legacy(1).unwrap()[0];
assert_eq!(irq_1, GSI_LEGACY_START + 1);
let gsi_1 = allocator1.allocate_gsi_msi(1).unwrap()[0];
assert_eq!(gsi_1, GSI_MSI_START + 1);
let mmio32_mem = allocator1
.allocate_32bit_mmio_memory(0x42, 1, AllocPolicy::FirstMatch)
.unwrap();
assert_eq!(mmio32_mem, arch::MEM_32BIT_DEVICES_START);
let mmio64_mem = allocator1
.allocate_64bit_mmio_memory(0x42, 1, AllocPolicy::FirstMatch)
.unwrap();
assert_eq!(mmio64_mem, arch::MEM_64BIT_DEVICES_START);
let system_mem = allocator1
.allocate_system_memory(0x42, 1, AllocPolicy::FirstMatch)
.unwrap();
assert_eq!(system_mem, arch::SYSTEM_MEM_START);
let mut allocator2 = clone_allocator(&allocator1);
allocator2
.allocate_32bit_mmio_memory(0x42, 1, AllocPolicy::ExactMatch(mmio32_mem))
.unwrap_err();
allocator2
.allocate_64bit_mmio_memory(0x42, 1, AllocPolicy::ExactMatch(mmio64_mem))
.unwrap_err();
allocator2
.allocate_system_memory(0x42, 1, AllocPolicy::ExactMatch(system_mem))
.unwrap_err();
let irq_2 = allocator2.allocate_gsi_legacy(1).unwrap()[0];
assert_eq!(irq_2, GSI_LEGACY_START + 2);
let gsi_2 = allocator2.allocate_gsi_msi(1).unwrap()[0];
assert_eq!(gsi_2, GSI_MSI_START + 2);
let mmio32_mem = allocator1
.allocate_32bit_mmio_memory(0x42, 1, AllocPolicy::FirstMatch)
.unwrap();
assert_eq!(mmio32_mem, arch::MEM_32BIT_DEVICES_START + 0x42);
let mmio64_mem = allocator1
.allocate_64bit_mmio_memory(0x42, 1, AllocPolicy::FirstMatch)
.unwrap();
assert_eq!(mmio64_mem, arch::MEM_64BIT_DEVICES_START + 0x42);
let system_mem = allocator1
.allocate_system_memory(0x42, 1, AllocPolicy::FirstMatch)
.unwrap();
assert_eq!(system_mem, arch::SYSTEM_MEM_START + 0x42);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vstate/mod.rs | src/vmm/src/vstate/mod.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/// Module with the implementation of a Bus that can hold devices.
pub mod bus;
/// VM interrupts implementation.
pub mod interrupts;
/// Module with Kvm implementation.
pub mod kvm;
/// Module with GuestMemory implementation.
pub mod memory;
/// Resource manager for devices.
pub mod resources;
/// Module with Vcpu implementation.
pub mod vcpu;
/// Module with Vm implementation.
pub mod vm;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vstate/bus.rs | src/vmm/src/vstate/bus.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//! Handles routing to devices in an address space.
use std::cmp::Ordering;
use std::collections::btree_map::BTreeMap;
use std::sync::{Arc, Barrier, Mutex, RwLock, Weak};
use std::{error, fmt, result};
/// Trait for devices that respond to reads or writes in an arbitrary address space.
///
/// The device does not care where it exists in address space as each method is only given an offset
/// into its allocated portion of address space.
#[allow(unused_variables)]
pub trait BusDevice: Send {
/// Reads at `offset` from this device
fn read(&mut self, base: u64, offset: u64, data: &mut [u8]) {}
/// Writes at `offset` into this device
fn write(&mut self, base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
None
}
}
/// Trait similar to [`BusDevice`] with the extra requirement that a device is `Send` and `Sync`.
#[allow(unused_variables)]
pub trait BusDeviceSync: Send + Sync {
/// Reads at `offset` from this device
fn read(&self, base: u64, offset: u64, data: &mut [u8]) {}
/// Writes at `offset` into this device
fn write(&self, base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
None
}
}
impl<B: BusDevice> BusDeviceSync for Mutex<B> {
/// Reads at `offset` from this device
fn read(&self, base: u64, offset: u64, data: &mut [u8]) {
self.lock()
.expect("Failed to acquire device lock")
.read(base, offset, data)
}
/// Writes at `offset` into this device
fn write(&self, base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
self.lock()
.expect("Failed to acquire device lock")
.write(base, offset, data)
}
}
/// Error type for [`Bus`]-related operations.
#[derive(Debug)]
pub enum BusError {
/// The insertion failed because the new device overlapped with an old device.
Overlap,
/// Failed to operate on zero sized range.
ZeroSizedRange,
/// Failed to find address range.
MissingAddressRange,
}
/// Result type for [`Bus`]-related operations.
pub type Result<T> = result::Result<T, BusError>;
impl fmt::Display for BusError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "bus_error: {self:?}")
}
}
impl error::Error for BusError {}
/// Holds a base and length representing the address space occupied by a `BusDevice`.
///
/// * base - The address at which the range start.
/// * len - The length of the range in bytes.
#[derive(Debug, Copy, Clone)]
pub struct BusRange {
/// base address of a range within a [`Bus`]
pub base: u64,
/// length of a range within a [`Bus`]
pub len: u64,
}
impl BusRange {
/// Returns true if there is overlap with the given range.
pub fn overlaps(&self, base: u64, len: u64) -> bool {
self.base < (base + len) && base < self.base + self.len
}
}
impl Eq for BusRange {}
impl PartialEq for BusRange {
fn eq(&self, other: &BusRange) -> bool {
self.base == other.base
}
}
impl Ord for BusRange {
fn cmp(&self, other: &BusRange) -> Ordering {
self.base.cmp(&other.base)
}
}
impl PartialOrd for BusRange {
fn partial_cmp(&self, other: &BusRange) -> Option<Ordering> {
Some(self.cmp(other))
}
}
/// A device container for routing reads and writes over some address space.
///
/// This doesn't have any restrictions on what kind of device or address space this applies to. The
/// only restriction is that no two devices can overlap in this address space.
#[derive(Default, Debug)]
pub struct Bus {
devices: RwLock<BTreeMap<BusRange, Weak<dyn BusDeviceSync>>>,
}
impl Bus {
/// Constructs an a bus with an empty address space.
pub fn new() -> Bus {
Bus {
devices: RwLock::new(BTreeMap::new()),
}
}
fn first_before(&self, addr: u64) -> Option<(BusRange, Arc<dyn BusDeviceSync>)> {
let devices = self.devices.read().unwrap();
let (range, dev) = devices
.range(..=BusRange { base: addr, len: 1 })
.next_back()?;
dev.upgrade().map(|d| (*range, d.clone()))
}
#[allow(clippy::type_complexity)]
/// Get a reference to a device residing inside the bus at address [`addr`].
pub fn resolve(&self, addr: u64) -> Option<(u64, u64, Arc<dyn BusDeviceSync>)> {
if let Some((range, dev)) = self.first_before(addr) {
let offset = addr - range.base;
if offset < range.len {
return Some((range.base, offset, dev));
}
}
None
}
/// Insert a device into the [`Bus`] in the range [`addr`, `addr` + `len`].
pub fn insert(&self, device: Arc<dyn BusDeviceSync>, base: u64, len: u64) -> Result<()> {
if len == 0 {
return Err(BusError::ZeroSizedRange);
}
// Reject all cases where the new device's range overlaps with an existing device.
if self
.devices
.read()
.unwrap()
.iter()
.any(|(range, _dev)| range.overlaps(base, len))
{
return Err(BusError::Overlap);
}
if self
.devices
.write()
.unwrap()
.insert(BusRange { base, len }, Arc::downgrade(&device))
.is_some()
{
return Err(BusError::Overlap);
}
Ok(())
}
/// Removes the device at the given address space range.
pub fn remove(&self, base: u64, len: u64) -> Result<()> {
if len == 0 {
return Err(BusError::ZeroSizedRange);
}
let bus_range = BusRange { base, len };
if self.devices.write().unwrap().remove(&bus_range).is_none() {
return Err(BusError::MissingAddressRange);
}
Ok(())
}
/// Removes all entries referencing the given device.
pub fn remove_by_device(&self, device: &Arc<dyn BusDeviceSync>) -> Result<()> {
let mut device_list = self.devices.write().unwrap();
let mut remove_key_list = Vec::new();
for (key, value) in device_list.iter() {
if Arc::ptr_eq(&value.upgrade().unwrap(), device) {
remove_key_list.push(*key);
}
}
for key in remove_key_list.iter() {
device_list.remove(key);
}
Ok(())
}
/// Updates the address range for an existing device.
pub fn update_range(
&self,
old_base: u64,
old_len: u64,
new_base: u64,
new_len: u64,
) -> Result<()> {
// Retrieve the device corresponding to the range
let device = if let Some((_, _, dev)) = self.resolve(old_base) {
dev.clone()
} else {
return Err(BusError::MissingAddressRange);
};
// Remove the old address range
self.remove(old_base, old_len)?;
// Insert the new address range
self.insert(device, new_base, new_len)
}
/// Reads data from the device that owns the range containing `addr` and puts it into `data`.
///
/// Returns true on success, otherwise `data` is untouched.
pub fn read(&self, addr: u64, data: &mut [u8]) -> Result<()> {
if let Some((base, offset, dev)) = self.resolve(addr) {
// OK to unwrap as lock() failing is a serious error condition and should panic.
dev.read(base, offset, data);
Ok(())
} else {
Err(BusError::MissingAddressRange)
}
}
/// Writes `data` to the device that owns the range containing `addr`.
///
/// Returns true on success, otherwise `data` is untouched.
pub fn write(&self, addr: u64, data: &[u8]) -> Result<Option<Arc<Barrier>>> {
if let Some((base, offset, dev)) = self.resolve(addr) {
// OK to unwrap as lock() failing is a serious error condition and should panic.
Ok(dev.write(base, offset, data))
} else {
Err(BusError::MissingAddressRange)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
struct DummyDevice;
impl BusDeviceSync for DummyDevice {}
struct ConstantDevice;
impl BusDeviceSync for ConstantDevice {
#[allow(clippy::cast_possible_truncation)]
fn read(&self, _base: u64, offset: u64, data: &mut [u8]) {
for (i, v) in data.iter_mut().enumerate() {
*v = (offset as u8) + (i as u8);
}
}
#[allow(clippy::cast_possible_truncation)]
fn write(&self, _base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
for (i, v) in data.iter().enumerate() {
assert_eq!(*v, (offset as u8) + (i as u8))
}
None
}
}
#[test]
fn bus_insert() {
let bus = Bus::new();
let dummy = Arc::new(DummyDevice);
bus.insert(dummy.clone(), 0x10, 0).unwrap_err();
bus.insert(dummy.clone(), 0x10, 0x10).unwrap();
let result = bus.insert(dummy.clone(), 0x0f, 0x10);
assert_eq!(format!("{result:?}"), "Err(Overlap)");
bus.insert(dummy.clone(), 0x10, 0x10).unwrap_err();
bus.insert(dummy.clone(), 0x10, 0x15).unwrap_err();
bus.insert(dummy.clone(), 0x12, 0x15).unwrap_err();
bus.insert(dummy.clone(), 0x12, 0x01).unwrap_err();
bus.insert(dummy.clone(), 0x0, 0x20).unwrap_err();
bus.insert(dummy.clone(), 0x20, 0x05).unwrap();
bus.insert(dummy.clone(), 0x25, 0x05).unwrap();
bus.insert(dummy, 0x0, 0x10).unwrap();
}
#[test]
fn bus_remove() {
let bus = Bus::new();
let dummy: Arc<dyn BusDeviceSync> = Arc::new(DummyDevice);
bus.remove(0x42, 0x0).unwrap_err();
bus.remove(0x13, 0x12).unwrap_err();
bus.insert(dummy.clone(), 0x13, 0x12).unwrap();
bus.remove(0x42, 0x42).unwrap_err();
bus.remove(0x13, 0x12).unwrap();
bus.insert(dummy.clone(), 0x16, 0x1).unwrap();
bus.remove_by_device(&dummy).unwrap();
bus.remove(0x16, 0x1).unwrap_err();
}
#[test]
#[allow(clippy::redundant_clone)]
fn bus_read_write() {
let bus = Bus::new();
let dummy = Arc::new(DummyDevice);
bus.insert(dummy.clone(), 0x10, 0x10).unwrap();
bus.read(0x10, &mut [0, 0, 0, 0]).unwrap();
bus.write(0x10, &[0, 0, 0, 0]).unwrap();
bus.read(0x11, &mut [0, 0, 0, 0]).unwrap();
bus.write(0x11, &[0, 0, 0, 0]).unwrap();
bus.read(0x16, &mut [0, 0, 0, 0]).unwrap();
bus.write(0x16, &[0, 0, 0, 0]).unwrap();
bus.read(0x20, &mut [0, 0, 0, 0]).unwrap_err();
bus.write(0x20, &[0, 0, 0, 0]).unwrap_err();
bus.read(0x06, &mut [0, 0, 0, 0]).unwrap_err();
bus.write(0x06, &[0, 0, 0, 0]).unwrap_err();
}
#[test]
#[allow(clippy::redundant_clone)]
fn bus_read_write_values() {
let bus = Bus::new();
let dummy = Arc::new(ConstantDevice);
bus.insert(dummy.clone(), 0x10, 0x10).unwrap();
let mut values = [0, 1, 2, 3];
bus.read(0x10, &mut values).unwrap();
assert_eq!(values, [0, 1, 2, 3]);
bus.write(0x10, &values).unwrap();
bus.read(0x15, &mut values).unwrap();
assert_eq!(values, [5, 6, 7, 8]);
bus.write(0x15, &values).unwrap();
}
#[test]
#[allow(clippy::redundant_clone)]
fn busrange_cmp() {
let range = BusRange { base: 0x10, len: 2 };
assert_eq!(range, BusRange { base: 0x10, len: 3 });
assert_eq!(range, BusRange { base: 0x10, len: 2 });
assert!(range < BusRange { base: 0x12, len: 1 });
assert!(range < BusRange { base: 0x12, len: 3 });
assert_eq!(range, range.clone());
let bus = Bus::new();
let mut data = [1, 2, 3, 4];
let device = Arc::new(DummyDevice);
bus.insert(device.clone(), 0x10, 0x10).unwrap();
bus.write(0x10, &data).unwrap();
bus.read(0x10, &mut data).unwrap();
assert_eq!(data, [1, 2, 3, 4]);
}
#[test]
fn bus_range_overlap() {
let a = BusRange {
base: 0x1000,
len: 0x400,
};
assert!(a.overlaps(0x1000, 0x400));
assert!(a.overlaps(0xf00, 0x400));
assert!(a.overlaps(0x1000, 0x01));
assert!(a.overlaps(0xfff, 0x02));
assert!(a.overlaps(0x1100, 0x100));
assert!(a.overlaps(0x13ff, 0x100));
assert!(!a.overlaps(0x1400, 0x100));
assert!(!a.overlaps(0xf00, 0x100));
}
#[test]
fn bus_update_range() {
let bus = Bus::new();
let dummy = Arc::new(DummyDevice);
bus.update_range(0x13, 0x12, 0x16, 0x1).unwrap_err();
bus.insert(dummy.clone(), 0x13, 12).unwrap();
bus.update_range(0x16, 0x1, 0x13, 0x12).unwrap_err();
bus.update_range(0x13, 0x12, 0x16, 0x1).unwrap();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/templates.rs | src/vmm/src/cpu_config/templates.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#[cfg(target_arch = "x86_64")]
mod common_types {
pub use crate::cpu_config::x86_64::custom_cpu_template::CustomCpuTemplate;
pub use crate::cpu_config::x86_64::static_cpu_templates::StaticCpuTemplate;
pub use crate::cpu_config::x86_64::{
CpuConfiguration, CpuConfigurationError as GuestConfigError, test_utils,
};
}
#[cfg(target_arch = "aarch64")]
mod common_types {
pub use crate::cpu_config::aarch64::custom_cpu_template::CustomCpuTemplate;
pub use crate::cpu_config::aarch64::static_cpu_templates::StaticCpuTemplate;
pub use crate::cpu_config::aarch64::{
CpuConfiguration, CpuConfigurationError as GuestConfigError, test_utils,
};
}
use std::borrow::Cow;
use std::fmt::Debug;
pub use common_types::*;
use serde::de::Error as SerdeError;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
/// Error for GetCpuTemplate trait.
#[derive(Debug, thiserror::Error, displaydoc::Display, PartialEq, Eq)]
pub enum GetCpuTemplateError {
#[cfg(target_arch = "x86_64")]
/// Failed to get CPU vendor information: {0}
GetCpuVendor(crate::cpu_config::x86_64::cpuid::common::GetCpuidError),
/// CPU vendor mismatched between actual CPU and CPU template.
CpuVendorMismatched,
/// Invalid static CPU template: {0}
InvalidStaticCpuTemplate(StaticCpuTemplate),
/// The current CPU model is not permitted to apply the CPU template.
InvalidCpuModel,
}
/// Trait to unwrap the inner [`CustomCpuTemplate`] from [`Option<CpuTemplateType>`].
///
/// This trait is needed because static CPU template and custom CPU template have different nested
/// structures: `CpuTemplateType::Static(StaticCpuTemplate::StaticTemplateType(CustomCpuTemplate))`
/// vs `CpuTemplateType::Custom(CustomCpuTemplate)`. As static CPU templates return owned
/// `CustomCpuTemplate`s, `Cow` is used here to avoid unnecessary clone of `CustomCpuTemplate` for
/// custom CPU templates and handle static CPU template and custom CPU template in a same manner.
pub trait GetCpuTemplate {
/// Get CPU template
fn get_cpu_template(&self) -> Result<Cow<'_, CustomCpuTemplate>, GetCpuTemplateError>;
}
/// Enum that represents types of cpu templates available.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum CpuTemplateType {
/// Custom cpu template
Custom(CustomCpuTemplate),
/// Static cpu template
Static(StaticCpuTemplate),
}
// This conversion is only used for snapshot, but the static CPU template
// information has not been saved into snapshot since v1.1.
impl From<&Option<CpuTemplateType>> for StaticCpuTemplate {
fn from(value: &Option<CpuTemplateType>) -> Self {
match value {
Some(CpuTemplateType::Static(template)) => *template,
Some(CpuTemplateType::Custom(_)) | None => StaticCpuTemplate::None,
}
}
}
// This conversion is used when converting `&VmConfig` to `MachineConfig` to
// respond `GET /machine-config` and `GET /vm`.
impl From<&CpuTemplateType> for StaticCpuTemplate {
fn from(value: &CpuTemplateType) -> Self {
match value {
CpuTemplateType::Static(template) => *template,
CpuTemplateType::Custom(_) => StaticCpuTemplate::None,
}
}
}
impl TryFrom<&[u8]> for CustomCpuTemplate {
type Error = serde_json::Error;
fn try_from(value: &[u8]) -> Result<Self, Self::Error> {
let template: CustomCpuTemplate = serde_json::from_slice(value)?;
template.validate()?;
Ok(template)
}
}
impl TryFrom<&str> for CustomCpuTemplate {
type Error = serde_json::Error;
fn try_from(value: &str) -> Result<Self, Self::Error> {
CustomCpuTemplate::try_from(value.as_bytes())
}
}
/// Struct to represent user defined kvm capability.
/// Users can add or remove kvm capabilities to be checked
/// by FC in addition to those FC checks by default.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum KvmCapability {
/// Add capability to the check list.
Add(u32),
/// Remove capability from the check list.
Remove(u32),
}
impl Serialize for KvmCapability {
/// Serialize KvmCapability into a string.
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let s = match self {
KvmCapability::Add(cap) => format!("{cap}"),
KvmCapability::Remove(cap) => format!("!{cap}"),
};
serializer.serialize_str(&s)
}
}
impl<'de> Deserialize<'de> for KvmCapability {
/// Deserialize string into a KvmCapability.
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let original_str = <String as Deserialize>::deserialize(deserializer)?;
let parse_err = |e| {
D::Error::custom(format!(
"Failed to parse string [{}] as a kvm capability - can not convert to numeric: {}",
original_str, e
))
};
match original_str.strip_prefix('!') {
Some(s) => {
let v = s.parse::<u32>().map_err(parse_err)?;
Ok(Self::Remove(v))
}
None => {
let v = original_str.parse::<u32>().map_err(parse_err)?;
Ok(Self::Add(v))
}
}
}
}
/// Bit-mapped value to adjust targeted bits of a register.
#[derive(Debug, Default, Clone, Copy, Eq, PartialEq, Hash)]
pub struct RegisterValueFilter<V>
where
V: Numeric,
{
/// Filter to be used when writing the value bits.
pub filter: V,
/// Value to be applied.
pub value: V,
}
impl<V> RegisterValueFilter<V>
where
V: Numeric + Debug,
{
/// Applies filter to the value
#[inline]
pub fn apply(&self, value: V) -> V {
(value & !self.filter) | self.value
}
}
impl<V> Serialize for RegisterValueFilter<V>
where
V: Numeric + Debug,
{
/// Serialize combination of value and filter into a single tri state string
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut bitmap_str = Vec::with_capacity(V::BITS as usize + 2);
bitmap_str.push(b'0');
bitmap_str.push(b'b');
for i in (0..V::BITS).rev() {
match self.filter.bit(i) {
true => {
let val = self.value.bit(i);
bitmap_str.push(b'0' + u8::from(val));
}
false => bitmap_str.push(b'x'),
}
}
// # Safety:
// We know that bitmap_str contains only ASCII characters
let s = unsafe { std::str::from_utf8_unchecked(&bitmap_str) };
serializer.serialize_str(s)
}
}
impl<'de, V> Deserialize<'de> for RegisterValueFilter<V>
where
V: Numeric + Debug,
{
/// Deserialize a composite bitmap string into a value pair
/// input string: "010x"
/// result: {
/// filter: 1110
/// value: 0100
/// }
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let original_str = <String as Deserialize>::deserialize(deserializer)?;
let stripped_str = original_str.strip_prefix("0b").unwrap_or(&original_str);
let (mut filter, mut value) = (V::zero(), V::zero());
let mut i = 0;
for s in stripped_str.as_bytes().iter().rev() {
if V::BITS == i {
return Err(D::Error::custom(format!(
"Failed to parse string [{}] as a bitmap - string is too long",
original_str
)));
}
match s {
b'_' => continue,
b'x' => {}
b'0' => {
filter |= V::one() << i;
}
b'1' => {
filter |= V::one() << i;
value |= V::one() << i;
}
c => {
return Err(D::Error::custom(format!(
"Failed to parse string [{}] as a bitmap - unknown character: {}",
original_str, c
)));
}
}
i += 1;
}
Ok(RegisterValueFilter { filter, value })
}
}
/// Trait for numeric types
pub trait Numeric:
Sized
+ Copy
+ PartialEq<Self>
+ std::fmt::Binary
+ std::ops::Not<Output = Self>
+ std::ops::BitAnd<Output = Self>
+ std::ops::BitOr<Output = Self>
+ std::ops::BitOrAssign<Self>
+ std::ops::BitXor<Output = Self>
+ std::ops::Shl<u32, Output = Self>
+ std::ops::AddAssign<Self>
{
/// Number of bits for type
const BITS: u32;
/// Value of bit at pos
fn bit(&self, pos: u32) -> bool;
/// Returns 0 of the type
fn zero() -> Self;
/// Returns 1 of the type
fn one() -> Self;
}
macro_rules! impl_numeric {
($type:tt) => {
impl Numeric for $type {
const BITS: u32 = $type::BITS;
fn bit(&self, pos: u32) -> bool {
(self & (Self::one() << pos)) != 0
}
fn zero() -> Self {
0
}
fn one() -> Self {
1
}
}
};
}
impl_numeric!(u8);
impl_numeric!(u16);
impl_numeric!(u32);
impl_numeric!(u64);
impl_numeric!(u128);
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_kvm_capability_serde() {
let kvm_cap = KvmCapability::Add(69);
let expected_str = "\"69\"";
let serialized = serde_json::to_string(&kvm_cap).unwrap();
assert_eq!(&serialized, expected_str);
let kvm_cap = KvmCapability::Remove(69);
let expected_str = "\"!69\"";
let serialized = serde_json::to_string(&kvm_cap).unwrap();
assert_eq!(&serialized, expected_str);
let serialized = "\"69\"";
let deserialized: KvmCapability = serde_json::from_str(serialized).unwrap();
assert_eq!(deserialized, KvmCapability::Add(69));
let serialized = "\"!69\"";
let deserialized: KvmCapability = serde_json::from_str(serialized).unwrap();
assert_eq!(deserialized, KvmCapability::Remove(69));
}
#[test]
fn test_register_value_filter_serde() {
let rvf = RegisterValueFilter::<u8> {
value: 0b01010101,
filter: 0b11110000,
};
let expected_str = "\"0b0101xxxx\"";
let serialized = serde_json::to_string(&rvf).unwrap();
assert_eq!(&serialized, expected_str);
let expected_rvf = RegisterValueFilter::<u8> {
value: 0b01010000,
filter: 0b11110000,
};
let deserialized: RegisterValueFilter<u8> = serde_json::from_str(&serialized).unwrap();
assert_eq!(deserialized, expected_rvf);
let serialized = "\"0b0_101_xx_xx\"";
let deserialized: RegisterValueFilter<u8> = serde_json::from_str(serialized).unwrap();
assert_eq!(deserialized, expected_rvf);
let serialized = "\"0b0_xϽ1_xx_xx\"";
let deserialized: Result<RegisterValueFilter<u8>, _> = serde_json::from_str(serialized);
deserialized.unwrap_err();
let serialized = "\"0b0000_0000_0\"";
let deserialized: Result<RegisterValueFilter<u8>, _> = serde_json::from_str(serialized);
deserialized.unwrap_err();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/test_utils.rs | src/vmm/src/cpu_config/test_utils.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::path::PathBuf;
use crate::cpu_config::templates::CustomCpuTemplate;
/// Get a static CPU template stored as a JSON file.
pub fn get_json_template(filename: &str) -> CustomCpuTemplate {
let json_path = [
env!("CARGO_MANIFEST_DIR"),
"../../tests/data/custom_cpu_templates",
filename,
]
.iter()
.collect::<PathBuf>();
serde_json::from_str(&std::fs::read_to_string(json_path).unwrap()).unwrap()
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/mod.rs | src/vmm/src/cpu_config/mod.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/// Module with types used for custom CPU templates
pub mod templates;
/// Module with ser/de utils for custom CPU templates
pub mod templates_serde;
/// Module containing type implementations needed for x86 CPU configuration
#[cfg(target_arch = "x86_64")]
pub mod x86_64;
/// Module containing type implementations needed for aarch64 (ARM) CPU configuration
#[cfg(target_arch = "aarch64")]
pub mod aarch64;
#[cfg(test)]
pub(crate) mod test_utils;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/templates_serde.rs | src/vmm/src/cpu_config/templates_serde.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fmt::Debug;
use serde::de::Error as SerdeError;
use serde::{Deserialize, Deserializer, Serializer};
/// Serializes number to hex
pub fn serialize_to_hex_str<S, N>(number: &N, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
N: std::fmt::LowerHex + Debug,
{
serializer.serialize_str(format!("{:#x}", number).as_str())
}
macro_rules! deserialize_from_str {
($name:ident, $type:tt) => {
/// Deserializes number from string.
/// Number can be in binary, hex or dec formats.
pub fn $name<'de, D>(deserializer: D) -> Result<$type, D::Error>
where
D: Deserializer<'de>,
{
let number_str = String::deserialize(deserializer)?;
let deserialized_number = if let Some(s) = number_str.strip_prefix("0b") {
$type::from_str_radix(s, 2)
} else if let Some(s) = number_str.strip_prefix("0x") {
$type::from_str_radix(s, 16)
} else {
return Err(D::Error::custom(format!(
"No supported number system prefix found in value [{}]. Make sure to prefix \
the number with '0x' for hexadecimal numbers or '0b' for binary numbers.",
number_str,
)));
}
.map_err(|err| {
D::Error::custom(format!(
"Failed to parse string [{}] as a number for CPU template - {:?}",
number_str, err
))
})?;
Ok(deserialized_number)
}
};
}
deserialize_from_str!(deserialize_from_str_u32, u32);
deserialize_from_str!(deserialize_from_str_u64, u64);
#[cfg(test)]
mod tests {
use serde::de::IntoDeserializer;
use serde::de::value::{Error, StrDeserializer};
use super::*;
#[test]
fn test_deserialize_from_str() {
let valid_string = "0b1000101";
let deserializer: StrDeserializer<Error> = valid_string.into_deserializer();
let valid_value = deserialize_from_str_u32(deserializer);
assert_eq!(valid_value.unwrap(), 69);
let valid_string = "0x0045";
let deserializer: StrDeserializer<Error> = valid_string.into_deserializer();
let valid_value = deserialize_from_str_u32(deserializer);
assert_eq!(valid_value.unwrap(), 69);
let invalid_string = "xϽ69";
let deserializer: StrDeserializer<Error> = invalid_string.into_deserializer();
let invalid_value = deserialize_from_str_u32(deserializer);
invalid_value.unwrap_err();
let invalid_string = "69";
let deserializer: StrDeserializer<Error> = invalid_string.into_deserializer();
let invalid_value = deserialize_from_str_u32(deserializer);
invalid_value.unwrap_err();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/x86_64/custom_cpu_template.rs | src/vmm/src/cpu_config/x86_64/custom_cpu_template.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/// Guest config sub-module specifically useful for
/// config templates.
use std::borrow::Cow;
use serde::de::Error as SerdeError;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::arch::x86_64::cpu_model::{CpuModel, SKYLAKE_FMS};
use crate::cpu_config::templates::{
CpuTemplateType, GetCpuTemplate, GetCpuTemplateError, KvmCapability, RegisterValueFilter,
};
use crate::cpu_config::templates_serde::*;
use crate::cpu_config::x86_64::cpuid::KvmCpuidFlags;
use crate::cpu_config::x86_64::cpuid::common::get_vendor_id_from_host;
use crate::cpu_config::x86_64::static_cpu_templates::{StaticCpuTemplate, c3, t2, t2a, t2cl, t2s};
use crate::logger::warn;
impl GetCpuTemplate for Option<CpuTemplateType> {
fn get_cpu_template(&self) -> Result<Cow<'_, CustomCpuTemplate>, GetCpuTemplateError> {
use GetCpuTemplateError::*;
match self {
Some(template_type) => match template_type {
CpuTemplateType::Custom(template) => Ok(Cow::Borrowed(template)),
CpuTemplateType::Static(template) => {
// Return early for `None` due to no valid vendor and CPU models.
if template == &StaticCpuTemplate::None {
return Err(InvalidStaticCpuTemplate(StaticCpuTemplate::None));
}
if &get_vendor_id_from_host().map_err(GetCpuVendor)?
!= template.get_supported_vendor()
{
return Err(CpuVendorMismatched);
}
let cpu_model = CpuModel::get_cpu_model();
if !template.get_supported_cpu_models().contains(&cpu_model) {
return Err(InvalidCpuModel);
}
match template {
StaticCpuTemplate::C3 => {
if cpu_model == SKYLAKE_FMS {
warn!(
"On processors that do not enumerate FBSDP_NO, PSDP_NO and \
SBDR_SSDP_NO on IA32_ARCH_CAPABILITIES MSR, the guest kernel \
does not apply the mitigation against MMIO stale data \
vulnerability."
);
}
Ok(Cow::Owned(c3::c3()))
}
StaticCpuTemplate::T2 => Ok(Cow::Owned(t2::t2())),
StaticCpuTemplate::T2S => Ok(Cow::Owned(t2s::t2s())),
StaticCpuTemplate::T2CL => Ok(Cow::Owned(t2cl::t2cl())),
StaticCpuTemplate::T2A => Ok(Cow::Owned(t2a::t2a())),
StaticCpuTemplate::None => unreachable!(), // Handled earlier
}
}
},
None => Ok(Cow::Owned(CustomCpuTemplate::default())),
}
}
}
/// CPUID register enumeration
#[allow(missing_docs)]
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Hash, Ord, PartialOrd)]
pub enum CpuidRegister {
Eax,
Ebx,
Ecx,
Edx,
}
/// Target register to be modified by a bitmap.
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Hash)]
pub struct CpuidRegisterModifier {
/// CPUID register to be modified by the bitmap.
#[serde(
deserialize_with = "deserialize_cpuid_register",
serialize_with = "serialize_cpuid_register"
)]
pub register: CpuidRegister,
/// Bit mapping to be applied as a modifier to the
/// register's value at the address provided.
pub bitmap: RegisterValueFilter<u32>,
}
/// Composite type that holistically provides
/// the location of a specific register being used
/// in the context of a CPUID tree.
#[derive(Debug, Default, Clone, Eq, PartialEq, Serialize, Deserialize, Hash)]
pub struct CpuidLeafModifier {
/// Leaf value.
#[serde(
deserialize_with = "deserialize_from_str_u32",
serialize_with = "serialize_to_hex_str"
)]
pub leaf: u32,
/// Sub-Leaf value.
#[serde(
deserialize_with = "deserialize_from_str_u32",
serialize_with = "serialize_to_hex_str"
)]
pub subleaf: u32,
/// KVM feature flags for this leaf-subleaf.
#[serde(deserialize_with = "deserialize_kvm_cpuid_flags")]
pub flags: KvmCpuidFlags,
/// All registers to be modified under the sub-leaf.
pub modifiers: Vec<CpuidRegisterModifier>,
}
/// Wrapper type to containing x86_64 CPU config modifiers.
#[derive(Debug, Default, Clone, Eq, PartialEq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct CustomCpuTemplate {
/// Additional kvm capabilities to check before
/// configuring vcpus.
#[serde(default)]
pub kvm_capabilities: Vec<KvmCapability>,
/// Modifiers for CPUID configuration.
#[serde(default)]
pub cpuid_modifiers: Vec<CpuidLeafModifier>,
/// Modifiers for model specific registers.
#[serde(default)]
pub msr_modifiers: Vec<RegisterModifier>,
}
impl CustomCpuTemplate {
/// Get an iterator of MSR indices that are modified by the CPU template.
pub fn msr_index_iter(&self) -> impl ExactSizeIterator<Item = u32> + '_ {
self.msr_modifiers.iter().map(|modifier| modifier.addr)
}
/// Validate the correctness of the template.
pub fn validate(&self) -> Result<(), serde_json::Error> {
Ok(())
}
}
/// Wrapper of a mask defined as a bitmap to apply
/// changes to a given register's value.
#[derive(Debug, Default, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Hash)]
pub struct RegisterModifier {
/// Pointer of the location to be bit mapped.
#[serde(
deserialize_with = "deserialize_from_str_u32",
serialize_with = "serialize_to_hex_str"
)]
pub addr: u32,
/// Bit mapping to be applied as a modifier to the
/// register's value at the address provided.
pub bitmap: RegisterValueFilter<u64>,
}
fn deserialize_kvm_cpuid_flags<'de, D>(deserializer: D) -> Result<KvmCpuidFlags, D::Error>
where
D: Deserializer<'de>,
{
let flag = u32::deserialize(deserializer)?;
Ok(KvmCpuidFlags(flag))
}
fn deserialize_cpuid_register<'de, D>(deserializer: D) -> Result<CpuidRegister, D::Error>
where
D: Deserializer<'de>,
{
let cpuid_register_str = String::deserialize(deserializer)?;
Ok(match cpuid_register_str.as_str() {
"eax" => CpuidRegister::Eax,
"ebx" => CpuidRegister::Ebx,
"ecx" => CpuidRegister::Ecx,
"edx" => CpuidRegister::Edx,
_ => {
return Err(D::Error::custom(
"Invalid CPUID register. Must be one of [eax, ebx, ecx, edx]",
));
}
})
}
fn serialize_cpuid_register<S>(cpuid_reg: &CpuidRegister, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match cpuid_reg {
CpuidRegister::Eax => serializer.serialize_str("eax"),
CpuidRegister::Ebx => serializer.serialize_str("ebx"),
CpuidRegister::Ecx => serializer.serialize_str("ecx"),
CpuidRegister::Edx => serializer.serialize_str("edx"),
}
}
#[cfg(test)]
mod tests {
use serde_json::Value;
use super::*;
use crate::cpu_config::x86_64::test_utils::{TEST_TEMPLATE_JSON, build_test_template};
#[test]
fn test_get_cpu_template_with_no_template() {
// Test `get_cpu_template()` when no template is provided. The empty owned
// `CustomCpuTemplate` should be returned.
let cpu_template = None;
assert_eq!(
cpu_template.get_cpu_template().unwrap(),
Cow::Owned(CustomCpuTemplate::default()),
);
}
#[test]
fn test_get_cpu_template_with_c3_static_template() {
// Test `get_cpu_template()` when C3 static CPU template is specified. The owned
// `CustomCpuTemplate` should be returned if CPU vendor is Intel and the CPU model is
// supported. Otherwise, it should fail.
let c3 = StaticCpuTemplate::C3;
let cpu_template = Some(CpuTemplateType::Static(c3));
if &get_vendor_id_from_host().unwrap() == c3.get_supported_vendor() {
if c3
.get_supported_cpu_models()
.contains(&CpuModel::get_cpu_model())
{
assert_eq!(
cpu_template.get_cpu_template().unwrap(),
Cow::Owned(c3::c3())
);
} else {
assert_eq!(
cpu_template.get_cpu_template().unwrap_err(),
GetCpuTemplateError::InvalidCpuModel,
);
}
} else {
assert_eq!(
cpu_template.get_cpu_template().unwrap_err(),
GetCpuTemplateError::CpuVendorMismatched,
);
}
}
#[test]
fn test_get_cpu_template_with_t2_static_template() {
// Test `get_cpu_template()` when T2 static CPU template is specified. The owned
// `CustomCpuTemplate` should be returned if CPU vendor is Intel and the CPU model is
// supported. Otherwise, it should fail.
let t2 = StaticCpuTemplate::T2;
let cpu_template = Some(CpuTemplateType::Static(t2));
if &get_vendor_id_from_host().unwrap() == t2.get_supported_vendor() {
if t2
.get_supported_cpu_models()
.contains(&CpuModel::get_cpu_model())
{
assert_eq!(
cpu_template.get_cpu_template().unwrap(),
Cow::Owned(t2::t2())
);
} else {
assert_eq!(
cpu_template.get_cpu_template().unwrap_err(),
GetCpuTemplateError::InvalidCpuModel,
);
}
} else {
assert_eq!(
cpu_template.get_cpu_template().unwrap_err(),
GetCpuTemplateError::CpuVendorMismatched,
);
}
}
#[test]
fn test_get_cpu_template_with_t2s_static_template() {
// Test `get_cpu_template()` when T2S static CPU template is specified. The owned
// `CustomCpuTemplate` should be returned if CPU vendor is Intel and the CPU model is
// supported. Otherwise, it should fail.
let t2s = StaticCpuTemplate::T2S;
let cpu_template = Some(CpuTemplateType::Static(t2s));
if &get_vendor_id_from_host().unwrap() == t2s.get_supported_vendor() {
if t2s
.get_supported_cpu_models()
.contains(&CpuModel::get_cpu_model())
{
assert_eq!(
cpu_template.get_cpu_template().unwrap(),
Cow::Owned(t2s::t2s())
);
} else {
assert_eq!(
cpu_template.get_cpu_template().unwrap_err(),
GetCpuTemplateError::InvalidCpuModel,
);
}
} else {
assert_eq!(
cpu_template.get_cpu_template().unwrap_err(),
GetCpuTemplateError::CpuVendorMismatched,
);
}
}
#[test]
fn test_t2cl_template_equality() {
// For coverage purposes, this test forces usage of T2CL and bypasses
// validation that is generally applied which usually enforces that T2CL
// can only be used on Cascade Lake (or newer) CPUs.
let t2cl_custom_template = CpuTemplateType::Custom(t2cl::t2cl());
// This test also demonstrates the difference in concept between custom and static
// templates, while practically T2CL is consistent for the user, in code
// the static template of T2CL, and the custom template of T2CL are not equivalent.
assert_ne!(
t2cl_custom_template,
CpuTemplateType::Static(StaticCpuTemplate::T2CL)
);
}
#[test]
fn test_get_cpu_template_with_t2cl_static_template() {
// Test `get_cpu_template()` when T2CL static CPU template is specified. The owned
// `CustomCpuTemplate` should be returned if CPU vendor is Intel and the CPU model is
// supported. Otherwise, it should fail.
let t2cl = StaticCpuTemplate::T2CL;
let cpu_template = Some(CpuTemplateType::Static(t2cl));
if &get_vendor_id_from_host().unwrap() == t2cl.get_supported_vendor() {
if t2cl
.get_supported_cpu_models()
.contains(&CpuModel::get_cpu_model())
{
assert_eq!(
cpu_template.get_cpu_template().unwrap(),
Cow::Owned(t2cl::t2cl())
);
} else {
assert_eq!(
cpu_template.get_cpu_template().unwrap_err(),
GetCpuTemplateError::InvalidCpuModel,
);
}
} else {
assert_eq!(
cpu_template.get_cpu_template().unwrap_err(),
GetCpuTemplateError::CpuVendorMismatched,
);
}
}
#[test]
fn test_get_cpu_template_with_t2a_static_template() {
// Test `get_cpu_template()` when T2A static CPU template is specified. The owned
// `CustomCpuTemplate` should be returned if CPU vendor is AMD. Otherwise it should fail.
let t2a = StaticCpuTemplate::T2A;
let cpu_template = Some(CpuTemplateType::Static(t2a));
if &get_vendor_id_from_host().unwrap() == t2a.get_supported_vendor() {
if t2a
.get_supported_cpu_models()
.contains(&CpuModel::get_cpu_model())
{
assert_eq!(
cpu_template.get_cpu_template().unwrap(),
Cow::Owned(t2a::t2a())
);
} else {
assert_eq!(
cpu_template.get_cpu_template().unwrap_err(),
GetCpuTemplateError::InvalidCpuModel,
);
}
} else {
assert_eq!(
cpu_template.get_cpu_template().unwrap_err(),
GetCpuTemplateError::CpuVendorMismatched,
);
}
}
#[test]
fn test_get_cpu_template_with_none_static_template() {
// Test `get_cpu_template()` when no static CPU template is provided.
// `InvalidStaticCpuTemplate` error should be returned because it is no longer valid and
// was replaced with `None` of `Option<CpuTemplateType>`.
let cpu_template = Some(CpuTemplateType::Static(StaticCpuTemplate::None));
assert_eq!(
cpu_template.get_cpu_template().unwrap_err(),
GetCpuTemplateError::InvalidStaticCpuTemplate(StaticCpuTemplate::None)
);
// Test the Display for StaticCpuTemplate
assert_eq!(format!("{}", StaticCpuTemplate::None), "None");
}
#[test]
fn test_get_cpu_template_with_custom_template() {
// Test `get_cpu_template()` when a custom CPU template is provided. The borrowed
// `CustomCpuTemplate` should be returned.
let inner_cpu_template = CustomCpuTemplate::default();
let cpu_template = Some(CpuTemplateType::Custom(inner_cpu_template.clone()));
assert_eq!(
cpu_template.get_cpu_template().unwrap(),
Cow::Borrowed(&inner_cpu_template)
);
}
#[test]
fn test_malformed_json() {
// Misspelled field name, register
let cpu_template_result = serde_json::from_str::<CustomCpuTemplate>(
r#"{
"cpuid_modifiers": [
{
"leaf": "0x80000001",
"subleaf": "0b000111",
"flags": 0,
"modifiers": [
{
"register": "ekx",
"bitmap": "0bx00100xxx1xxxxxxxxxxxxxxxxxxxxx1"
}
]
},
],
}"#,
);
assert!(
cpu_template_result
.unwrap_err()
.to_string()
.contains("Invalid CPUID register. Must be one of [eax, ebx, ecx, edx]")
);
// Malformed MSR register address
let cpu_template_result = serde_json::from_str::<CustomCpuTemplate>(
r#"{
"msr_modifiers": [
{
"addr": "0jj0",
"bitmap": "0bx00100xxx1xxxx00xxx1xxxxxxxxxxx1"
},
]
}"#,
);
let error_msg: String = cpu_template_result.unwrap_err().to_string();
// Formatted error expected clarifying the number system prefix is missing
assert!(
error_msg.contains("No supported number system prefix found in value"),
"{}",
error_msg
);
// Malformed CPUID leaf address
let cpu_template_result = serde_json::from_str::<CustomCpuTemplate>(
r#"{
"cpuid_modifiers": [
{
"leaf": "k",
"subleaf": "0b000111",
"flags": 0,
"modifiers": [
{
"register": "eax",
"bitmap": "0bx00100xxx1xxxxxxxxxxxxxxxxxxxxx1"
}
]
},
],
}"#,
);
let error_msg: String = cpu_template_result.unwrap_err().to_string();
// Formatted error expected clarifying the number system prefix is missing
assert!(
error_msg.contains("No supported number system prefix found in value"),
"{}",
error_msg
);
// Malformed 64-bit bitmap - filter failed
let cpu_template_result = serde_json::from_str::<CustomCpuTemplate>(
r#"{
"msr_modifiers": [
{
"addr": "0x200",
"bitmap": "0bx0?1_0_0x_?x1xxxx00xxx1xxxxxxxxxxx1"
},
]
}"#,
);
assert!(cpu_template_result.unwrap_err().to_string().contains(
"Failed to parse string [0bx0?1_0_0x_?x1xxxx00xxx1xxxxxxxxxxx1] as a bitmap"
));
// Malformed 64-bit bitmap - value failed
let cpu_template_result = serde_json::from_str::<CustomCpuTemplate>(
r#"{
"msr_modifiers": [
{
"addr": "0x200",
"bitmap": "0bx00100x0x1xxxx05xxx1xxxxxxxxxxx1"
},
]
}"#,
);
assert!(
cpu_template_result.unwrap_err().to_string().contains(
"Failed to parse string [0bx00100x0x1xxxx05xxx1xxxxxxxxxxx1] as a bitmap"
)
);
}
#[test]
fn test_deserialization_lifecycle() {
let cpu_template = serde_json::from_str::<CustomCpuTemplate>(TEST_TEMPLATE_JSON)
.expect("Failed to deserialize custom CPU template.");
assert_eq!(5, cpu_template.cpuid_modifiers.len());
assert_eq!(4, cpu_template.msr_modifiers.len());
}
#[test]
fn test_serialization_lifecycle() {
let template = build_test_template();
let template_json_str_result = serde_json::to_string_pretty(&template);
let template_json = template_json_str_result.unwrap();
let deserialization_result = serde_json::from_str::<CustomCpuTemplate>(&template_json);
assert_eq!(template, deserialization_result.unwrap());
}
/// Test to confirm that templates for different CPU architectures have
/// a size bitmask that is supported by the architecture when serialized to JSON.
#[test]
fn test_bitmap_width() {
let mut cpuid_checked = false;
let mut msr_checked = false;
let template = build_test_template();
let x86_template_str =
serde_json::to_string(&template).expect("Error serializing x86 template");
let json_tree: Value = serde_json::from_str(&x86_template_str)
.expect("Error deserializing x86 template JSON string");
// Check that bitmaps for CPUID values are 32-bits in width
if let Some(cpuid_modifiers_root) = json_tree.get("cpuid_modifiers") {
let cpuid_mod_node = &cpuid_modifiers_root.as_array().unwrap()[0];
if let Some(modifiers_node) = cpuid_mod_node.get("modifiers") {
let mod_node = &modifiers_node.as_array().unwrap()[0];
if let Some(bit_map_str) = mod_node.get("bitmap") {
// 32-bit width with a "0b" prefix for binary-formatted numbers
assert_eq!(bit_map_str.as_str().unwrap().len(), 34);
cpuid_checked = true;
}
}
}
// Check that bitmaps for MSRs are 64-bits in width
if let Some(msr_modifiers_root) = json_tree.get("msr_modifiers") {
let msr_mod_node = &msr_modifiers_root.as_array().unwrap()[0];
if let Some(bit_map_str) = msr_mod_node.get("bitmap") {
// 64-bit width with a "0b" prefix for binary-formatted numbers
assert_eq!(bit_map_str.as_str().unwrap().len(), 66);
assert!(bit_map_str.as_str().unwrap().starts_with("0b"));
msr_checked = true;
}
}
assert!(
cpuid_checked,
"CPUID bitmap width in a x86_64 template was not tested."
);
assert!(
msr_checked,
"MSR bitmap width in a x86_64 template was not tested."
);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/x86_64/test_utils.rs | src/vmm/src/cpu_config/x86_64/test_utils.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use super::custom_cpu_template::{
CpuidLeafModifier, CpuidRegister, CpuidRegisterModifier, RegisterModifier,
};
use crate::cpu_config::templates::{CustomCpuTemplate, RegisterValueFilter};
use crate::cpu_config::x86_64::cpuid::KvmCpuidFlags;
/// Test CPU template in JSON format
pub const TEST_TEMPLATE_JSON: &str = r#"{
"cpuid_modifiers": [
{
"leaf": "0x80000001",
"subleaf": "0x0007",
"flags": 0,
"modifiers": [
{
"register": "eax",
"bitmap": "0bx00100xxx1xxxxxxxxxxxxxxxxxxxxx1"
}
]
},
{
"leaf": "0x80000002",
"subleaf": "0x0004",
"flags": 0,
"modifiers": [
{
"register": "ebx",
"bitmap": "0bxxx1xxxxxxxxxxxxxxxxxxxxx1"
},
{
"register": "ecx",
"bitmap": "0bx00100xxx1xxxxxxxxxxx0xxxxx0xxx1"
}
]
},
{
"leaf": "0x80000003",
"subleaf": "0x0004",
"flags": 0,
"modifiers": [
{
"register": "edx",
"bitmap": "0bx00100xxx1xxxxxxxxxxx0xxxxx0xxx1"
}
]
},
{
"leaf": "0x80000004",
"subleaf": "0x0004",
"flags": 0,
"modifiers": [
{
"register": "edx",
"bitmap": "0b00100xxx1xxxxxx1xxxxxxxxxxxxxx1"
},
{
"register": "ecx",
"bitmap": "0bx00100xxx1xxxxxxxxxxxxx111xxxxx1"
}
]
},
{
"leaf": "0x80000005",
"subleaf": "0x0004",
"flags": 0,
"modifiers": [
{
"register": "eax",
"bitmap": "0bx00100xxx1xxxxx00xxxxxx000xxxxx1"
},
{
"register": "edx",
"bitmap": "0bx10100xxx1xxxxxxxxxxxxx000xxxxx1"
}
]
}
],
"msr_modifiers": [
{
"addr": "0x0",
"bitmap": "0bx00100xxx1xxxx00xxx1xxxxxxxxxxx1"
},
{
"addr": "0x1",
"bitmap": "0bx00111xxx1xxxx111xxxxx101xxxxxx1"
},
{
"addr": "0b11",
"bitmap": "0bx00100xxx1xxxxxx0000000xxxxxxxx1"
},
{
"addr": "0xbbca",
"bitmap": "0bx00100xxx1xxxxxxxxx1"
}
]
}"#;
/// Test CPU template in JSON format but has an invalid field for the architecture.
/// "reg_modifiers" is the field name for the registers for aarch64"
pub const TEST_INVALID_TEMPLATE_JSON: &str = r#"{
"reg_modifiers": [
{
"addr": "0x0AAC",
"bitmap": "0b1xx1"
}
]
}"#;
/// Builds a sample custom CPU template
pub fn build_test_template() -> CustomCpuTemplate {
CustomCpuTemplate {
cpuid_modifiers: vec![CpuidLeafModifier {
leaf: 0x3,
subleaf: 0x0,
flags: KvmCpuidFlags(kvm_bindings::KVM_CPUID_FLAG_STATEFUL_FUNC),
modifiers: vec![
CpuidRegisterModifier {
register: CpuidRegister::Eax,
bitmap: RegisterValueFilter {
filter: 0b0111,
value: 0b0101,
},
},
CpuidRegisterModifier {
register: CpuidRegister::Ebx,
bitmap: RegisterValueFilter {
filter: 0b0111,
value: 0b0100,
},
},
CpuidRegisterModifier {
register: CpuidRegister::Ecx,
bitmap: RegisterValueFilter {
filter: 0b0111,
value: 0b0111,
},
},
CpuidRegisterModifier {
register: CpuidRegister::Edx,
bitmap: RegisterValueFilter {
filter: 0b0111,
value: 0b0001,
},
},
],
}],
msr_modifiers: vec![
RegisterModifier {
addr: 0x9999,
bitmap: RegisterValueFilter {
filter: 0,
value: 0,
},
},
RegisterModifier {
addr: 0x8000,
bitmap: RegisterValueFilter {
filter: 0,
value: 0,
},
},
],
..Default::default()
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/x86_64/mod.rs | src/vmm/src/cpu_config/x86_64/mod.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/// Module for CPUID instruction related content
pub mod cpuid;
/// Module for custom CPU templates
pub mod custom_cpu_template;
/// Module for static CPU templates
pub mod static_cpu_templates;
/// Module with test utils for custom CPU templates
pub mod test_utils;
use std::collections::BTreeMap;
use kvm_bindings::CpuId;
use self::custom_cpu_template::CpuidRegister;
use super::templates::CustomCpuTemplate;
use crate::Vcpu;
use crate::cpu_config::x86_64::cpuid::{Cpuid, CpuidKey};
/// Errors thrown while configuring templates.
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum CpuConfigurationError {
/// Template changes a CPUID entry not supported by KVM: Leaf: {0:0x}, Subleaf: {1:0x}
CpuidFeatureNotSupported(u32, u32),
/// Template changes an MSR entry not supported by KVM: Register Address: {0:0x}
MsrNotSupported(u32),
/// Can create cpuid from raw: {0}
CpuidFromKvmCpuid(#[from] crate::cpu_config::x86_64::cpuid::CpuidTryFromKvmCpuid),
/// KVM vcpu ioctl failed: {0}
VcpuIoctl(#[from] crate::vstate::vcpu::KvmVcpuError),
}
/// CPU configuration for x86_64 CPUs
#[derive(Debug, Clone, PartialEq)]
pub struct CpuConfiguration {
/// CPUID configuration
pub cpuid: Cpuid,
/// Register values as a key pair for model specific registers
/// Key: MSR address
/// Value: MSR value
pub msrs: BTreeMap<u32, u64>,
}
impl CpuConfiguration {
/// Create new CpuConfiguration.
pub fn new(
supported_cpuid: CpuId,
cpu_template: &CustomCpuTemplate,
first_vcpu: &Vcpu,
) -> Result<Self, CpuConfigurationError> {
let cpuid = cpuid::Cpuid::try_from(supported_cpuid)?;
let msrs = first_vcpu
.kvm_vcpu
.get_msrs(cpu_template.msr_index_iter())?;
Ok(CpuConfiguration { cpuid, msrs })
}
/// Modifies provided config with changes from template
pub fn apply_template(
self,
template: &CustomCpuTemplate,
) -> Result<Self, CpuConfigurationError> {
let Self {
mut cpuid,
mut msrs,
} = self;
let guest_cpuid = cpuid.inner_mut();
// Apply CPUID modifiers
for mod_leaf in template.cpuid_modifiers.iter() {
let cpuid_key = CpuidKey {
leaf: mod_leaf.leaf,
subleaf: mod_leaf.subleaf,
};
if let Some(entry) = guest_cpuid.get_mut(&cpuid_key) {
entry.flags = mod_leaf.flags;
// Can we modify one reg multiple times????
for mod_reg in &mod_leaf.modifiers {
match mod_reg.register {
CpuidRegister::Eax => {
entry.result.eax = mod_reg.bitmap.apply(entry.result.eax)
}
CpuidRegister::Ebx => {
entry.result.ebx = mod_reg.bitmap.apply(entry.result.ebx)
}
CpuidRegister::Ecx => {
entry.result.ecx = mod_reg.bitmap.apply(entry.result.ecx)
}
CpuidRegister::Edx => {
entry.result.edx = mod_reg.bitmap.apply(entry.result.edx)
}
}
}
} else {
return Err(CpuConfigurationError::CpuidFeatureNotSupported(
cpuid_key.leaf,
cpuid_key.subleaf,
));
}
}
for modifier in &template.msr_modifiers {
if let Some(reg_value) = msrs.get_mut(&modifier.addr) {
*reg_value = modifier.bitmap.apply(*reg_value);
} else {
return Err(CpuConfigurationError::MsrNotSupported(modifier.addr));
}
}
Ok(Self { cpuid, msrs })
}
}
#[cfg(test)]
mod tests {
use std::collections::BTreeMap;
use kvm_bindings::KVM_CPUID_FLAG_STATEFUL_FUNC;
use super::custom_cpu_template::{CpuidLeafModifier, CpuidRegisterModifier, RegisterModifier};
use super::*;
use crate::cpu_config::templates::RegisterValueFilter;
use crate::cpu_config::x86_64::cpuid::{CpuidEntry, IntelCpuid, KvmCpuidFlags};
fn build_test_template() -> CustomCpuTemplate {
CustomCpuTemplate {
cpuid_modifiers: vec![CpuidLeafModifier {
leaf: 0x3,
subleaf: 0x0,
flags: KvmCpuidFlags(KVM_CPUID_FLAG_STATEFUL_FUNC),
modifiers: vec![
CpuidRegisterModifier {
register: CpuidRegister::Eax,
bitmap: RegisterValueFilter {
filter: 0b0111,
value: 0b0101,
},
},
CpuidRegisterModifier {
register: CpuidRegister::Ebx,
bitmap: RegisterValueFilter {
filter: 0b0111,
value: 0b0100,
},
},
CpuidRegisterModifier {
register: CpuidRegister::Ecx,
bitmap: RegisterValueFilter {
filter: 0b0111,
value: 0b0111,
},
},
CpuidRegisterModifier {
register: CpuidRegister::Edx,
bitmap: RegisterValueFilter {
filter: 0b0111,
value: 0b0001,
},
},
],
}],
msr_modifiers: vec![
RegisterModifier {
addr: 0x9999,
bitmap: RegisterValueFilter {
filter: 0,
value: 0,
},
},
RegisterModifier {
addr: 0x8000,
bitmap: RegisterValueFilter {
filter: 0,
value: 0,
},
},
],
..Default::default()
}
}
fn build_supported_cpuid() -> Cpuid {
Cpuid::Intel(IntelCpuid(BTreeMap::from([(
CpuidKey {
leaf: 0x3,
subleaf: 0x0,
},
CpuidEntry::default(),
)])))
}
fn empty_cpu_config() -> CpuConfiguration {
CpuConfiguration {
cpuid: Cpuid::Intel(IntelCpuid(BTreeMap::new())),
msrs: Default::default(),
}
}
fn supported_cpu_config() -> CpuConfiguration {
CpuConfiguration {
cpuid: build_supported_cpuid(),
msrs: BTreeMap::from([(0x8000, 0b1000), (0x9999, 0b1010)]),
}
}
fn unsupported_cpu_config() -> CpuConfiguration {
CpuConfiguration {
cpuid: build_supported_cpuid(),
msrs: BTreeMap::from([(0x8000, 0b1000), (0x8001, 0b1010)]),
}
}
#[test]
fn test_empty_template() {
let host_configuration = empty_cpu_config();
let cpu_config_result = host_configuration
.clone()
.apply_template(&CustomCpuTemplate::default());
assert!(
cpu_config_result.is_ok(),
"{}",
cpu_config_result.unwrap_err()
);
// CPUID will be comparable, but not MSRs.
// The configuration will be configuration required by the template,
// not a holistic view of all registers.
assert_eq!(cpu_config_result.unwrap().cpuid, host_configuration.cpuid);
}
#[test]
fn test_apply_template() {
let host_configuration = supported_cpu_config();
let cpu_config_result = host_configuration
.clone()
.apply_template(&build_test_template());
assert!(
cpu_config_result.is_ok(),
"{}",
cpu_config_result.unwrap_err()
);
assert_ne!(cpu_config_result.unwrap(), host_configuration);
}
/// Invalid test in this context is when the template
/// has modifiers for registers that are not supported.
#[test]
fn test_invalid_template() {
// Test CPUID validation
let host_configuration = empty_cpu_config();
let guest_template = build_test_template();
let cpu_config_result = host_configuration.apply_template(&guest_template);
assert!(
cpu_config_result.is_err(),
"Expected an error as template should have failed to modify a CPUID entry that is not \
supported by host configuration",
);
assert_eq!(
cpu_config_result.unwrap_err(),
CpuConfigurationError::CpuidFeatureNotSupported(
guest_template.cpuid_modifiers[0].leaf,
guest_template.cpuid_modifiers[0].subleaf
)
);
// Test MSR validation
let host_configuration = unsupported_cpu_config();
let guest_template = build_test_template();
let cpu_config_result = host_configuration.apply_template(&guest_template);
assert!(
cpu_config_result.is_err(),
"Expected an error as template should have failed to modify an MSR value that is not \
supported by host configuration",
);
assert_eq!(
cpu_config_result.unwrap_err(),
CpuConfigurationError::MsrNotSupported(guest_template.msr_modifiers[0].addr)
)
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/x86_64/cpuid/mod.rs | src/vmm/src/cpu_config/x86_64/cpuid/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
#![warn(clippy::pedantic)]
#![allow(
clippy::blanket_clippy_restriction_lints,
clippy::implicit_return,
clippy::pattern_type_mismatch,
clippy::std_instead_of_alloc,
clippy::std_instead_of_core,
clippy::pub_use,
clippy::non_ascii_literal,
clippy::single_char_lifetime_names,
clippy::exhaustive_enums,
clippy::exhaustive_structs,
clippy::unseparated_literal_suffix,
clippy::mod_module_files,
clippy::missing_trait_methods
)]
// Apply CPUID specific lint adjustments.
#![allow(
clippy::unreadable_literal,
clippy::similar_names,
clippy::same_name_method,
clippy::doc_markdown,
clippy::module_name_repetitions
)]
//! Utility for configuring the CPUID (CPU identification) for the guest microVM.
use std::convert::TryFrom;
use std::mem::{size_of, transmute};
/// cpuid utility functions.
pub mod common;
/// AMD CPUID specification handling.
pub mod amd;
pub use amd::AmdCpuid;
/// Intel CPUID specification handling.
pub mod intel;
pub use intel::IntelCpuid;
/// CPUID normalize implementation.
mod normalize;
pub use normalize::{FeatureInformationError, GetMaxCpusPerPackageError, NormalizeCpuidError};
/// Intel brand string.
pub const VENDOR_ID_INTEL: &[u8; 12] = b"GenuineIntel";
/// AMD brand string.
pub const VENDOR_ID_AMD: &[u8; 12] = b"AuthenticAMD";
/// Intel brand string.
#[allow(clippy::undocumented_unsafe_blocks)]
pub const VENDOR_ID_INTEL_STR: &str = unsafe { std::str::from_utf8_unchecked(VENDOR_ID_INTEL) };
/// AMD brand string.
#[allow(clippy::undocumented_unsafe_blocks)]
pub const VENDOR_ID_AMD_STR: &str = unsafe { std::str::from_utf8_unchecked(VENDOR_ID_AMD) };
/// To store the brand string we have 3 leaves, each with 4 registers, each with 4 bytes.
pub const BRAND_STRING_LENGTH: usize = 3 * 4 * 4;
/// Mimic of [`std::arch::x86_64::__cpuid`] that wraps [`cpuid_count`].
fn cpuid(leaf: u32) -> std::arch::x86_64::CpuidResult {
cpuid_count(leaf, 0)
}
/// Safe wrapper around [`std::arch::x86_64::__cpuid_count`].
fn cpuid_count(leaf: u32, subleaf: u32) -> std::arch::x86_64::CpuidResult {
// JUSTIFICATION: There is no safe alternative.
// SAFETY: The `cfg(cpuid)` wrapping the `cpuid` module guarantees `CPUID` is supported.
unsafe { std::arch::x86_64::__cpuid_count(leaf, subleaf) }
}
/// Gets the host brand string from CPUID leaves 0x80000002..=0x80000004.
///
/// It's stored in-order with bytes flipped in each register e.g.:
/// ```text
/// "etnI" | ")4(l" | "oeX " | ")R(n" |
/// "orP " | "ssec" | "@ ro" | "0.3 " |
/// "zHG0" | null | null | null
/// ------------------------------------
/// Intel(R) Xeon(R) Processor @ 3.00Ghz
/// ```
// As we pass through host frequency, we require CPUID and thus `cfg(cpuid)`.
#[inline]
#[must_use]
pub fn host_brand_string() -> [u8; BRAND_STRING_LENGTH] {
    let leaf_a = cpuid(0x80000002);
    let leaf_b = cpuid(0x80000003);
    let leaf_c = cpuid(0x80000004);
    // The 12 registers, in leaf order, each contributing 4 native-endian bytes.
    let regs = [
        leaf_a.eax, leaf_a.ebx, leaf_a.ecx, leaf_a.edx, leaf_b.eax, leaf_b.ebx, leaf_b.ecx,
        leaf_b.edx, leaf_c.eax, leaf_c.ebx, leaf_c.ecx, leaf_c.edx,
    ];
    // Assemble the 48-byte string safely; equivalent to (and replaces) the previous
    // `transmute::<[u32; 12], [u8; 48]>`, which was not actually necessary.
    let mut bytes = [0u8; BRAND_STRING_LENGTH];
    for (dst, reg) in bytes.chunks_exact_mut(4).zip(regs) {
        dst.copy_from_slice(&reg.to_ne_bytes());
    }
    bytes
}
/// Trait defining shared behaviour between CPUID structures.
pub trait CpuidTrait {
    /// Returns the CPUID manufacturers ID (e.g. `GenuineIntel` or `AuthenticAMD`) or `None` if it
    /// cannot be found in CPUID (e.g. leaf 0x0 is missing).
    #[inline]
    #[must_use]
    fn vendor_id(&self) -> Option<[u8; 12]> {
        let leaf_0 = self.get(&CpuidKey::leaf(0x0))?;
        // The ordering of the vendor string is ebx,edx,ecx this is not a mistake.
        let (ebx, edx, ecx) = (
            leaf_0.result.ebx.to_ne_bytes(),
            leaf_0.result.edx.to_ne_bytes(),
            leaf_0.result.ecx.to_ne_bytes(),
        );
        let arr: [u8; 12] = [
            ebx[0], ebx[1], ebx[2], ebx[3], edx[0], edx[1], edx[2], edx[3], ecx[0], ecx[1], ecx[2],
            ecx[3],
        ];
        Some(arr)
    }

    /// Gets a given sub-leaf.
    fn get(&self, key: &CpuidKey) -> Option<&CpuidEntry>;

    /// Gets a given sub-leaf.
    fn get_mut(&mut self, key: &CpuidKey) -> Option<&mut CpuidEntry>;

    /// Applies a given brand string to CPUID.
    ///
    /// The 48-byte string is written into leaves 0x80000002..=0x80000004, 16 bytes per leaf, in
    /// EAX, EBX, ECX, EDX order as native-endian 4-byte words.
    ///
    /// # Errors
    ///
    /// When any of the leaves 0x80000002, 0x80000003 or 0x80000004 are not present. Note that
    /// (as before) leaves preceding a missing one will already have been updated when the error
    /// is returned.
    #[inline]
    fn apply_brand_string(
        &mut self,
        brand_string: &[u8; BRAND_STRING_LENGTH],
    ) -> Result<(), MissingBrandStringLeaves> {
        // Decode the brand string into its 12 native-endian register words.
        let mut words = [0u32; 12];
        for (word, chunk) in words.iter_mut().zip(brand_string.chunks_exact(4)) {
            // `chunks_exact(4)` guarantees each chunk is exactly 4 bytes long.
            *word = u32::from_ne_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]);
        }
        // Each leaf consumes 4 consecutive words; this replaces three copy-pasted
        // 16-assignment stanzas with a single loop.
        let leaves = [0x80000002u32, 0x80000003, 0x80000004];
        for (leaf_id, regs) in leaves.into_iter().zip(words.chunks_exact(4)) {
            let leaf = self
                .get_mut(&CpuidKey::leaf(leaf_id))
                .ok_or(MissingBrandStringLeaves)?;
            leaf.result.eax = regs[0];
            leaf.result.ebx = regs[1];
            leaf.result.ecx = regs[2];
            leaf.result.edx = regs[3];
        }
        Ok(())
    }
}
impl CpuidTrait for kvm_bindings::CpuId {
    /// Gets a given sub-leaf.
    ///
    /// KVM stores CPUID as a flat array, so lookup is a linear scan over
    /// `(function, index)` pairs.
    #[allow(clippy::transmute_ptr_to_ptr, clippy::unwrap_used)]
    #[inline]
    fn get(&self, CpuidKey { leaf, subleaf }: &CpuidKey) -> Option<&CpuidEntry> {
        let entry_opt = self
            .as_slice()
            .iter()
            .find(|entry| entry.function == *leaf && entry.index == *subleaf);
        entry_opt.map(|entry| {
            // JUSTIFICATION: There is no safe alternative.
            // SAFETY: The `kvm_cpuid_entry2` and `CpuidEntry` are `repr(C)` with known sizes.
            unsafe {
                // View the whole KVM entry as raw bytes, then reinterpret bytes 8..28
                // (20 bytes, i.e. `size_of::<CpuidEntry>()`: `flags` followed by the four
                // result registers) as a `CpuidEntry`.
                let arr: &[u8; size_of::<kvm_bindings::kvm_cpuid_entry2>()] = transmute(entry);
                // The `unwrap` is infallible: `arr[8..28]` is exactly 20 bytes long.
                let arr2: &[u8; size_of::<CpuidEntry>()] = arr[8..28].try_into().unwrap();
                transmute::<_, &CpuidEntry>(arr2)
            }
        })
    }

    /// Gets a given sub-leaf.
    ///
    /// Mutable twin of [`Self::get`]; see the byte-window notes there.
    #[allow(clippy::transmute_ptr_to_ptr, clippy::unwrap_used)]
    #[inline]
    fn get_mut(&mut self, CpuidKey { leaf, subleaf }: &CpuidKey) -> Option<&mut CpuidEntry> {
        let entry_opt = self
            .as_mut_slice()
            .iter_mut()
            .find(|entry| entry.function == *leaf && entry.index == *subleaf);
        entry_opt.map(|entry| {
            // JUSTIFICATION: There is no safe alternative.
            // SAFETY: The `kvm_cpuid_entry2` and `CpuidEntry` are `repr(C)` with known sizes.
            unsafe {
                let arr: &mut [u8; size_of::<kvm_bindings::kvm_cpuid_entry2>()] = transmute(entry);
                // The `unwrap` is infallible: `arr[8..28]` is exactly 20 bytes long.
                let arr2: &mut [u8; size_of::<CpuidEntry>()] =
                    (&mut arr[8..28]).try_into().unwrap();
                transmute::<_, &mut CpuidEntry>(arr2)
            }
        })
    }
}
/// Error type for [`CpuidTrait::apply_brand_string`].
#[derive(Debug, thiserror::Error, Eq, PartialEq)]
#[error("Missing brand string leaves 0x80000002, 0x80000003 and 0x80000004.")]
pub struct MissingBrandStringLeaves;

/// Error type for conversion from `kvm_bindings::CpuId` to `Cpuid`.
// `displaydoc::Display` derives `Display` from these doc comments.
#[rustfmt::skip]
#[derive(Debug, thiserror::Error, displaydoc::Display, PartialEq, Eq)]
pub enum CpuidTryFromKvmCpuid {
    /// Leaf 0 not found in the given `kvm_bindings::CpuId`.
    MissingLeaf0,
    /// Unsupported CPUID manufacturer id: \"{0:?}\" (only 'GenuineIntel' and 'AuthenticAMD' are supported).
    UnsupportedVendor([u8; 12]),
}

/// CPUID information, split by manufacturer since normalization differs per vendor.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Cpuid {
    /// Intel CPUID specific information.
    Intel(IntelCpuid),
    /// AMD CPUID specific information.
    Amd(AmdCpuid),
}
impl Cpuid {
/// Returns `Some(&mut IntelCpuid)` if `Self == Self::Intel(_)` else returns `None`.
#[inline]
#[must_use]
pub fn intel_mut(&mut self) -> Option<&mut IntelCpuid> {
match self {
Self::Intel(intel) => Some(intel),
Self::Amd(_) => None,
}
}
/// Returns `Some(&IntelCpuid)` if `Self == Self::Intel(_)` else returns `None`.
#[inline]
#[must_use]
pub fn intel(&self) -> Option<&IntelCpuid> {
match self {
Self::Intel(intel) => Some(intel),
Self::Amd(_) => None,
}
}
/// Returns `Some(&AmdCpuid)` if `Self == Self::Amd(_)` else returns `None`.
#[inline]
#[must_use]
pub fn amd(&self) -> Option<&AmdCpuid> {
match self {
Self::Intel(_) => None,
Self::Amd(amd) => Some(amd),
}
}
/// Returns `Some(&mut AmdCpuid)` if `Self == Self::Amd(_)` else returns `None`.
#[inline]
#[must_use]
pub fn amd_mut(&mut self) -> Option<&mut AmdCpuid> {
match self {
Self::Intel(_) => None,
Self::Amd(amd) => Some(amd),
}
}
/// Returns imumutable reference to inner BTreeMap<CpuidKey, CpuidEntry>.
#[inline]
#[must_use]
pub fn inner(&self) -> &std::collections::BTreeMap<CpuidKey, CpuidEntry> {
match self {
Self::Intel(intel_cpuid) => &intel_cpuid.0,
Self::Amd(amd_cpuid) => &amd_cpuid.0,
}
}
/// Returns mutable reference to inner BTreeMap<CpuidKey, CpuidEntry>.
#[inline]
#[must_use]
pub fn inner_mut(&mut self) -> &mut std::collections::BTreeMap<CpuidKey, CpuidEntry> {
match self {
Self::Intel(intel_cpuid) => &mut intel_cpuid.0,
Self::Amd(amd_cpuid) => &mut amd_cpuid.0,
}
}
}
impl CpuidTrait for Cpuid {
    /// Gets a given sub-leaf.
    ///
    /// Delegates to the vendor-specific [`CpuidTrait`] implementation.
    #[inline]
    fn get(&self, key: &CpuidKey) -> Option<&CpuidEntry> {
        match self {
            Self::Intel(intel_cpuid) => intel_cpuid.get(key),
            Self::Amd(amd_cpuid) => amd_cpuid.get(key),
        }
    }

    /// Gets a given sub-leaf.
    ///
    /// Delegates to the vendor-specific [`CpuidTrait`] implementation.
    #[inline]
    fn get_mut(&mut self, key: &CpuidKey) -> Option<&mut CpuidEntry> {
        match self {
            Self::Intel(intel_cpuid) => intel_cpuid.get_mut(key),
            Self::Amd(amd_cpuid) => amd_cpuid.get_mut(key),
        }
    }
}
impl TryFrom<kvm_bindings::CpuId> for Cpuid {
type Error = CpuidTryFromKvmCpuid;
#[inline]
fn try_from(kvm_cpuid: kvm_bindings::CpuId) -> Result<Self, Self::Error> {
let vendor_id = kvm_cpuid
.vendor_id()
.ok_or(CpuidTryFromKvmCpuid::MissingLeaf0)?;
match std::str::from_utf8(&vendor_id) {
Ok(VENDOR_ID_INTEL_STR) => Ok(Cpuid::Intel(IntelCpuid::from(kvm_cpuid))),
Ok(VENDOR_ID_AMD_STR) => Ok(Cpuid::Amd(AmdCpuid::from(kvm_cpuid))),
_ => Err(CpuidTryFromKvmCpuid::UnsupportedVendor(vendor_id)),
}
}
}
impl TryFrom<Cpuid> for kvm_bindings::CpuId {
    type Error = vmm_sys_util::fam::Error;

    /// Flattens the map representation back into KVM's array-of-entries form.
    fn try_from(cpuid: Cpuid) -> Result<Self, Self::Error> {
        let mut entries = Vec::with_capacity(cpuid.inner().len());
        for (key, entry) in cpuid.inner() {
            entries.push(kvm_bindings::kvm_cpuid_entry2 {
                function: key.leaf,
                index: key.subleaf,
                flags: entry.flags.0,
                eax: entry.result.eax,
                ebx: entry.result.ebx,
                ecx: entry.result.ecx,
                edx: entry.result.edx,
                // Remaining fields are default-initialized.
                ..Default::default()
            });
        }
        kvm_bindings::CpuId::from_entries(&entries)
    }
}
/// CPUID index values `leaf` and `subleaf`.
///
/// Used as the key of the per-vendor `BTreeMap` CPUID representation.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct CpuidKey {
    /// CPUID leaf.
    pub leaf: u32,
    /// CPUID subleaf.
    pub subleaf: u32,
}

impl CpuidKey {
    /// `CpuidKey { leaf, subleaf: 0 }`
    #[inline]
    #[must_use]
    pub fn leaf(leaf: u32) -> Self {
        Self { leaf, subleaf: 0 }
    }

    /// `CpuidKey { leaf, subleaf }`
    #[inline]
    #[must_use]
    pub fn subleaf(leaf: u32, subleaf: u32) -> Self {
        Self { leaf, subleaf }
    }
}
impl std::cmp::PartialOrd for CpuidKey {
    /// Defers to the total order implemented by [`Ord`].
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl std::cmp::Ord for CpuidKey {
    /// Lexicographic order: first by `leaf`, then by `subleaf`.
    #[inline]
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        (self.leaf, self.subleaf).cmp(&(other.leaf, other.subleaf))
    }
}
/// Definitions from `kvm/arch/x86/include/uapi/asm/kvm.h`.
#[derive(
    Debug, serde::Serialize, serde::Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Hash,
)]
pub struct KvmCpuidFlags(pub u32);
impl KvmCpuidFlags {
    /// Zero.
    pub const EMPTY: Self = Self(0);
    /// Indicates if the `index` field is used for indexing sub-leaves (if false, this CPUID leaf
    /// has no subleaves).
    pub const SIGNIFICANT_INDEX: Self = Self(1 << 0);
    /// Deprecated.
    pub const STATEFUL_FUNC: Self = Self(1 << 1);
    /// Deprecated.
    pub const STATE_READ_NEXT: Self = Self(1 << 2);
}

// NOTE(review): behaviorally identical to `#[derive(Default)]` (hence the allow);
// presumably kept manual for explicitness — confirm before deriving.
#[allow(clippy::derivable_impls)]
impl Default for KvmCpuidFlags {
    #[inline]
    fn default() -> Self {
        Self(0)
    }
}
/// CPUID entry information stored for each leaf of [`IntelCpuid`].
#[derive(Debug, Default, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[repr(C)]
pub struct CpuidEntry {
    /// The KVM requires a `flags` parameter which indicates if a given CPUID leaf has sub-leaves.
    /// This does not change at runtime so we can save memory by not storing this under every
    /// sub-leaf and instead fetching from a map when converting back to the KVM CPUID
    /// structure. But for robustness we currently do store the flags per entry and do not use
    /// this approach.
    ///
    /// A map on flags would look like:
    /// ```ignore
    /// #[allow(clippy::non_ascii_literal)]
    /// pub static KVM_CPUID_LEAF_FLAGS: phf::Map<u32, KvmCpuidFlags> = phf::phf_map! {
    ///     0x00u32 => KvmCpuidFlags::EMPTY,
    ///     0x01u32 => KvmCpuidFlags::EMPTY,
    ///     0x02u32 => KvmCpuidFlags::EMPTY,
    ///     0x03u32 => KvmCpuidFlags::EMPTY,
    ///     0x04u32 => KvmCpuidFlags::SIGNIFICANT_INDEX,
    ///     0x05u32 => KvmCpuidFlags::EMPTY,
    ///     0x06u32 => KvmCpuidFlags::EMPTY,
    ///     0x07u32 => KvmCpuidFlags::SIGNIFICANT_INDEX,
    ///     0x09u32 => KvmCpuidFlags::EMPTY,
    ///     0x0Au32 => KvmCpuidFlags::EMPTY,
    ///     0x0Bu32 => KvmCpuidFlags::SIGNIFICANT_INDEX,
    ///     0x0Fu32 => KvmCpuidFlags::SIGNIFICANT_INDEX,
    ///     0x10u32 => KvmCpuidFlags::SIGNIFICANT_INDEX,
    ///     0x12u32 => KvmCpuidFlags::SIGNIFICANT_INDEX,
    ///     0x14u32 => KvmCpuidFlags::SIGNIFICANT_INDEX,
    ///     0x15u32 => KvmCpuidFlags::EMPTY,
    ///     0x16u32 => KvmCpuidFlags::EMPTY,
    ///     0x17u32 => KvmCpuidFlags::SIGNIFICANT_INDEX,
    ///     0x18u32 => KvmCpuidFlags::SIGNIFICANT_INDEX,
    ///     0x19u32 => KvmCpuidFlags::EMPTY,
    ///     0x1Au32 => KvmCpuidFlags::EMPTY,
    ///     0x1Bu32 => KvmCpuidFlags::EMPTY,
    ///     0x1Cu32 => KvmCpuidFlags::EMPTY,
    ///     0x1Fu32 => KvmCpuidFlags::SIGNIFICANT_INDEX,
    ///     0x20u32 => KvmCpuidFlags::EMPTY,
    ///     0x80000000u32 => KvmCpuidFlags::EMPTY,
    ///     0x80000001u32 => KvmCpuidFlags::EMPTY,
    ///     0x80000002u32 => KvmCpuidFlags::EMPTY,
    ///     0x80000003u32 => KvmCpuidFlags::EMPTY,
    ///     0x80000004u32 => KvmCpuidFlags::EMPTY,
    ///     0x80000005u32 => KvmCpuidFlags::EMPTY,
    ///     0x80000006u32 => KvmCpuidFlags::EMPTY,
    ///     0x80000007u32 => KvmCpuidFlags::EMPTY,
    ///     0x80000008u32 => KvmCpuidFlags::EMPTY,
    /// };
    /// ```
    pub flags: KvmCpuidFlags,
    /// Register values.
    pub result: CpuidRegisters,
}

/// To transmute this into leaves such that we can return mutable reference to it with leaf specific
/// accessors, requires this to have a consistent member ordering.
/// [`core::arch::x86_64::CpuidResult`] is not `repr(C)`.
#[derive(Debug, Default, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[repr(C)]
pub struct CpuidRegisters {
    /// EAX
    pub eax: u32,
    /// EBX
    pub ebx: u32,
    /// ECX
    pub ecx: u32,
    /// EDX
    pub edx: u32,
}
impl From<core::arch::x86_64::CpuidResult> for CpuidRegisters {
    /// Copies the raw `CPUID` output registers into the `repr(C)` mirror type.
    #[inline]
    fn from(res: core::arch::x86_64::CpuidResult) -> Self {
        Self {
            eax: res.eax,
            ebx: res.ebx,
            ecx: res.ecx,
            edx: res.edx,
        }
    }
}
#[cfg(test)]
mod tests {
    use std::collections::BTreeMap;

    use super::*;

    /// "GenuineIntel" encoded as the (EBX, ECX, EDX) registers of leaf 0.
    const INTEL_VENDOR_REGS: (u32, u32, u32) = (0x756E6547, 0x6C65746E, 0x49656E69);
    /// "AuthenticAMD" encoded as the (EBX, ECX, EDX) registers of leaf 0.
    const AMD_VENDOR_REGS: (u32, u32, u32) = (0x68747541, 0x444D4163, 0x69746E65);

    /// Shared builder for a leaf-0 `(key, entry)` pair carrying the given vendor registers
    /// (deduplicates the previously copy-pasted Intel/AMD fixtures).
    fn build_leaf0_for_cpuid((ebx, ecx, edx): (u32, u32, u32)) -> (CpuidKey, CpuidEntry) {
        (
            CpuidKey {
                leaf: 0x0,
                subleaf: 0x0,
            },
            CpuidEntry {
                flags: KvmCpuidFlags::EMPTY,
                result: CpuidRegisters {
                    eax: 0x1,
                    ebx,
                    ecx,
                    edx,
                },
            },
        )
    }

    /// Shared builder for a leaf-0 KVM entry carrying the given vendor registers.
    fn build_leaf0_for_kvmcpuid(
        (ebx, ecx, edx): (u32, u32, u32),
    ) -> kvm_bindings::kvm_cpuid_entry2 {
        kvm_bindings::kvm_cpuid_entry2 {
            function: 0x0,
            index: 0x0,
            flags: 0x0,
            eax: 0x1,
            ebx,
            ecx,
            edx,
            ..Default::default()
        }
    }

    fn build_intel_leaf0_for_cpuid() -> (CpuidKey, CpuidEntry) {
        build_leaf0_for_cpuid(INTEL_VENDOR_REGS)
    }

    fn build_intel_leaf0_for_kvmcpuid() -> kvm_bindings::kvm_cpuid_entry2 {
        build_leaf0_for_kvmcpuid(INTEL_VENDOR_REGS)
    }

    fn build_amd_leaf0_for_cpuid() -> (CpuidKey, CpuidEntry) {
        build_leaf0_for_cpuid(AMD_VENDOR_REGS)
    }

    fn build_amd_leaf0_for_kvmcpuid() -> kvm_bindings::kvm_cpuid_entry2 {
        build_leaf0_for_kvmcpuid(AMD_VENDOR_REGS)
    }

    /// An arbitrary non-leaf-0 entry used to ensure conversion covers more than leaf 0.
    fn build_sample_leaf_for_cpuid() -> (CpuidKey, CpuidEntry) {
        (
            CpuidKey {
                leaf: 0x1,
                subleaf: 0x2,
            },
            CpuidEntry {
                flags: KvmCpuidFlags::SIGNIFICANT_INDEX,
                result: CpuidRegisters {
                    eax: 0x3,
                    ebx: 0x4,
                    ecx: 0x5,
                    edx: 0x6,
                },
            },
        )
    }

    fn build_sample_leaf_for_kvmcpuid() -> kvm_bindings::kvm_cpuid_entry2 {
        kvm_bindings::kvm_cpuid_entry2 {
            function: 0x1,
            index: 0x2,
            flags: 0x1,
            eax: 0x3,
            ebx: 0x4,
            ecx: 0x5,
            edx: 0x6,
            ..Default::default()
        }
    }

    fn build_sample_intel_cpuid() -> Cpuid {
        Cpuid::Intel(IntelCpuid(BTreeMap::from([
            build_intel_leaf0_for_cpuid(),
            build_sample_leaf_for_cpuid(),
        ])))
    }

    fn build_sample_intel_kvmcpuid() -> kvm_bindings::CpuId {
        kvm_bindings::CpuId::from_entries(&[
            build_intel_leaf0_for_kvmcpuid(),
            build_sample_leaf_for_kvmcpuid(),
        ])
        .unwrap()
    }

    fn build_sample_amd_cpuid() -> Cpuid {
        Cpuid::Amd(AmdCpuid(BTreeMap::from([
            build_amd_leaf0_for_cpuid(),
            build_sample_leaf_for_cpuid(),
        ])))
    }

    fn build_sample_amd_kvmcpuid() -> kvm_bindings::CpuId {
        kvm_bindings::CpuId::from_entries(&[
            build_amd_leaf0_for_kvmcpuid(),
            build_sample_leaf_for_kvmcpuid(),
        ])
        .unwrap()
    }

    #[test]
    fn get() {
        let cpuid = build_sample_intel_cpuid();
        assert_eq!(
            cpuid.get(&CpuidKey {
                leaf: 0x8888,
                subleaf: 0x0
            }),
            None
        );
        assert!(
            cpuid
                .get(&CpuidKey {
                    leaf: 0x0,
                    subleaf: 0x0,
                })
                .is_some()
        );
    }

    #[test]
    fn get_mut() {
        let mut cpuid = build_sample_intel_cpuid();
        assert_eq!(
            cpuid.get_mut(&CpuidKey {
                leaf: 0x888,
                subleaf: 0x0,
            }),
            None
        );
        assert!(
            cpuid
                .get_mut(&CpuidKey {
                    leaf: 0x0,
                    subleaf: 0x0,
                })
                .is_some()
        );
    }

    #[test]
    fn test_kvmcpuid_to_cpuid() {
        let kvm_cpuid = build_sample_intel_kvmcpuid();
        let cpuid = Cpuid::try_from(kvm_cpuid).unwrap();
        assert_eq!(cpuid, build_sample_intel_cpuid());

        let kvm_cpuid = build_sample_amd_kvmcpuid();
        let cpuid = Cpuid::try_from(kvm_cpuid).unwrap();
        assert_eq!(cpuid, build_sample_amd_cpuid());
    }

    #[test]
    fn test_cpuid_to_kvmcpuid() {
        let cpuid = build_sample_intel_cpuid();
        let kvm_cpuid = kvm_bindings::CpuId::try_from(cpuid).unwrap();
        assert_eq!(kvm_cpuid, build_sample_intel_kvmcpuid());

        let cpuid = build_sample_amd_cpuid();
        let kvm_cpuid = kvm_bindings::CpuId::try_from(cpuid).unwrap();
        assert_eq!(kvm_cpuid, build_sample_amd_kvmcpuid());
    }

    #[test]
    fn test_invalid_kvmcpuid_to_cpuid() {
        // If leaf 0 contains invalid vendor ID, the type conversion should fail.
        let kvm_cpuid =
            kvm_bindings::CpuId::from_entries(&[kvm_bindings::kvm_cpuid_entry2::default()])
                .unwrap();
        let cpuid = Cpuid::try_from(kvm_cpuid);
        assert_eq!(cpuid, Err(CpuidTryFromKvmCpuid::UnsupportedVendor([0; 12])));
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/x86_64/cpuid/normalize.rs | src/vmm/src/cpu_config/x86_64/cpuid/normalize.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use crate::cpu_config::x86_64::cpuid::{
CpuidEntry, CpuidKey, CpuidRegisters, CpuidTrait, KvmCpuidFlags, cpuid,
};
use crate::logger::warn;
use crate::vmm_config::machine_config::MAX_SUPPORTED_VCPUS;
/// Error type for [`super::Cpuid::normalize`].
#[allow(clippy::module_name_repetitions)]
#[derive(Debug, thiserror::Error, displaydoc::Display, Eq, PartialEq)]
pub enum NormalizeCpuidError {
    /// Provided `cpu_bits` is >=8: {0}.
    CpuBits(u8),
    /// Failed to apply modifications to Intel CPUID: {0}
    Intel(#[from] crate::cpu_config::x86_64::cpuid::intel::NormalizeCpuidError),
    /// Failed to apply modifications to AMD CPUID: {0}
    Amd(#[from] crate::cpu_config::x86_64::cpuid::amd::NormalizeCpuidError),
    /// Failed to set feature information leaf: {0}
    FeatureInformation(#[from] FeatureInformationError),
    /// Failed to set extended topology leaf: {0}
    ExtendedTopology(#[from] ExtendedTopologyError),
    /// Failed to set extended cache features leaf: {0}
    ExtendedCacheFeatures(#[from] ExtendedCacheFeaturesError),
    /// Failed to set vendor ID in leaf 0x0: {0}
    VendorId(#[from] VendorIdError),
}

/// Error type for the leaf 0x0 (vendor ID) section of `Cpuid::normalize`.
#[derive(Debug, thiserror::Error, displaydoc::Display, Eq, PartialEq)]
pub enum VendorIdError {
    /// Leaf 0x0 is missing from CPUID.
    MissingLeaf0,
}

/// Error type for the leaf 0x1 (feature information) section of `Cpuid::normalize`.
#[derive(Debug, thiserror::Error, displaydoc::Display, Eq, PartialEq)]
pub enum FeatureInformationError {
    /// Leaf 0x1 is missing from CPUID.
    MissingLeaf1,
    /// Failed to set `Initial APIC ID`: {0}
    InitialApicId(CheckedAssignError),
    /// Failed to set `CLFLUSH line size`: {0}
    Clflush(CheckedAssignError),
    /// Failed to get max CPUs per package: {0}
    GetMaxCpusPerPackage(GetMaxCpusPerPackageError),
    /// Failed to set max CPUs per package: {0}
    SetMaxCpusPerPackage(CheckedAssignError),
}

/// Error type for `get_max_cpus_per_package`.
#[derive(Debug, thiserror::Error, displaydoc::Display, Eq, PartialEq)]
pub enum GetMaxCpusPerPackageError {
    /// Failed to get max CPUs per package as `cpu_count == 0`
    Underflow,
    /// Failed to get max CPUs per package as `cpu_count > 128`
    Overflow,
}

/// Error type for the leaf 0xB (extended topology) section of `Cpuid::normalize`.
#[rustfmt::skip]
#[derive(Debug, thiserror::Error, displaydoc::Display, Eq, PartialEq)]
pub enum ExtendedTopologyError {
    /// Failed to set domain type (CPUID.(EAX=0xB,ECX={0}):ECX[15:8]): {1}
    DomainType(u32, CheckedAssignError),
    /// Failed to set input ECX (CPUID.(EAX=0xB,ECX={0}):ECX[7:0]): {1}
    InputEcx(u32, CheckedAssignError),
    /// Failed to set number of logical processors (CPUID.(EAX=0xB,ECX={0}):EBX[15:0]): {1}
    NumLogicalProcs(u32, CheckedAssignError),
    /// Failed to set right-shift bits (CPUID.(EAX=0xB,ECX={0}):EAX[4:0]): {1}
    RightShiftBits(u32, CheckedAssignError),
}

/// Error type for the leaves 0x80000005/0x80000006 (extended cache features) section of
/// `Cpuid::normalize`.
#[derive(Debug, thiserror::Error, displaydoc::Display, Eq, PartialEq)]
pub enum ExtendedCacheFeaturesError {
    /// Leaf 0x80000005 is missing from CPUID.
    MissingLeaf0x80000005,
    /// Leaf 0x80000006 is missing from CPUID.
    MissingLeaf0x80000006,
}

/// Error type for setting a bit range.
#[derive(Debug, PartialEq, Eq, thiserror::Error)]
#[error("Given value is greater than maximum storable value in bit range.")]
pub struct CheckedAssignError;
/// Sets or clears bit `bit` of `x` according to `y` (1 for `true`, 0 for `false`).
#[allow(clippy::arithmetic_side_effects)]
pub fn set_bit(x: &mut u32, bit: u8, y: bool) {
    debug_assert!(bit < 32);
    let mask = 1u32 << bit;
    if y {
        *x |= mask;
    } else {
        *x &= !mask;
    }
}
/// Writes `y` into the inclusive bit `range` of `x`, clearing any previous bits there.
///
/// # Errors
///
/// [`CheckedAssignError`] when `y` does not fit within the width of `range`.
pub fn set_range(
    x: &mut u32,
    range: std::ops::RangeInclusive<u8>,
    y: u32,
) -> Result<(), CheckedAssignError> {
    let (start, end) = (*range.start(), *range.end());
    debug_assert!(start <= end);
    debug_assert!(end < 32);
    // Width in bits is 1..=32 given the assertions above; a full 32-bit range always
    // fits any `u32` value, so only narrower ranges need the overflow check.
    let width = end - start + 1;
    if width < 32 && y >= (1u32 << width) {
        return Err(CheckedAssignError);
    }
    let mask = get_mask(start..=end);
    *x = (*x & !mask) | (y << start);
    Ok(())
}
/// Extracts the inclusive bit `range` of `x`, shifted down to bit 0.
pub fn get_range(x: u32, range: std::ops::RangeInclusive<u8>) -> u32 {
    let (start, end) = (*range.start(), *range.end());
    debug_assert!(start <= end);
    debug_assert!(end < 32);
    // Mask off everything outside the range, then align the result to bit 0.
    (x & get_mask(start..=end)) >> start
}
/// Returns a `u32` mask whose bits are ones exactly within the inclusive `range`.
const fn get_mask(range: std::ops::RangeInclusive<u8>) -> u32 {
    let start = *range.start();
    let width = *range.end() - start + 1;
    match width {
        // `1u32 << 32` would overflow, so the full-width mask is special-cased.
        32 => u32::MAX,
        _ => ((1u32 << width) - 1) << start,
    }
}
// We use this 2nd implementation so we can conveniently define functions only used within
// `normalize`.
#[allow(clippy::multiple_inherent_impl)]
impl super::Cpuid {
    /// Applies required modifications to CPUID respective of a vCPU.
    ///
    /// # Errors
    ///
    /// When:
    /// - [`super::IntelCpuid::normalize`] errors.
    /// - [`super::AmdCpuid::normalize`] errors.
    // As we pass through host frequency, we require CPUID and thus `cfg(cpuid)`.
    #[inline]
    pub fn normalize(
        &mut self,
        // The index of the current logical CPU in the range [0..cpu_count].
        cpu_index: u8,
        // The total number of logical CPUs.
        cpu_count: u8,
        // The number of bits needed to enumerate logical CPUs per core.
        cpu_bits: u8,
    ) -> Result<(), NormalizeCpuidError> {
        // `checked_shl` rejects `cpu_bits >= 8`, which would overflow the `u8`.
        let cpus_per_core = 1u8
            .checked_shl(u32::from(cpu_bits))
            .ok_or(NormalizeCpuidError::CpuBits(cpu_bits))?;
        // Vendor-independent normalization first, then the vendor-specific pass below.
        self.update_vendor_id()?;
        self.update_feature_info_entry(cpu_index, cpu_count)?;
        self.update_extended_topology_entry(cpu_index, cpu_count, cpu_bits, cpus_per_core)?;
        self.update_extended_cache_features()?;
        // Apply manufacturer specific modifications.
        match self {
            // Apply Intel specific modifications.
            Self::Intel(intel_cpuid) => {
                intel_cpuid.normalize(cpu_index, cpu_count, cpus_per_core)?;
            }
            // Apply AMD specific modifications.
            Self::Amd(amd_cpuid) => amd_cpuid.normalize(cpu_index, cpu_count, cpus_per_core)?,
        }
        Ok(())
    }

    /// Pass-through the vendor ID from the host. This is used to prevent modification of the vendor
    /// ID via custom CPU templates.
    fn update_vendor_id(&mut self) -> Result<(), VendorIdError> {
        let leaf_0 = self
            .get_mut(&CpuidKey::leaf(0x0))
            .ok_or(VendorIdError::MissingLeaf0)?;
        let host_leaf_0 = cpuid(0x0);
        // The vendor string lives in EBX/ECX/EDX; EAX (max supported leaf) is left as-is.
        leaf_0.result.ebx = host_leaf_0.ebx;
        leaf_0.result.ecx = host_leaf_0.ecx;
        leaf_0.result.edx = host_leaf_0.edx;
        Ok(())
    }

    // Update feature information entry
    fn update_feature_info_entry(
        &mut self,
        cpu_index: u8,
        cpu_count: u8,
    ) -> Result<(), FeatureInformationError> {
        let leaf_1 = self
            .get_mut(&CpuidKey::leaf(0x1))
            .ok_or(FeatureInformationError::MissingLeaf1)?;

        // CPUID.01H:EBX[15:08]
        // CLFLUSH line size (Value * 8 = cache line size in bytes; used also by CLFLUSHOPT).
        set_range(&mut leaf_1.result.ebx, 8..=15, 8).map_err(FeatureInformationError::Clflush)?;

        // CPUID.01H:EBX[23:16]
        // Maximum number of addressable IDs for logical processors in this physical package.
        //
        // The nearest power-of-2 integer that is not smaller than EBX[23:16] is the number of
        // unique initial APIC IDs reserved for addressing different logical processors in a
        // physical package. This field is only valid if CPUID.1.EDX.HTT[bit 28]= 1.
        let max_cpus_per_package = u32::from(
            get_max_cpus_per_package(cpu_count)
                .map_err(FeatureInformationError::GetMaxCpusPerPackage)?,
        );
        set_range(&mut leaf_1.result.ebx, 16..=23, max_cpus_per_package)
            .map_err(FeatureInformationError::SetMaxCpusPerPackage)?;

        // CPUID.01H:EBX[31:24]
        // Initial APIC ID.
        //
        // The 8-bit initial APIC ID in EBX[31:24] is replaced by the 32-bit x2APIC ID, available
        // in Leaf 0BH and Leaf 1FH.
        set_range(&mut leaf_1.result.ebx, 24..=31, u32::from(cpu_index))
            .map_err(FeatureInformationError::InitialApicId)?;

        // CPUID.01H:ECX[15] (Mnemonic: PDCM)
        // Performance and Debug Capability: A value of 1 indicates the processor supports the
        // performance and debug feature indication MSR IA32_PERF_CAPABILITIES.
        set_bit(&mut leaf_1.result.ecx, 15, false);

        // CPUID.01H:ECX[24] (Mnemonic: TSC-Deadline)
        // A value of 1 indicates that the processor’s local APIC timer supports one-shot operation
        // using a TSC deadline value.
        set_bit(&mut leaf_1.result.ecx, 24, true);

        // CPUID.01H:ECX[31] (Mnemonic: Hypervisor)
        set_bit(&mut leaf_1.result.ecx, 31, true);

        // CPUID.01H:EDX[28] (Mnemonic: HTT)
        // Max APIC IDs reserved field is Valid. A value of 0 for HTT indicates there is only a
        // single logical processor in the package and software should assume only a single APIC ID
        // is reserved. A value of 1 for HTT indicates the value in CPUID.1.EBX[23:16] (the Maximum
        // number of addressable IDs for logical processors in this package) is valid for the
        // package.
        set_bit(&mut leaf_1.result.edx, 28, cpu_count > 1);

        Ok(())
    }

    /// Update extended topology entry
    fn update_extended_topology_entry(
        &mut self,
        cpu_index: u8,
        cpu_count: u8,
        cpu_bits: u8,
        cpus_per_core: u8,
    ) -> Result<(), ExtendedTopologyError> {
        // The following commit changed the behavior of KVM_GET_SUPPORTED_CPUID to no longer
        // include CPUID.(EAX=0BH,ECX=1).
        // https://lore.kernel.org/all/20221027092036.2698180-1-pbonzini@redhat.com/
        self.inner_mut()
            .entry(CpuidKey::subleaf(0xB, 0x1))
            .or_insert(CpuidEntry {
                flags: KvmCpuidFlags::SIGNIFICANT_INDEX,
                result: CpuidRegisters {
                    eax: 0x0,
                    ebx: 0x0,
                    ecx: 0x0,
                    edx: 0x0,
                },
            });
        // Iterate sub-leaves in order until the first missing index.
        for index in 0.. {
            if let Some(subleaf) = self.get_mut(&CpuidKey::subleaf(0xB, index)) {
                // Reset eax, ebx, ecx
                subleaf.result.eax = 0;
                subleaf.result.ebx = 0;
                subleaf.result.ecx = 0;
                // CPUID.(EAX=0BH,ECX=N).EDX[31:0]
                // x2APIC ID of the current logical processor.
                subleaf.result.edx = u32::from(cpu_index);
                subleaf.flags = KvmCpuidFlags::SIGNIFICANT_INDEX;

                match index {
                    // CPUID.(EAX=0BH,ECX=N):EAX[4:0]
                    // The number of bits that the x2APIC ID must be shifted to the right to address
                    // instances of the next higher-scoped domain. When logical processor is not
                    // supported by the processor, the value of this field at the Logical Processor
                    // domain sub-leaf may be returned as either 0 (no allocated bits in the x2APIC
                    // ID) or 1 (one allocated bit in the x2APIC ID); software should plan
                    // accordingly.
                    // CPUID.(EAX=0BH,ECX=N):EBX[15:0]
                    // The number of logical processors across all instances of this domain within
                    // the next-higher scoped domain. (For example, in a processor socket/package
                    // comprising "M" dies of "N" cores each, where each core has "L" logical
                    // processors, the "die" domain sub-leaf value of this field would be M*N*L.)
                    // This number reflects configuration as shipped by Intel. Note, software must
                    // not use this field to enumerate processor topology.
                    // CPUID.(EAX=0BH,ECX=N):ECX[7:0]
                    // The input ECX sub-leaf index.
                    // CPUID.(EAX=0BH,ECX=N):ECX[15:8]
                    // Domain Type. This field provides an identification value which indicates the
                    // domain as shown below. Although domains are ordered, their assigned
                    // identification values are not and software should not depend on it.
                    //
                    // Hierarchy Domain Domain Type Identification Value
                    // -----------------------------------------------------------------
                    // Lowest Logical Processor 1
                    // Highest Core 2
                    //
                    // (Note that enumeration values of 0 and 3-255 are reserved.)

                    // Logical processor domain
                    0 => {
                        // To get the next level APIC ID, shift right with at most 1 because we have
                        // maximum 2 logical processors per core that can be represented by 1 bit.
                        set_range(&mut subleaf.result.eax, 0..=4, u32::from(cpu_bits))
                            .map_err(|err| ExtendedTopologyError::RightShiftBits(index, err))?;
                        // When cpu_count == 1 or HT is disabled, there is 1 logical core at this
                        // domain; otherwise there are 2
                        set_range(&mut subleaf.result.ebx, 0..=15, u32::from(cpus_per_core))
                            .map_err(|err| ExtendedTopologyError::NumLogicalProcs(index, err))?;
                        // Skip setting 0 to ECX[7:0] since it's already reset to 0.
                        // Set the domain type identification value for logical processor,
                        set_range(&mut subleaf.result.ecx, 8..=15, 1)
                            .map_err(|err| ExtendedTopologyError::DomainType(index, err))?;
                    }
                    // Core domain
                    1 => {
                        // Configure such that the next higher-scoped domain (i.e. socket) include
                        // all logical processors.
                        //
                        // The CPUID.(EAX=0BH,ECX=1).EAX[4:0] value must be an integer N such that
                        // 2^N is greater than or equal to the maximum number of vCPUs.
                        set_range(
                            &mut subleaf.result.eax,
                            0..=4,
                            MAX_SUPPORTED_VCPUS.next_power_of_two().ilog2(),
                        )
                        .map_err(|err| ExtendedTopologyError::RightShiftBits(index, err))?;
                        set_range(&mut subleaf.result.ebx, 0..=15, u32::from(cpu_count))
                            .map_err(|err| ExtendedTopologyError::NumLogicalProcs(index, err))?;
                        // Setting the input ECX value (i.e. `index`)
                        set_range(&mut subleaf.result.ecx, 0..=7, index)
                            .map_err(|err| ExtendedTopologyError::InputEcx(index, err))?;
                        // Set the domain type identification value for core.
                        set_range(&mut subleaf.result.ecx, 8..=15, 2)
                            .map_err(|err| ExtendedTopologyError::DomainType(index, err))?;
                    }
                    _ => {
                        // KVM no longer returns any subleaves greater than 0. The patch was merged
                        // in v6.2 and backported to v5.10. So for all our supported kernels,
                        // subleaves >= 2 should not be included.
                        // https://github.com/torvalds/linux/commit/45e966fcca03ecdcccac7cb236e16eea38cc18af
                        //
                        // However, we intentionally leave Firecracker not fail for unsupported
                        // kernels to keep working. Note that we can detect KVM regression thanks
                        // to the test that compares a fingerprint with its baseline.
                        warn!("Subleaf {index} not expected for CPUID leaf 0xB.");
                        subleaf.result.ecx = index;
                    }
                }
            } else {
                break;
            }
        }
        Ok(())
    }

    // Update extended cache features entry
    fn update_extended_cache_features(&mut self) -> Result<(), ExtendedCacheFeaturesError> {
        // Leaf 0x80000005 indicates L1 Cache and TLB Information.
        let guest_leaf_0x80000005 = self
            .get_mut(&CpuidKey::leaf(0x80000005))
            .ok_or(ExtendedCacheFeaturesError::MissingLeaf0x80000005)?;
        guest_leaf_0x80000005.result = cpuid(0x80000005).into();

        // Leaf 0x80000006 indicates L2 Cache and TLB and L3 Cache Information.
        let guest_leaf_0x80000006 = self
            .get_mut(&CpuidKey::leaf(0x80000006))
            .ok_or(ExtendedCacheFeaturesError::MissingLeaf0x80000006)?;
        guest_leaf_0x80000006.result = cpuid(0x80000006).into();
        guest_leaf_0x80000006.result.edx &= !0x00030000; // bits [17:16] are reserved
        Ok(())
    }
}
/// The maximum number of logical processors per package is computed as the closest
/// power of 2 higher or equal to the CPU count configured by the user.
const fn get_max_cpus_per_package(cpu_count: u8) -> Result<u8, GetMaxCpusPerPackageError> {
    // Equivalent to `2^ceil(log2(cpu_count))` for the supported 1..=128 range;
    // 0 and 129..=255 are rejected (`0u8.next_power_of_two()` would wrongly yield 1,
    // and anything above 128 would round past the u8 power-of-two limit).
    if cpu_count == 0 {
        Err(GetMaxCpusPerPackageError::Underflow)
    } else if cpu_count <= 128 {
        Ok(cpu_count.next_power_of_two())
    } else {
        Err(GetMaxCpusPerPackageError::Overflow)
    }
}
#[cfg(test)]
mod tests {
use std::collections::BTreeMap;
use super::*;
use crate::cpu_config::x86_64::cpuid::{AmdCpuid, Cpuid, IntelCpuid};
#[test]
fn get_max_cpus_per_package_test() {
assert_eq!(
get_max_cpus_per_package(0),
Err(GetMaxCpusPerPackageError::Underflow)
);
assert_eq!(get_max_cpus_per_package(1), Ok(1));
assert_eq!(get_max_cpus_per_package(2), Ok(2));
assert_eq!(get_max_cpus_per_package(3), Ok(4));
assert_eq!(get_max_cpus_per_package(4), Ok(4));
assert_eq!(get_max_cpus_per_package(5), Ok(8));
assert_eq!(get_max_cpus_per_package(8), Ok(8));
assert_eq!(get_max_cpus_per_package(9), Ok(16));
assert_eq!(get_max_cpus_per_package(16), Ok(16));
assert_eq!(get_max_cpus_per_package(17), Ok(32));
assert_eq!(get_max_cpus_per_package(32), Ok(32));
assert_eq!(get_max_cpus_per_package(33), Ok(64));
assert_eq!(get_max_cpus_per_package(64), Ok(64));
assert_eq!(get_max_cpus_per_package(65), Ok(128));
assert_eq!(get_max_cpus_per_package(128), Ok(128));
assert_eq!(
get_max_cpus_per_package(129),
Err(GetMaxCpusPerPackageError::Overflow)
);
assert_eq!(
get_max_cpus_per_package(u8::MAX),
Err(GetMaxCpusPerPackageError::Overflow)
);
}
#[test]
fn test_update_vendor_id() {
// Check `update_vendor_id()` passes through the vendor ID from the host correctly.
// Pseudo CPUID with invalid vendor ID.
let mut guest_cpuid = Cpuid::Intel(IntelCpuid(BTreeMap::from([(
CpuidKey {
leaf: 0x0,
subleaf: 0x0,
},
CpuidEntry {
flags: KvmCpuidFlags::EMPTY,
result: CpuidRegisters {
eax: 0,
ebx: 0x0123_4567,
ecx: 0x89ab_cdef,
edx: 0x55aa_55aa,
},
},
)])));
// Pass through vendor ID from host.
guest_cpuid.update_vendor_id().unwrap();
// Check if the guest vendor ID matches the host one.
let guest_leaf_0 = guest_cpuid
.get(&CpuidKey {
leaf: 0x0,
subleaf: 0x0,
})
.unwrap();
let host_leaf_0 = cpuid(0x0);
assert_eq!(guest_leaf_0.result.ebx, host_leaf_0.ebx);
assert_eq!(guest_leaf_0.result.ecx, host_leaf_0.ecx);
assert_eq!(guest_leaf_0.result.edx, host_leaf_0.edx);
}
#[test]
fn check_leaf_0xb_subleaf_0x1_added() {
// Check leaf 0xb / subleaf 0x1 is added in `update_extended_topology_entry()` even when it
// isn't included.
// Pseudo CPU setting
let smt = false;
let cpu_index = 0;
let cpu_count = 2;
let cpu_bits = u8::from(cpu_count > 1 && smt);
let cpus_per_core = 1u8
.checked_shl(u32::from(cpu_bits))
.ok_or(NormalizeCpuidError::CpuBits(cpu_bits))
.unwrap();
// Case 1: Intel CPUID
let mut intel_cpuid = Cpuid::Intel(IntelCpuid(BTreeMap::from([(
CpuidKey {
leaf: 0xb,
subleaf: 0,
},
CpuidEntry {
flags: KvmCpuidFlags::SIGNIFICANT_INDEX,
result: CpuidRegisters {
eax: 0,
ebx: 0,
ecx: 0,
edx: 0,
},
},
)])));
let result = intel_cpuid.update_extended_topology_entry(
cpu_index,
cpu_count,
cpu_bits,
cpus_per_core,
);
result.unwrap();
assert!(intel_cpuid.inner().contains_key(&CpuidKey {
leaf: 0xb,
subleaf: 0x1
}));
// Case 2: AMD CPUID
let mut amd_cpuid = Cpuid::Amd(AmdCpuid(BTreeMap::from([(
CpuidKey {
leaf: 0xb,
subleaf: 0,
},
CpuidEntry {
flags: KvmCpuidFlags::SIGNIFICANT_INDEX,
result: CpuidRegisters {
eax: 0,
ebx: 0,
ecx: 0,
edx: 0,
},
},
)])));
let result =
amd_cpuid.update_extended_topology_entry(cpu_index, cpu_count, cpu_bits, cpus_per_core);
result.unwrap();
assert!(amd_cpuid.inner().contains_key(&CpuidKey {
leaf: 0xb,
subleaf: 0x1
}));
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/x86_64/cpuid/common.rs | src/vmm/src/cpu_config/x86_64/cpuid/common.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![allow(clippy::restriction)]
use crate::arch::x86_64::generated::msr_index::{
MSR_IA32_BNDCFGS, MSR_IA32_CR_PAT, MSR_MTRRdefType, MSR_MTRRfix4K_C0000, MSR_MTRRfix4K_C8000,
MSR_MTRRfix4K_D0000, MSR_MTRRfix4K_D8000, MSR_MTRRfix4K_E0000, MSR_MTRRfix4K_E8000,
MSR_MTRRfix4K_F0000, MSR_MTRRfix4K_F8000, MSR_MTRRfix16K_80000, MSR_MTRRfix16K_A0000,
MSR_MTRRfix64K_00000,
};
/// Error type for [`get_cpuid`].
#[derive(Debug, thiserror::Error, displaydoc::Display, PartialEq, Eq)]
pub enum GetCpuidError {
/// Un-supported leaf: {0}
UnsupportedLeaf(u32),
/// Invalid subleaf: {0}
InvalidSubleaf(u32),
}
/// Extract entry from the cpuid.
///
/// # Errors
///
/// - When the given `leaf` is more than `max_leaf` supported by CPUID.
/// - When the CPUID leaf `sub-leaf` is invalid (all its register equal 0).
pub fn get_cpuid(leaf: u32, subleaf: u32) -> Result<std::arch::x86_64::CpuidResult, GetCpuidError> {
let max_leaf =
// JUSTIFICATION: There is no safe alternative.
// SAFETY: This is safe because the host supports the `cpuid` instruction
unsafe { std::arch::x86_64::__get_cpuid_max(leaf & 0x8000_0000).0 };
if leaf > max_leaf {
return Err(GetCpuidError::UnsupportedLeaf(leaf));
}
let entry = crate::cpu_config::x86_64::cpuid::cpuid_count(leaf, subleaf);
if entry.eax == 0 && entry.ebx == 0 && entry.ecx == 0 && entry.edx == 0 {
return Err(GetCpuidError::InvalidSubleaf(subleaf));
}
Ok(entry)
}
/// Extracts the CPU vendor id from leaf 0x0.
///
/// # Errors
///
/// When CPUID leaf 0 is not supported.
pub fn get_vendor_id_from_host() -> Result<[u8; 12], GetCpuidError> {
// JUSTIFICATION: There is no safe alternative.
// SAFETY: Always safe.
get_cpuid(0, 0).map(|vendor_entry| unsafe {
// The ordering of the vendor string is ebx,edx,ecx this is not a mistake.
std::mem::transmute::<[u32; 3], [u8; 12]>([
vendor_entry.ebx,
vendor_entry.edx,
vendor_entry.ecx,
])
})
}
/// Returns MSRs to be saved based on CPUID features that are enabled.
pub(crate) fn msrs_to_save_by_cpuid(cpuid: &kvm_bindings::CpuId) -> Vec<u32> {
/// Memory Protection Extensions
const MPX_BITINDEX: u32 = 14;
/// Memory Type Range Registers
const MTRR_BITINDEX: u32 = 12;
/// Memory Check Exception
const MCE_BITINDEX: u32 = 7;
/// Scans through the CPUID and determines if a feature bit is set.
// TODO: This currently involves a linear search which would be improved
// when we'll refactor the cpuid crate.
macro_rules! cpuid_is_feature_set {
($cpuid:ident, $leaf:expr, $index:expr, $reg:tt, $feature_bit:expr) => {{
let mut res = false;
for entry in $cpuid.as_slice().iter() {
if entry.function == $leaf && entry.index == $index {
if entry.$reg & (1 << $feature_bit) != 0 {
res = true;
break;
}
}
}
res
}};
}
let mut msrs = Vec::new();
// Macro used for easy definition of CPUID-MSR dependencies.
macro_rules! cpuid_msr_dep {
($leaf:expr, $index:expr, $reg:tt, $feature_bit:expr, $msr:expr) => {
if cpuid_is_feature_set!(cpuid, $leaf, $index, $reg, $feature_bit) {
msrs.extend($msr)
}
};
}
// TODO: Add more dependencies.
cpuid_msr_dep!(0x7, 0, ebx, MPX_BITINDEX, [MSR_IA32_BNDCFGS]);
// IA32_MTRR_PHYSBASEn, IA32_MTRR_PHYSMASKn
cpuid_msr_dep!(0x1, 0, edx, MTRR_BITINDEX, 0x200..0x210);
// Other MTRR MSRs
cpuid_msr_dep!(
0x1,
0,
edx,
MTRR_BITINDEX,
[
MSR_MTRRfix64K_00000,
MSR_MTRRfix16K_80000,
MSR_MTRRfix16K_A0000,
MSR_MTRRfix4K_C0000,
MSR_MTRRfix4K_C8000,
MSR_MTRRfix4K_D0000,
MSR_MTRRfix4K_D8000,
MSR_MTRRfix4K_E0000,
MSR_MTRRfix4K_E8000,
MSR_MTRRfix4K_F0000,
MSR_MTRRfix4K_F8000,
MSR_IA32_CR_PAT,
MSR_MTRRdefType,
]
);
// MCE MSRs
// We are saving 32 MCE banks here as this is the maximum number supported by KVM
// and configured by default.
// The physical number of the MCE banks depends on the CPU.
// The number of emulated MCE banks can be configured via KVM_X86_SETUP_MCE.
cpuid_msr_dep!(0x1, 0, edx, MCE_BITINDEX, 0x400..0x480);
msrs
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn get_cpuid_unsupported_leaf() {
let max_leaf =
// JUSTIFICATION: There is no safe alternative.
// SAFETY: This is safe because the host supports the `cpuid` instruction
unsafe { std::arch::x86_64::__get_cpuid_max(0).0 };
let max_leaf_plus_one = max_leaf + 1;
assert_eq!(
get_cpuid(max_leaf_plus_one, 0),
Err(GetCpuidError::UnsupportedLeaf(max_leaf_plus_one))
);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/x86_64/cpuid/amd/mod.rs | src/vmm/src/cpu_config/x86_64/cpuid/amd/mod.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![allow(clippy::similar_names, clippy::unreadable_literal)]
use super::{CpuidEntry, CpuidKey, CpuidRegisters, CpuidTrait, KvmCpuidFlags};
/// CPUID normalize implementation.
mod normalize;
pub use normalize::{
ExtendedApicIdError, ExtendedCacheTopologyError, FeatureEntryError, NormalizeCpuidError,
};
/// A structure matching the AMD CPUID specification as described in
/// [AMD64 Architecture Programmer’s Manual Volume 3: General-Purpose and System Instructions](https://www.amd.com/system/files/TechDocs/24594.pdf)
/// .
#[allow(clippy::module_name_repetitions)]
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct AmdCpuid(pub std::collections::BTreeMap<CpuidKey, CpuidEntry>);
impl CpuidTrait for AmdCpuid {
/// Gets a given sub-leaf.
#[inline]
fn get(&self, key: &CpuidKey) -> Option<&CpuidEntry> {
self.0.get(key)
}
/// Gets a given sub-leaf.
#[inline]
fn get_mut(&mut self, key: &CpuidKey) -> Option<&mut CpuidEntry> {
self.0.get_mut(key)
}
}
impl From<kvm_bindings::CpuId> for AmdCpuid {
#[inline]
fn from(kvm_cpuid: kvm_bindings::CpuId) -> Self {
let map = kvm_cpuid
.as_slice()
.iter()
.map(|entry| {
(
CpuidKey {
leaf: entry.function,
subleaf: entry.index,
},
CpuidEntry {
flags: KvmCpuidFlags(entry.flags),
result: CpuidRegisters {
eax: entry.eax,
ebx: entry.ebx,
ecx: entry.ecx,
edx: entry.edx,
},
},
)
})
.collect();
Self(map)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn get() {
let cpuid = AmdCpuid(std::collections::BTreeMap::new());
assert_eq!(
cpuid.get(&CpuidKey {
leaf: 0,
subleaf: 0
}),
None
);
}
#[test]
fn get_mut() {
let mut cpuid = AmdCpuid(std::collections::BTreeMap::new());
assert_eq!(
cpuid.get_mut(&CpuidKey {
leaf: 0,
subleaf: 0
}),
None
);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/x86_64/cpuid/amd/normalize.rs | src/vmm/src/cpu_config/x86_64/cpuid/amd/normalize.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use crate::cpu_config::x86_64::cpuid::common::{GetCpuidError, get_vendor_id_from_host};
use crate::cpu_config::x86_64::cpuid::normalize::{
CheckedAssignError, get_range, set_bit, set_range,
};
use crate::cpu_config::x86_64::cpuid::{
BRAND_STRING_LENGTH, CpuidEntry, CpuidKey, CpuidRegisters, CpuidTrait, KvmCpuidFlags,
MissingBrandStringLeaves, VENDOR_ID_AMD, cpuid, cpuid_count,
};
/// Error type for [`super::AmdCpuid::normalize`].
#[allow(clippy::module_name_repetitions)]
#[derive(Debug, thiserror::Error, displaydoc::Display, Eq, PartialEq)]
pub enum NormalizeCpuidError {
/// Provided `cpu_bits` is >=8: {0}.
CpuBits(u8),
/// Failed to passthrough cache topology: {0}
PassthroughCacheTopology(#[from] PassthroughCacheTopologyError),
/// Missing leaf 0x7 / subleaf 0.
MissingLeaf0x7Subleaf0,
/// Missing leaf 0x80000000.
MissingLeaf0x80000000,
/// Missing leaf 0x80000001.
MissingLeaf0x80000001,
/// Failed to set feature entry leaf: {0}
FeatureEntry(#[from] FeatureEntryError),
/// Failed to set extended cache topology leaf: {0}
ExtendedCacheTopology(#[from] ExtendedCacheTopologyError),
/// Failed to set extended APIC ID leaf: {0}
ExtendedApicId(#[from] ExtendedApicIdError),
/// Failed to set brand string: {0}
BrandString(MissingBrandStringLeaves),
}
/// Error type for setting cache topology section of [`super::AmdCpuid::normalize`].
#[derive(Debug, thiserror::Error, displaydoc::Display, Eq, PartialEq)]
pub enum PassthroughCacheTopologyError {
/// Failed to get the host vendor id: {0}
NoVendorId(GetCpuidError),
/// The host vendor id does not match AMD.
BadVendorId,
}
/// Error type for setting leaf 0x80000008 section of [`super::AmdCpuid::normalize`].
#[derive(Debug, thiserror::Error, displaydoc::Display, Eq, PartialEq)]
pub enum FeatureEntryError {
/// Missing leaf 0x80000008.
MissingLeaf0x80000008,
/// Failed to set number of physical threads (CPUID.80000008H:ECX[7:0]): {0}
NumberOfPhysicalThreads(CheckedAssignError),
/// Failed to set number of physical threads (CPUID.80000008H:ECX[7:0]) due to overflow.
NumberOfPhysicalThreadsOverflow,
}
/// Error type for setting leaf 0x8000001d section of [`super::AmdCpuid::normalize`].
#[derive(Debug, thiserror::Error, displaydoc::Display, Eq, PartialEq)]
pub enum ExtendedCacheTopologyError {
/// Missing leaf 0x8000001d.
MissingLeaf0x8000001d,
#[rustfmt::skip]
/// Failed to set number of logical processors sharing cache(CPUID.(EAX=8000001DH,ECX={0}):EAX[25:14]): {1}
NumSharingCache(u32, CheckedAssignError),
#[rustfmt::skip]
/// Failed to set number of logical processors sharing cache (CPUID.(EAX=8000001DH,ECX={0}):EAX[25:14]) due to overflow.
NumSharingCacheOverflow(u32),
}
/// Error type for setting leaf 0x8000001e section of [`super::AmdCpuid::normalize`].
#[derive(Debug, thiserror::Error, displaydoc::Display, Eq, PartialEq)]
pub enum ExtendedApicIdError {
/// Failed to set compute unit ID (CPUID.8000001EH:EBX[7:0]): {0}
ComputeUnitId(CheckedAssignError),
/// Failed to set extended APIC ID (CPUID.8000001EH:EAX[31:0]): {0}
ExtendedApicId(CheckedAssignError),
/// Missing leaf 0x8000001e.
MissingLeaf0x8000001e,
/// Failed to set threads per core unit (CPUID:8000001EH:EBX[15:8]): {0}
ThreadPerComputeUnit(CheckedAssignError),
}
// We use this 2nd implementation so we can conveniently define functions only used within
// `normalize`.
#[allow(clippy::multiple_inherent_impl)]
impl super::AmdCpuid {
/// We always use this brand string.
const DEFAULT_BRAND_STRING: &'static [u8; BRAND_STRING_LENGTH] =
b"AMD EPYC\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
/// Applies required modifications to CPUID respective of a vCPU.
///
/// # Errors
///
/// When attempting to access missing leaves or set fields within leaves to values that don't
/// fit.
#[inline]
pub fn normalize(
&mut self,
// The index of the current logical CPU in the range [0..cpu_count].
cpu_index: u8,
// The total number of logical CPUs.
cpu_count: u8,
// The number of logical CPUs per core.
cpus_per_core: u8,
) -> Result<(), NormalizeCpuidError> {
self.passthrough_cache_topology()?;
self.update_structured_extended_entry()?;
self.update_extended_feature_fn_entry()?;
self.update_amd_feature_entry(cpu_count)?;
self.update_extended_cache_topology_entry(cpu_count, cpus_per_core)?;
self.update_extended_apic_id_entry(cpu_index, cpus_per_core)?;
self.update_brand_string_entry()?;
Ok(())
}
/// Passthrough cache topology.
///
/// # Errors
///
/// This function passes through leaves from the host CPUID, if this does not match the AMD
/// specification it is possible to enter an indefinite loop. To avoid this, this will return an
/// error when the host CPUID vendor id does not match the AMD CPUID vendor id.
fn passthrough_cache_topology(&mut self) -> Result<(), PassthroughCacheTopologyError> {
if get_vendor_id_from_host().map_err(PassthroughCacheTopologyError::NoVendorId)?
!= *VENDOR_ID_AMD
{
return Err(PassthroughCacheTopologyError::BadVendorId);
}
// Pass-through host CPUID for leaves 0x8000001e and 0x8000001d.
{
// 0x8000001e - Processor Topology Information
self.0.insert(
CpuidKey::leaf(0x8000001e),
CpuidEntry {
flags: KvmCpuidFlags::EMPTY,
result: CpuidRegisters::from(cpuid(0x8000001e)),
},
);
// 0x8000001d - Cache Topology Information
for subleaf in 0.. {
let result = CpuidRegisters::from(cpuid_count(0x8000001d, subleaf));
// From 'AMD64 Architecture Programmer’s Manual Volume 3: General-Purpose and System
// Instructions':
//
// > To gather information for all cache levels, software must repeatedly execute
// > CPUID with 8000_001Dh in EAX and ECX set to increasing values beginning with 0
// > until a value of 00h is returned in the field CacheType (EAX[4:0]) indicating
// > no more cache descriptions are available for this processor. If CPUID
// > Fn8000_0001_ECX[TopologyExtensions] = 0, then CPUID Fn8000_001Dh is reserved.
//
// On non-AMD hosts this condition may never be true thus this loop may be
// indefinite.
// CPUID Fn8000_0001D_EAX_x[4:0] (Field Name: CacheType)
// Cache type. Identifies the type of cache.
// ```text
// Bits Description
// 00h Null; no more caches.
// 01h Data cache
// 02h Instruction cache
// 03h Unified cache
// 1Fh-04h Reserved.
// ```
let cache_type = result.eax & 15;
if cache_type == 0 {
break;
}
self.0.insert(
CpuidKey::subleaf(0x8000001d, subleaf),
CpuidEntry {
flags: KvmCpuidFlags::SIGNIFICANT_INDEX,
result,
},
);
}
}
Ok(())
}
/// Updated extended feature fn entry.
fn update_extended_feature_fn_entry(&mut self) -> Result<(), NormalizeCpuidError> {
// set the Topology Extension bit since we use the Extended Cache Topology leaf
let leaf_80000001 = self
.get_mut(&CpuidKey::leaf(0x80000001))
.ok_or(NormalizeCpuidError::MissingLeaf0x80000001)?;
// CPUID Fn8000_0001_ECX[22] (Field Name: TopologyExtensions)
// Topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID
// Fn8000_001E_EDX.
set_bit(&mut leaf_80000001.result.ecx, 22, true);
Ok(())
}
// Update structured extended feature entry.
fn update_structured_extended_entry(&mut self) -> Result<(), NormalizeCpuidError> {
let leaf_7_subleaf_0 = self
.get_mut(&CpuidKey::subleaf(0x7, 0x0))
.ok_or(NormalizeCpuidError::MissingLeaf0x7Subleaf0)?;
// According to AMD64 Architecture Programmer’s Manual, IA32_ARCH_CAPABILITIES MSR is not
// available on AMD. The availability of IA32_ARCH_CAPABILITIES MSR is controlled via
// CPUID.07H(ECX=0):EDX[bit 29]. KVM sets this bit no matter what but this feature is not
// supported by hardware.
set_bit(&mut leaf_7_subleaf_0.result.edx, 29, false);
Ok(())
}
/// Update AMD feature entry.
#[allow(clippy::unwrap_used, clippy::unwrap_in_result)]
fn update_amd_feature_entry(&mut self, cpu_count: u8) -> Result<(), FeatureEntryError> {
/// This value allows at most 64 logical threads within a package.
const THREAD_ID_MAX_SIZE: u32 = 7;
// We don't support more then 128 threads right now.
// It's safe to put them all on the same processor.
let leaf_80000008 = self
.get_mut(&CpuidKey::leaf(0x80000008))
.ok_or(FeatureEntryError::MissingLeaf0x80000008)?;
// CPUID Fn8000_0008_ECX[15:12] (Field Name: ApicIdSize)
// APIC ID size. The number of bits in the initial APIC20[ApicId] value that indicate
// logical processor ID within a package. The size of this field determines the
// maximum number of logical processors (MNLP) that the package could
// theoretically support, and not the actual number of logical processors that are
// implemented or enabled in the package, as indicated by CPUID
// Fn8000_0008_ECX[NC]. A value of zero indicates that legacy methods must be
// used to determine the maximum number of logical processors, as indicated by
// CPUID Fn8000_0008_ECX[NC].
set_range(&mut leaf_80000008.result.ecx, 12..=15, THREAD_ID_MAX_SIZE).unwrap();
// CPUID Fn8000_0008_ECX[7:0] (Field Name: NC)
// Number of physical threads - 1. The number of threads in the processor is NT+1
// (e.g., if NT = 0, then there is one thread). See “Legacy Method” on page 633.
let sub = cpu_count
.checked_sub(1)
.ok_or(FeatureEntryError::NumberOfPhysicalThreadsOverflow)?;
set_range(&mut leaf_80000008.result.ecx, 0..=7, u32::from(sub))
.map_err(FeatureEntryError::NumberOfPhysicalThreads)?;
Ok(())
}
/// Update extended cache topology entry.
#[allow(clippy::unwrap_in_result, clippy::unwrap_used)]
fn update_extended_cache_topology_entry(
&mut self,
cpu_count: u8,
cpus_per_core: u8,
) -> Result<(), ExtendedCacheTopologyError> {
for i in 0.. {
if let Some(subleaf) = self.get_mut(&CpuidKey::subleaf(0x8000001d, i)) {
// CPUID Fn8000_001D_EAX_x[7:5] (Field Name: CacheLevel)
// Cache level. Identifies the level of this cache. Note that the enumeration value
// is not necessarily equal to the cache level.
// ```text
// Bits Description
// 000b Reserved.
// 001b Level 1
// 010b Level 2
// 011b Level 3
// 111b-100b Reserved.
// ```
let cache_level = get_range(subleaf.result.eax, 5..=7);
// CPUID Fn8000_001D_EAX_x[25:14] (Field Name: NumSharingCache)
// Specifies the number of logical processors sharing the cache enumerated by N,
// the value passed to the instruction in ECX. The number of logical processors
// sharing this cache is the value of this field incremented by 1. To determine
// which logical processors are sharing a cache, determine a Share
// Id for each processor as follows:
//
// ShareId = LocalApicId >> log2(NumSharingCache+1)
//
// Logical processors with the same ShareId then share a cache. If
// NumSharingCache+1 is not a power of two, round it up to the next power of two.
match cache_level {
// L1 & L2 Cache
// The L1 & L2 cache is shared by at most 2 hyper-threads
1 | 2 => {
// SAFETY: We know `cpus_per_core > 0` therefore this is always safe.
let sub = u32::from(cpus_per_core.checked_sub(1).unwrap());
set_range(&mut subleaf.result.eax, 14..=25, sub)
.map_err(|err| ExtendedCacheTopologyError::NumSharingCache(i, err))?;
}
// L3 Cache
// The L3 cache is shared among all the logical threads
3 => {
let sub = cpu_count
.checked_sub(1)
.ok_or(ExtendedCacheTopologyError::NumSharingCacheOverflow(i))?;
set_range(&mut subleaf.result.eax, 14..=25, u32::from(sub))
.map_err(|err| ExtendedCacheTopologyError::NumSharingCache(i, err))?;
}
_ => (),
}
} else {
break;
}
}
Ok(())
}
/// Update extended apic id entry
#[allow(clippy::unwrap_used, clippy::unwrap_in_result)]
fn update_extended_apic_id_entry(
&mut self,
cpu_index: u8,
cpus_per_core: u8,
) -> Result<(), ExtendedApicIdError> {
/// 1 node per processor.
const NODES_PER_PROCESSOR: u32 = 0;
// When hyper-threading is enabled each pair of 2 consecutive logical CPUs
// will have the same core id since they represent 2 threads in the same core.
// For Example:
// logical CPU 0 -> core id: 0
// logical CPU 1 -> core id: 0
// logical CPU 2 -> core id: 1
// logical CPU 3 -> core id: 1
//
// SAFETY: We know `cpus_per_core != 0` therefore this is always safe.
let core_id = u32::from(cpu_index.checked_div(cpus_per_core).unwrap());
let leaf_8000001e = self
.get_mut(&CpuidKey::leaf(0x8000001e))
.ok_or(ExtendedApicIdError::MissingLeaf0x8000001e)?;
// CPUID Fn8000_001E_EAX[31:0] (Field Name: ExtendedApicId)
// Extended APIC ID. If MSR0000_001B[ApicEn] = 0, this field is reserved.
set_range(&mut leaf_8000001e.result.eax, 0..=31, u32::from(cpu_index))
.map_err(ExtendedApicIdError::ExtendedApicId)?;
// CPUID Fn8000_001E_EBX[7:0] (Field Name: ComputeUnitId)
// Compute unit ID. Identifies a Compute Unit, which may be one or more physical cores that
// each implement one or more logical processors.
set_range(&mut leaf_8000001e.result.ebx, 0..=7, core_id)
.map_err(ExtendedApicIdError::ComputeUnitId)?;
// CPUID Fn8000_001E_EBX[15:8] (Field Name: ThreadsPerComputeUnit)
// Threads per compute unit (zero-based count). The actual number of threads
// per compute unit is the value of this field + 1. To determine which logical
// processors (threads) belong to a given Compute Unit, determine a ShareId
// for each processor as follows:
//
// ShareId = LocalApicId >> log2(ThreadsPerComputeUnit+1)
//
// Logical processors with the same ShareId then belong to the same Compute
// Unit. (If ThreadsPerComputeUnit+1 is not a power of two, round it up to the
// next power of two).
//
// SAFETY: We know `cpus_per_core > 0` therefore this is always safe.
let sub = u32::from(cpus_per_core.checked_sub(1).unwrap());
set_range(&mut leaf_8000001e.result.ebx, 8..=15, sub)
.map_err(ExtendedApicIdError::ThreadPerComputeUnit)?;
// CPUID Fn8000_001E_ECX[10:8] (Field Name: NodesPerProcessor)
// Specifies the number of nodes in the package/socket in which this logical
// processor resides. Node in this context corresponds to a processor die.
// Encoding is N-1, where N is the number of nodes present in the socket.
//
// SAFETY: We know the value always fits within the range and thus is always safe.
// Set nodes per processor.
set_range(&mut leaf_8000001e.result.ecx, 8..=10, NODES_PER_PROCESSOR).unwrap();
// CPUID Fn8000_001E_ECX[7:0] (Field Name: NodeId)
// Specifies the ID of the node containing the current logical processor. NodeId
// values are unique across the system.
//
// Put all the cpus in the same node.
set_range(&mut leaf_8000001e.result.ecx, 0..=7, 0).unwrap();
Ok(())
}
/// Update brand string entry
fn update_brand_string_entry(&mut self) -> Result<(), NormalizeCpuidError> {
self.apply_brand_string(Self::DEFAULT_BRAND_STRING)
.map_err(NormalizeCpuidError::BrandString)?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::collections::BTreeMap;
use super::*;
use crate::cpu_config::x86_64::cpuid::AmdCpuid;
#[test]
fn test_update_structured_extended_entry_invalid() {
// `update_structured_extended_entry()` should exit with MissingLeaf0x7Subleaf0 error for
// CPUID lacking leaf 0x7 / subleaf 0.
let mut cpuid = AmdCpuid(BTreeMap::new());
assert_eq!(
cpuid.update_structured_extended_entry().unwrap_err(),
NormalizeCpuidError::MissingLeaf0x7Subleaf0
);
}
#[test]
fn test_update_structured_extended_entry_valid() {
// `update_structured_extended_entry()` should succeed for CPUID having leaf 0x7 / subleaf
// 0, and bit 29 of EDX (IA32_ARCH_CAPABILITIES MSR enumeration) should be disabled.
let mut cpuid = AmdCpuid(BTreeMap::from([(
CpuidKey {
leaf: 0x7,
subleaf: 0x0,
},
CpuidEntry {
flags: KvmCpuidFlags::SIGNIFICANT_INDEX,
result: CpuidRegisters {
eax: 0,
ebx: 0,
ecx: 0,
edx: u32::MAX,
},
},
)]));
cpuid.update_structured_extended_entry().unwrap();
assert_eq!(
cpuid
.get(&CpuidKey {
leaf: 0x7,
subleaf: 0x0
})
.unwrap()
.result
.edx
& (1 << 29),
0
);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/x86_64/cpuid/intel/mod.rs | src/vmm/src/cpu_config/x86_64/cpuid/intel/mod.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![allow(
clippy::similar_names,
clippy::module_name_repetitions,
clippy::unreadable_literal,
clippy::unsafe_derive_deserialize
)]
/// CPUID normalize implementation.
mod normalize;
pub use normalize::{DeterministicCacheError, NormalizeCpuidError};
use super::{CpuidEntry, CpuidKey, CpuidRegisters, CpuidTrait, KvmCpuidFlags};
/// A structure matching the Intel CPUID specification as described in
/// [Intel® 64 and IA-32 Architectures Software Developer's Manual Combined Volumes 2A, 2B, 2C, and 2D: Instruction Set Reference, A-Z](https://cdrdv2.intel.com/v1/dl/getContent/671110)
/// .
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct IntelCpuid(pub std::collections::BTreeMap<CpuidKey, CpuidEntry>);
impl CpuidTrait for IntelCpuid {
/// Gets a given sub-leaf.
#[inline]
fn get(&self, key: &CpuidKey) -> Option<&CpuidEntry> {
self.0.get(key)
}
/// Gets a given sub-leaf.
#[inline]
fn get_mut(&mut self, key: &CpuidKey) -> Option<&mut CpuidEntry> {
self.0.get_mut(key)
}
}
impl From<kvm_bindings::CpuId> for IntelCpuid {
#[inline]
fn from(kvm_cpuid: kvm_bindings::CpuId) -> Self {
let map = kvm_cpuid
.as_slice()
.iter()
.map(|entry| {
(
CpuidKey {
leaf: entry.function,
subleaf: entry.index,
},
CpuidEntry {
flags: KvmCpuidFlags(entry.flags),
result: CpuidRegisters {
eax: entry.eax,
ebx: entry.ebx,
ecx: entry.ecx,
edx: entry.edx,
},
},
)
})
.collect();
Self(map)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn get() {
let cpuid = IntelCpuid(std::collections::BTreeMap::new());
assert_eq!(
cpuid.get(&CpuidKey {
leaf: 0,
subleaf: 0
}),
None
);
}
#[test]
fn get_mut() {
let mut cpuid = IntelCpuid(std::collections::BTreeMap::new());
assert_eq!(
cpuid.get_mut(&CpuidKey {
leaf: 0,
subleaf: 0
}),
None
);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/x86_64/cpuid/intel/normalize.rs | src/vmm/src/cpu_config/x86_64/cpuid/intel/normalize.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use crate::cpu_config::x86_64::cpuid::normalize::{
CheckedAssignError, get_range, set_bit, set_range,
};
use crate::cpu_config::x86_64::cpuid::{
BRAND_STRING_LENGTH, CpuidKey, CpuidRegisters, CpuidTrait, MissingBrandStringLeaves,
host_brand_string,
};
/// Error type for [`super::IntelCpuid::normalize`].
#[derive(Debug, thiserror::Error, displaydoc::Display, Eq, PartialEq)]
pub enum NormalizeCpuidError {
/// Failed to set deterministic cache leaf: {0}
DeterministicCache(#[from] DeterministicCacheError),
/// Leaf 0x6 is missing from CPUID.
MissingLeaf6,
/// Leaf 0x7 / subleaf 0 is missing from CPUID.
MissingLeaf7,
/// Leaf 0xA is missing from CPUID.
MissingLeafA,
/// Failed to get brand string: {0}
GetBrandString(DefaultBrandStringError),
/// Failed to set brand string: {0}
ApplyBrandString(MissingBrandStringLeaves),
}
/// Error type for setting leaf 4 section of [`super::IntelCpuid::normalize`].
// `displaydoc::Display` does not support multi-line comments, `rustfmt` will format these comments
// across multiple lines, so we skip formatting here. This can be removed when
// https://github.com/yaahc/displaydoc/issues/44 is resolved.
#[rustfmt::skip]
#[allow(clippy::enum_variant_names)]
#[derive(Debug, thiserror::Error, displaydoc::Display, Eq, PartialEq)]
pub enum DeterministicCacheError {
/// Failed to set max addressable core ID in physical package (CPUID.04H:EAX[31:26]): {0}.
MaxCorePerPackage(CheckedAssignError),
/// Failed to set max addressable core ID in physical package (CPUID.04H:EAX[31:26]) due to underflow in cores.
MaxCorePerPackageUnderflow,
/// Failed to set max addressable processor ID sharing cache (CPUID.04H:EAX[25:14]): {0}.
MaxCpusPerCore(CheckedAssignError),
/// Failed to set max addressable processor ID sharing cache (CPUID.04H:EAX[25:14]) due to underflow in cpu count.
MaxCpusPerCoreUnderflow,
}
/// We always use this brand string.
pub const DEFAULT_BRAND_STRING: &[u8; BRAND_STRING_LENGTH] =
b"Intel(R) Xeon(R) Processor\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
pub const DEFAULT_BRAND_STRING_BASE: &[u8; 28] = b"Intel(R) Xeon(R) Processor @";
// We use this 2nd implementation so we can conveniently define functions only used within
// `normalize`.
#[allow(clippy::multiple_inherent_impl)]
impl super::IntelCpuid {
/// Applies required modifications to CPUID respective of a vCPU.
///
/// # Errors
///
/// When attempting to access missing leaves or set fields within leaves to values that don't
/// fit.
#[inline]
pub fn normalize(
&mut self,
// The index of the current logical CPU in the range [0..cpu_count].
_cpu_index: u8,
// The total number of logical CPUs.
cpu_count: u8,
// The number of logical CPUs per core.
cpus_per_core: u8,
) -> Result<(), NormalizeCpuidError> {
self.update_deterministic_cache_entry(cpu_count, cpus_per_core)?;
self.update_power_management_entry()?;
self.update_extended_feature_flags_entry()?;
self.update_performance_monitoring_entry()?;
self.update_extended_topology_v2_entry();
self.update_brand_string_entry()?;
Ok(())
}
/// Update deterministic cache entry
#[allow(clippy::unwrap_in_result)]
fn update_deterministic_cache_entry(
&mut self,
cpu_count: u8,
cpus_per_core: u8,
) -> Result<(), DeterministicCacheError> {
for i in 0.. {
if let Some(subleaf) = self.get_mut(&CpuidKey::subleaf(0x4, i)) {
// If ECX contains an invalid subleaf, EAX/EBX/ECX/EDX return 0 and the
// normalization should not be applied. Exits when it hits such an invalid subleaf.
if subleaf.result.eax == 0
&& subleaf.result.ebx == 0
&& subleaf.result.ecx == 0
&& subleaf.result.edx == 0
{
break;
}
// CPUID.04H:EAX[7:5]
// Cache Level (Starts at 1)
let cache_level = get_range(subleaf.result.eax, 5..=7);
// CPUID.04H:EAX[25:14]
// Maximum number of addressable IDs for logical processors sharing this cache.
// - Add one to the return value to get the result.
// - The nearest power-of-2 integer that is not smaller than (1 + EAX[25:14]) is the
// number of unique initial APIC IDs reserved for addressing different logical
// processors sharing this cache.
// We know `cpus_per_core > 0` therefore `cpus_per_core.checked_sub(1).unwrap()` is
// always safe.
#[allow(clippy::unwrap_used)]
match cache_level {
// L1 & L2 Cache
// The L1 & L2 cache is shared by at most 2 hyperthreads
1 | 2 => {
let sub = u32::from(cpus_per_core.checked_sub(1).unwrap());
set_range(&mut subleaf.result.eax, 14..=25, sub)
.map_err(DeterministicCacheError::MaxCpusPerCore)?;
}
// L3 Cache
// The L3 cache is shared among all the logical threads
3 => {
let sub = u32::from(
cpu_count
.checked_sub(1)
.ok_or(DeterministicCacheError::MaxCpusPerCoreUnderflow)?,
);
set_range(&mut subleaf.result.eax, 14..=25, sub)
.map_err(DeterministicCacheError::MaxCpusPerCore)?;
}
_ => (),
}
// We know `cpus_per_core !=0` therefore this is always safe.
#[allow(clippy::unwrap_used)]
let cores = cpu_count.checked_div(cpus_per_core).unwrap();
// CPUID.04H:EAX[31:26]
// Maximum number of addressable IDs for processor cores in the physical package.
// - Add one to the return value to get the result.
// - The nearest power-of-2 integer that is not smaller than (1 + EAX[31:26]) is the
// number of unique Core_IDs reserved for addressing different processor cores in
// a physical package. Core ID is a subset of bits of the initial APIC ID.
// - The returned value is constant for valid initial values in ECX. Valid ECX
// values start from 0.
// Put all the cores in the same socket
let sub = u32::from(cores)
.checked_sub(1)
.ok_or(DeterministicCacheError::MaxCorePerPackageUnderflow)?;
set_range(&mut subleaf.result.eax, 26..=31, sub)
.map_err(DeterministicCacheError::MaxCorePerPackage)?;
} else {
break;
}
}
Ok(())
}
/// Update power management entry
fn update_power_management_entry(&mut self) -> Result<(), NormalizeCpuidError> {
let leaf_6 = self
.get_mut(&CpuidKey::leaf(0x6))
.ok_or(NormalizeCpuidError::MissingLeaf6)?;
// CPUID.06H:EAX[1]
// Intel Turbo Boost Technology available (see description of IA32_MISC_ENABLE[38]).
set_bit(&mut leaf_6.result.eax, 1, false);
// CPUID.06H:ECX[3]
// The processor supports performance-energy bias preference if CPUID.06H:ECX.SETBH[bit 3]
// is set and it also implies the presence of a new architectural MSR called
// IA32_ENERGY_PERF_BIAS (1B0H).
// Clear X86 EPB feature. No frequency selection in the hypervisor.
set_bit(&mut leaf_6.result.ecx, 3, false);
Ok(())
}
/// Update structured extended feature flags enumeration leaf
fn update_extended_feature_flags_entry(&mut self) -> Result<(), NormalizeCpuidError> {
let leaf_7_0 = self
.get_mut(&CpuidKey::subleaf(0x7, 0))
.ok_or(NormalizeCpuidError::MissingLeaf7)?;
// Set the following bits as recommended in kernel doc. These bits are reserved in AMD.
// - CPUID.07H:EBX[6] (FDP_EXCPTN_ONLY)
// - CPUID.07H:EBX[13] (Deprecates FPU CS and FPU DS values)
// https://lore.kernel.org/all/20220322110712.222449-3-pbonzini@redhat.com/
// https://github.com/torvalds/linux/commit/45016721de3c714902c6f475b705e10ae0bdd801
set_bit(&mut leaf_7_0.result.ebx, 6, true);
set_bit(&mut leaf_7_0.result.ebx, 13, true);
// CPUID.(EAX=07H,ECX=0):ECX[5] (Mnemonic: WAITPKG)
//
// WAITPKG indicates support of user wait instructions (UMONITOR, UMWAIT and TPAUSE).
// - UMONITOR arms address monitoring hardware that checks for store operations on the
// specified address range.
// - UMWAIT instructs the processor to enter an implementation-dependent optimized state
// (either a light-weight power/performance optimized state (C0.1 idle state) or an
// improved power/performance optimized state (C0.2 idle state)) while monitoring the
// address range specified in UMONITOR. The instruction wakes up when the time-stamp
// counter reaches or exceeds the implicit EDX:EAX 64-bit input value.
// - TPAUSE instructs the processor to enter an implementation-dependent optimized state.
// The instruction wakes up when the time-stamp counter reaches or exceeds the implict
// EDX:EAX 64-bit input value.
//
// These instructions may be executed at any privilege level. Even when UMWAIT/TPAUSE are
// executed within a guest, the *physical* processor enters the requested optimized state.
// See Intel SDM vol.3 for more details of the behavior of these instructions in VMX
// non-root operation.
//
// MONITOR/MWAIT instructions are the privileged variant of UMONITOR/UMWAIT and are
// unconditionally emulated as NOP by KVM.
// https://github.com/torvalds/linux/commit/87c00572ba05aa8c9db118da75c608f47eb10b9e
//
// When UMONITOR/UMWAIT/TPAUSE were initially introduced, KVM clears the WAITPKG CPUID bit
// in KVM_GET_SUPPORTED_CPUID by default, and KVM exposed them to guest only when VMM
// explicitly set the bit via KVM_SET_CPUID2 API.
// https://github.com/torvalds/linux/commit/e69e72faa3a0709dd23df6a4ca060a15e99168a1
// However, since v5.8, if the processor supports "enable user wait and pause" in Intel VMX,
// KVM_GET_SUPPORTED_CPUID sets the bit to 1 to let VMM know that it is available. So if the
// returned value is passed to KVM_SET_CPUID2 API as it is, guests are able to execute them.
// https://github.com/torvalds/linux/commit/0abcc8f65cc23b65bc8d1614cc64b02b1641ed7c
//
// Similar to MONITOR/MWAIT, we disable the guest's WAITPKG in order to prevent a guest from
// executing those instructions and putting a physical processor to an idle state which may
// lead to an overhead of waking it up when scheduling another guest on it. By clearing the
// WAITPKG bit in KVM_SET_CPUID2 API, KVM does not set the "enable user wait and pause" bit
// (bit 26) of the secondary processor-based VM-execution control, which makes guests get
// #UD when attempting to executing those instructions.
//
// Note that the WAITPKG bit is reserved on AMD.
set_bit(&mut leaf_7_0.result.ecx, 5, false);
Ok(())
}
/// Update performance monitoring entry
fn update_performance_monitoring_entry(&mut self) -> Result<(), NormalizeCpuidError> {
let leaf_a = self
.get_mut(&CpuidKey::leaf(0xA))
.ok_or(NormalizeCpuidError::MissingLeafA)?;
leaf_a.result = CpuidRegisters {
eax: 0,
ebx: 0,
ecx: 0,
edx: 0,
};
Ok(())
}
/// Update extended topology v2 entry
///
/// CPUID leaf 1FH is a preferred superset to leaf 0xB. Intel recommends using leaf 0x1F when
/// available rather than leaf 0xB.
///
/// Since we don't use any domains than ones supported in leaf 0xB, we just copy contents of
/// leaf 0xB to leaf 0x1F.
fn update_extended_topology_v2_entry(&mut self) {
// Skip if leaf 0x1F does not exist.
if self.get(&CpuidKey::leaf(0x1F)).is_none() {
return;
}
for index in 0.. {
if let Some(subleaf) = self.get(&CpuidKey::subleaf(0xB, index)) {
self.0
.insert(CpuidKey::subleaf(0x1F, index), subleaf.clone());
} else {
break;
}
}
}
fn update_brand_string_entry(&mut self) -> Result<(), NormalizeCpuidError> {
// Get host brand string.
let host_brand_string: [u8; BRAND_STRING_LENGTH] = host_brand_string();
let default_brand_string =
default_brand_string(host_brand_string).unwrap_or(*DEFAULT_BRAND_STRING);
self.apply_brand_string(&default_brand_string)
.map_err(NormalizeCpuidError::ApplyBrandString)?;
Ok(())
}
}
/// Error type for [`default_brand_string`].
#[derive(Debug, Eq, PartialEq, thiserror::Error, displaydoc::Display)]
pub enum DefaultBrandStringError {
/// Missing frequency: {0:?}.
MissingFrequency([u8; BRAND_STRING_LENGTH]),
/// Missing space: {0:?}.
MissingSpace([u8; BRAND_STRING_LENGTH]),
/// Insufficient space in brand string.
Overflow,
}
/// Normalize brand string to a generic Xeon(R) processor, with the actual CPU frequency
///
/// # Errors
///
/// When unable to parse the host brand string.
/// `brand_string.try_into().unwrap()` cannot panic as we know
/// `brand_string.len() == BRAND_STRING_LENGTH`
///
/// # Panics
///
/// Never.
// As we pass through host frequency, we require CPUID and thus `cfg(cpuid)`.
// TODO: Use `split_array_ref`
// (https://github.com/firecracker-microvm/firecracker/issues/3347)
#[allow(clippy::indexing_slicing, clippy::arithmetic_side_effects)]
#[inline]
fn default_brand_string(
// Host brand string.
// This could look like "Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz".
// or this could look like "Intel(R) Xeon(R) Platinum 8275CL CPU\0\0\0\0\0\0\0\0\0\0".
host_brand_string: [u8; BRAND_STRING_LENGTH],
) -> Result<[u8; BRAND_STRING_LENGTH], DefaultBrandStringError> {
// The slice of the host string before the frequency suffix
// e.g. b"Intel(R) Xeon(R) Processor Platinum 8275CL CPU @ 3.00" and b"GHz"
let (before, after) = 'outer: {
for i in 0..host_brand_string.len() {
// Find position of b"THz" or b"GHz" or b"MHz"
if let [b'T' | b'G' | b'M', b'H', b'z', ..] = host_brand_string[i..] {
break 'outer Ok(host_brand_string.split_at(i));
}
}
Err(DefaultBrandStringError::MissingFrequency(host_brand_string))
}?;
debug_assert_eq!(
before.len().checked_add(after.len()),
Some(BRAND_STRING_LENGTH)
);
// We iterate from the end until hitting a space, getting the frequency number
// e.g. b"Intel(R) Xeon(R) Processor Platinum 8275CL CPU @ " and b"3.00"
let (_, frequency) = 'outer: {
for i in (0..before.len()).rev() {
let c = before[i];
match c {
b' ' => break 'outer Ok(before.split_at(i)),
b'0'..=b'9' | b'.' => (),
_ => break,
}
}
Err(DefaultBrandStringError::MissingSpace(host_brand_string))
}?;
debug_assert!(frequency.len() <= before.len());
debug_assert!(
matches!(frequency.len().checked_add(after.len()), Some(x) if x <= BRAND_STRING_LENGTH)
);
debug_assert!(DEFAULT_BRAND_STRING_BASE.len() <= BRAND_STRING_LENGTH);
debug_assert!(BRAND_STRING_LENGTH.checked_mul(2).is_some());
// As `DEFAULT_BRAND_STRING_BASE.len() + frequency.len() + after.len()` is guaranteed
// to be less than or equal to `2*BRAND_STRING_LENGTH` and we know
// `2*BRAND_STRING_LENGTH <= usize::MAX` since `BRAND_STRING_LENGTH==48`, this is always
// safe.
let len = DEFAULT_BRAND_STRING_BASE.len() + frequency.len() + after.len();
let brand_string = DEFAULT_BRAND_STRING_BASE
.iter()
.copied()
// Include frequency e.g. "3.00"
.chain(frequency.iter().copied())
// Include frequency suffix e.g. "GHz"
.chain(after.iter().copied())
// Pad with 0s to `BRAND_STRING_LENGTH`
.chain(std::iter::repeat_n(
b'\0',
BRAND_STRING_LENGTH
.checked_sub(len)
.ok_or(DefaultBrandStringError::Overflow)?,
))
.collect::<Vec<_>>();
debug_assert_eq!(brand_string.len(), BRAND_STRING_LENGTH);
// Padding ensures `brand_string.len() == BRAND_STRING_LENGTH` thus
// `brand_string.try_into().unwrap()` is safe.
#[allow(clippy::unwrap_used)]
Ok(brand_string.try_into().unwrap())
}
#[cfg(test)]
mod tests {
#![allow(
clippy::undocumented_unsafe_blocks,
clippy::unwrap_used,
clippy::as_conversions
)]
use std::collections::BTreeMap;
use std::ffi::CStr;
use super::*;
use crate::cpu_config::x86_64::cpuid::{CpuidEntry, IntelCpuid, KvmCpuidFlags};
#[test]
fn default_brand_string_test() {
let brand_string = b"Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz\0\0";
let ok_result = default_brand_string(*brand_string);
let expected = Ok(*b"Intel(R) Xeon(R) Processor @ 3.00GHz\0\0\0\0\0\0\0\0\0\0\0\0");
assert_eq!(ok_result, expected);
}
#[test]
fn default_brand_string_test_missing_frequency() {
let brand_string = b"Intel(R) Xeon(R) Platinum 8275CL CPU @ \0\0\0\0\0\0\0\0\0";
let result = default_brand_string(*brand_string);
let expected = Err(DefaultBrandStringError::MissingFrequency(*brand_string));
assert_eq!(result, expected);
}
#[test]
fn default_brand_string_test_missing_space() {
let brand_string = b"Intel(R) Xeon(R) Platinum 8275CL CPU @3.00GHz\0\0\0";
let result = default_brand_string(*brand_string);
let expected = Err(DefaultBrandStringError::MissingSpace(*brand_string));
assert_eq!(result, expected);
}
#[test]
fn default_brand_string_test_overflow() {
let brand_string = b"@ 123456789876543212345678987654321234567898GHz\0";
let result = default_brand_string(*brand_string);
assert_eq!(
result,
Err(DefaultBrandStringError::Overflow),
"{:?}",
result
.as_ref()
.map(|s| CStr::from_bytes_until_nul(s).unwrap()),
);
}
#[test]
fn test_update_extended_feature_flags_entry() {
let mut cpuid = IntelCpuid(BTreeMap::from([(
CpuidKey {
leaf: 0x7,
subleaf: 0,
},
CpuidEntry {
flags: KvmCpuidFlags::SIGNIFICANT_INDEX,
..Default::default()
},
)]));
cpuid.update_extended_feature_flags_entry().unwrap();
let leaf_7_0 = cpuid
.get(&CpuidKey {
leaf: 0x7,
subleaf: 0,
})
.unwrap();
assert!((leaf_7_0.result.ebx & (1 << 6)) > 0);
assert!((leaf_7_0.result.ebx & (1 << 13)) > 0);
assert_eq!((leaf_7_0.result.ecx & (1 << 5)), 0);
}
#[test]
fn test_update_extended_topology_v2_entry_no_leaf_0x1f() {
let mut cpuid = IntelCpuid(BTreeMap::from([(
CpuidKey {
leaf: 0xB,
subleaf: 0,
},
CpuidEntry {
flags: KvmCpuidFlags::SIGNIFICANT_INDEX,
..Default::default()
},
)]));
cpuid.update_extended_topology_v2_entry();
assert!(
cpuid
.get(&CpuidKey {
leaf: 0x1F,
subleaf: 0,
})
.is_none()
);
}
#[test]
fn test_update_extended_topology_v2_entry() {
let mut cpuid = IntelCpuid(BTreeMap::from([
(
CpuidKey {
leaf: 0xB,
subleaf: 0,
},
CpuidEntry {
flags: KvmCpuidFlags::SIGNIFICANT_INDEX,
result: CpuidRegisters {
eax: 0x1,
ebx: 0x2,
ecx: 0x3,
edx: 0x4,
},
},
),
(
CpuidKey {
leaf: 0xB,
subleaf: 1,
},
CpuidEntry {
flags: KvmCpuidFlags::SIGNIFICANT_INDEX,
result: CpuidRegisters {
eax: 0xa,
ebx: 0xb,
ecx: 0xc,
edx: 0xd,
},
},
),
(
CpuidKey {
leaf: 0x1F,
subleaf: 0,
},
CpuidEntry {
flags: KvmCpuidFlags::SIGNIFICANT_INDEX,
result: CpuidRegisters {
eax: 0xFFFFFFFF,
ebx: 0xFFFFFFFF,
ecx: 0xFFFFFFFF,
edx: 0xFFFFFFFF,
},
},
),
]));
cpuid.update_extended_topology_v2_entry();
// Check leaf 0x1F, subleaf 0 is updated.
let leaf_1f_0 = cpuid
.get(&CpuidKey {
leaf: 0x1F,
subleaf: 0,
})
.unwrap();
assert_eq!(leaf_1f_0.result.eax, 0x1);
assert_eq!(leaf_1f_0.result.ebx, 0x2);
assert_eq!(leaf_1f_0.result.ecx, 0x3);
assert_eq!(leaf_1f_0.result.edx, 0x4);
// Check lefa 0x1F, subleaf 1 is inserted.
let leaf_1f_1 = cpuid
.get(&CpuidKey {
leaf: 0x1F,
subleaf: 1,
})
.unwrap();
assert_eq!(leaf_1f_1.result.eax, 0xa);
assert_eq!(leaf_1f_1.result.ebx, 0xb);
assert_eq!(leaf_1f_1.result.ecx, 0xc);
assert_eq!(leaf_1f_1.result.edx, 0xd);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/x86_64/static_cpu_templates/t2.rs | src/vmm/src/cpu_config/x86_64/static_cpu_templates/t2.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use crate::cpu_config::templates::{CustomCpuTemplate, RegisterValueFilter};
use crate::cpu_config::x86_64::cpuid::KvmCpuidFlags;
use crate::cpu_config::x86_64::custom_cpu_template::{
CpuidLeafModifier, CpuidRegister, CpuidRegisterModifier,
};
/// T2 template
///
/// Mask CPUID to make exposed CPU features as close as possbile to AWS T2 instance.
///
/// CPUID dump taken in t2.micro on 2023-06-15:
/// =====
/// $ cpuid -1 -r
/// Disclaimer: cpuid may not support decoding of all cpuid registers.
/// CPU:
/// 0x00000000 0x00: eax=0x0000000d ebx=0x756e6547 ecx=0x6c65746e edx=0x49656e69
/// 0x00000001 0x00: eax=0x000306f2 ebx=0x00010800 ecx=0xfffa3203 edx=0x178bfbff
/// 0x00000002 0x00: eax=0x76036301 ebx=0x00f0b5ff ecx=0x00000000 edx=0x00c10000
/// 0x00000003 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x00000004 0x00: eax=0x00004121 ebx=0x01c0003f ecx=0x0000003f edx=0x00000000
/// 0x00000004 0x01: eax=0x00004122 ebx=0x01c0003f ecx=0x0000003f edx=0x00000000
/// 0x00000004 0x02: eax=0x00004143 ebx=0x01c0003f ecx=0x000001ff edx=0x00000000
/// 0x00000004 0x03: eax=0x0007c163 ebx=0x04c0003f ecx=0x00005fff edx=0x00000006
/// 0x00000005 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x00000006 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x00000007 0x00: eax=0x00000000 ebx=0x000007a9 ecx=0x00000000 edx=0x00000000
/// 0x00000008 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x00000009 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x0000000a 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x0000000b 0x00: eax=0x00000001 ebx=0x00000001 ecx=0x00000100 edx=0x00000000
/// 0x0000000b 0x01: eax=0x00000005 ebx=0x00000001 ecx=0x00000201 edx=0x00000000
/// 0x0000000c 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x0000000d 0x00: eax=0x00000007 ebx=0x00000340 ecx=0x00000340 edx=0x00000000
/// 0x0000000d 0x01: eax=0x00000001 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x0000000d 0x02: eax=0x00000100 ebx=0x00000240 ecx=0x00000000 edx=0x00000000
/// 0x40000000 0x00: eax=0x40000005 ebx=0x566e6558 ecx=0x65584d4d edx=0x4d4d566e
/// 0x40000001 0x00: eax=0x0004000b ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x40000002 0x00: eax=0x00000001 ebx=0x40000000 ecx=0x00000000 edx=0x00000000
/// 0x40000003 0x00: eax=0x00000006 ebx=0x00000002 ecx=0x00249f0a edx=0x00000001
/// 0x40000003 0x02: eax=0x9b842c23 ebx=0x007c8980 ecx=0xd5551b14 edx=0xffffffff
/// 0x40000004 0x00: eax=0x0000001c ebx=0x00000000 ecx=0x0000762b edx=0x00000000
/// 0x40000005 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x80000000 0x00: eax=0x80000008 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x80000001 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000021 edx=0x28100800
/// 0x80000002 0x00: eax=0x65746e49 ebx=0x2952286c ecx=0x6f655820 edx=0x2952286e
/// 0x80000003 0x00: eax=0x55504320 ebx=0x2d354520 ecx=0x36373632 edx=0x20337620
/// 0x80000004 0x00: eax=0x2e322040 ebx=0x48473034 ecx=0x0000007a edx=0x00000000
/// 0x80000005 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x80000006 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x01006040 edx=0x00000000
/// 0x80000007 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x80000008 0x00: eax=0x0000302e ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x80860000 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0xc0000000 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// =====
///
/// References:
/// - Intel SDM: <https://cdrdv2.intel.com/v1/dl/getContent/671200>
#[allow(clippy::unusual_byte_groupings)]
pub fn t2() -> CustomCpuTemplate {
CustomCpuTemplate {
cpuid_modifiers: vec![
CpuidLeafModifier {
leaf: 0x1,
subleaf: 0x0,
flags: KvmCpuidFlags(0),
modifiers: vec![
// EAX: Version Information
// - Bits 03-00: Stepping ID.
// - Bits 07-04: Model.
// - Bits 11-08: Family.
// - Bits 13-12: Processor Type.
// - Bits 19-16: Extended Model ID.
// - Bits 27-20: Extended Family ID.
CpuidRegisterModifier {
register: CpuidRegister::Eax,
bitmap: RegisterValueFilter {
filter: 0b0000_11111111_1111_00_11_1111_1111_1111,
value: 0b0000_00000000_0011_00_00_0110_1111_0010,
},
},
// ECX: Feature Information
// - Bit 02: DTES64
// - Bit 03: MONITOR
// - Bit 04: DS-CPL
// - Bit 05: VMX
// - Bit 06: SMX
// - Bit 07: EIST
// - Bit 08: TM2
// - Bit 10: CNXT-ID
// - Bit 11: SDBG
// - Bit 14: xTPR Update Control
// - Bit 15: PDCM
// - Bit 18: DCA
CpuidRegisterModifier {
register: CpuidRegister::Ecx,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0100_1100_1101_1111_1100,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
// EDX: Feature Information
// - Bit 07: MCE
// - Bit 12: MTRR
// - Bit 18: PSN
// - Bit 21: DS
// - Bit 22: ACPI
// - Bit 27: SS
// - Bit 29: TM
// - Bit 30: IA-64 (deprecated) https://www.intel.com/content/dam/www/public/us/en/documents/manuals/itanium-architecture-vol-4-manual.pdf
// - Bit 31: PBE
CpuidRegisterModifier {
register: CpuidRegister::Edx,
bitmap: RegisterValueFilter {
filter: 0b1110_1000_0110_0100_0001_0000_1000_0000,
value: 0b0000_0000_0000_0000_0001_0000_1000_0000,
},
},
],
},
CpuidLeafModifier {
leaf: 0x7,
subleaf: 0x0,
flags: KvmCpuidFlags(1),
modifiers: vec![
// EBX:
// - Bit 02: SGX
// - Bit 04: HLE
// - Bit 09: Enhanced REP MOVSB/STOSB
// - Bit 11: RTM
// - Bit 12: RDT-M
// - Bit 14: MPX
// - Bit 15: RDT-A
// - Bit 16: AVX512F
// - Bit 17: AVX512DQ
// - Bit 18: RDSEED
// - Bit 19: ADX
// - Bit 21: AVX512_IFMA
// - Bit 22: PCOMMIT (deprecated) https://www.intel.com/content/www/us/en/developer/articles/technical/deprecate-pcommit-instruction.html
// - Bit 23: CLFLUSHOPT
// - Bit 24: CLWB
// - Bit 25: Intel Processor Trace
// - Bit 26: AVX512PF
// - Bit 27: AVX512ER
// - Bit 28: AVX512CD
// - Bit 29: SHA
// - Bit 30: AVX512BW
// - Bit 31: AVX512VL
CpuidRegisterModifier {
register: CpuidRegister::Ebx,
bitmap: RegisterValueFilter {
filter: 0b1111_1111_1110_1111_1101_1010_0001_0100,
value: 0b0000_0000_0000_0000_0000_0010_0000_0000,
},
},
// ECX:
// - Bit 01: AVX512_VBMI
// - Bit 02: UMIP
// - Bit 03: PKU
// - Bit 04: OSPKE
// - Bit 06: AVX512_VBMI2
// - Bit 08: GFNI
// - Bit 09: VAES
// - Bit 10: VPCLMULQDQ
// - Bit 11: AVX512_VNNI
// - Bit 12: AVX512_BITALG
// - Bit 14: AVX512_VPOPCNTDQ
// - Bit 16: LA57
// - Bit 22: RDPID
// - Bit 30: SGX_LC
CpuidRegisterModifier {
register: CpuidRegister::Ecx,
bitmap: RegisterValueFilter {
filter: 0b0100_0000_0100_0001_0101_1111_0101_1110,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
// EDX:
// - Bit 02: AVX512_4VNNIW
// - Bit 03: AVX512_4FMAPS
// - Bit 04: Fast Short REP MOV
// - Bit 08: AVX512_VP2INTERSECT
CpuidRegisterModifier {
register: CpuidRegister::Edx,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0000_0000_0001_0001_1100,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
],
},
CpuidLeafModifier {
leaf: 0xd,
subleaf: 0x0,
flags: KvmCpuidFlags(1),
modifiers: vec![
// EAX:
// - Bits 04-03: MPX state
// - Bits 07-05: AVX-512 state
// - Bit 09: PKRU state
CpuidRegisterModifier {
register: CpuidRegister::Eax,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0000_0000_00_1_0_111_11_000,
value: 0b0000_0000_0000_0000_0000_00_0_0_000_00_000,
},
},
],
},
CpuidLeafModifier {
leaf: 0xd,
subleaf: 0x1,
flags: KvmCpuidFlags(1),
modifiers: vec![
// EAX:
// - Bit 01: Supports XSAVEC and the compacted form of XRSTOR
// - Bit 02: Supports XGETBV
// - Bit 03: Supports XSAVES/XRSTORS and IA32_XSS
CpuidRegisterModifier {
register: CpuidRegister::Eax,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0000_0000_0000_0000_1110,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
],
},
CpuidLeafModifier {
leaf: 0x80000001,
subleaf: 0x0,
flags: KvmCpuidFlags(0),
modifiers: vec![
// ECX:
// - Bit 08: PREFETCHW
// - Bit 29: MONITORX and MWAITX
CpuidRegisterModifier {
register: CpuidRegister::Ecx,
bitmap: RegisterValueFilter {
filter: 0b0010_0000_0000_0000_0000_0001_0000_0000,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
// EDX:
// - Bit 26: 1-GByte pages
CpuidRegisterModifier {
register: CpuidRegister::Edx,
bitmap: RegisterValueFilter {
filter: 0b0000_0100_0000_0000_0000_0000_0000_0000,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
],
},
CpuidLeafModifier {
leaf: 0x80000008,
subleaf: 0x0,
flags: KvmCpuidFlags(0),
modifiers: vec![
// EBX:
// - Bit 09: WBNOINVD
CpuidRegisterModifier {
register: CpuidRegister::Ebx,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0000_0000_0010_0000_0000,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
],
},
],
msr_modifiers: vec![],
..Default::default()
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/x86_64/static_cpu_templates/t2s.rs | src/vmm/src/cpu_config/x86_64/static_cpu_templates/t2s.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use crate::cpu_config::templates::{CustomCpuTemplate, RegisterValueFilter};
use crate::cpu_config::x86_64::cpuid::KvmCpuidFlags;
use crate::cpu_config::x86_64::custom_cpu_template::{
CpuidLeafModifier, CpuidRegister, CpuidRegisterModifier, RegisterModifier,
};
/// T2S template
///
/// Mask CPUID to make exposed CPU features as close as possbile to AWS T2 instance and allow
/// migrating snapshots between hosts with Intel Skylake and Cascade Lake securely.
///
/// Reference:
/// - Intel SDM: <https://cdrdv2.intel.com/v1/dl/getContent/671200>
/// - CPUID Enumeration and Architectural MSRs: <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/cpuid-enumeration-and-architectural-msrs.html>
#[allow(clippy::unusual_byte_groupings)]
pub fn t2s() -> CustomCpuTemplate {
CustomCpuTemplate {
cpuid_modifiers: vec![
CpuidLeafModifier {
leaf: 0x1,
subleaf: 0x0,
flags: KvmCpuidFlags(0),
modifiers: vec![
// EAX: Version Information
// - Bits 03-00: Stepping ID.
// - Bits 07-04: Model.
// - Bits 11-08: Family.
// - Bits 13-12: Processor Type.
// - Bits 19-16: Extended Model ID.
// - Bits 27-20: Extended Family ID.
CpuidRegisterModifier {
register: CpuidRegister::Eax,
bitmap: RegisterValueFilter {
filter: 0b0000_11111111_1111_00_11_1111_1111_1111,
value: 0b0000_00000000_0011_00_00_0110_1111_0010,
},
},
// ECX: Feature Information
// - Bit 02: DTES64
// - Bit 03: MONITOR
// - Bit 04: DS-CPL
// - Bit 05: VMX
// - Bit 06: SMX
// - Bit 07: EIST
// - Bit 08: TM2
// - Bit 10: CNXT-ID
// - Bit 11: SDBG
// - Bit 14: xTPR Update Control
// - Bit 15: PDCM
// - Bit 18: DCA
CpuidRegisterModifier {
register: CpuidRegister::Ecx,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0100_1100_1101_1111_1100,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
// EDX: Feature Information
// - Bit 07: MCE
// - Bit 12: MTRR
// - Bit 18: PSN
// - Bit 21: DS
// - Bit 22: ACPI
// - Bit 27: SS
// - Bit 29: TM
// - Bit 30: IA-64 (deprecated) https://www.intel.com/content/dam/www/public/us/en/documents/manuals/itanium-architecture-vol-4-manual.pdf
// - Bit 31: PBE
CpuidRegisterModifier {
register: CpuidRegister::Edx,
bitmap: RegisterValueFilter {
filter: 0b1110_1000_0110_0100_0001_0000_1000_0000,
value: 0b0000_0000_0000_0000_0001_0000_1000_0000,
},
},
],
},
CpuidLeafModifier {
leaf: 0x7,
subleaf: 0x0,
flags: KvmCpuidFlags(1),
modifiers: vec![
// EBX:
// - Bit 02: SGX
// - Bit 04: HLE
// - Bit 09: Enhanced REP MOVSB/STOSB
// - Bit 11: RTM
// - Bit 12: RDT-M
// - Bit 14: MPX
// - Bit 15: RDT-A
// - Bit 16: AVX512F
// - Bit 17: AVX512DQ
// - Bit 18: RDSEED
// - Bit 19: ADX
// - Bit 21: AVX512_IFMA
// - Bit 22: PCOMMIT (deprecated) https://www.intel.com/content/www/us/en/developer/articles/technical/deprecate-pcommit-instruction.html
// - Bit 23: CLFLUSHOPT
// - Bit 24: CLWB
// - Bit 25: Intel Processor Trace
// - Bit 26: AVX512PF
// - Bit 27: AVX512ER
// - Bit 28: AVX512CD
// - Bit 29: SHA
// - Bit 30: AVX512BW
// - Bit 31: AVX512VL
CpuidRegisterModifier {
register: CpuidRegister::Ebx,
bitmap: RegisterValueFilter {
filter: 0b1111_1111_1110_1111_1101_1010_0001_0100,
value: 0b0000_0000_0000_0000_0000_0010_0000_0000,
},
},
// ECX:
// - Bit 01: AVX512_VBMI
// - Bit 02: UMIP
// - Bit 03: PKU
// - Bit 04: OSPKE
// - Bit 06: AVX512_VBMI2
// - Bit 08: GFNI
// - Bit 09: VAES
// - Bit 10: VPCLMULQDQ
// - Bit 11: AVX512_VNNI
// - Bit 12: AVX512_BITALG
// - Bit 14: AVX512_VPOPCNTDQ
// - Bit 16: LA57
// - Bit 22: RDPID
// - Bit 30: SGX_LC
CpuidRegisterModifier {
register: CpuidRegister::Ecx,
bitmap: RegisterValueFilter {
filter: 0b0100_0000_0100_0001_0101_1111_0101_1110,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
// EDX:
// - Bit 02: AVX512_4VNNIW
// - Bit 03: AVX512_4FMAPS
// - Bit 04: Fast Short REP MOV
// - Bit 08: AVX512_VP2INTERSECT
CpuidRegisterModifier {
register: CpuidRegister::Edx,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0000_0000_0001_0001_1100,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
],
},
CpuidLeafModifier {
leaf: 0xd,
subleaf: 0x0,
flags: KvmCpuidFlags(1),
modifiers: vec![
// EAX:
// - Bits 04-03: MPX state
// - Bits 07-05: AVX-512 state
// - Bit 09: PKRU state
CpuidRegisterModifier {
register: CpuidRegister::Eax,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0000_0000_00_1_0_111_11_000,
value: 0b0000_0000_0000_0000_0000_00_0_0_000_00_000,
},
},
],
},
CpuidLeafModifier {
leaf: 0xd,
subleaf: 0x1,
flags: KvmCpuidFlags(1),
modifiers: vec![
// EAX:
// - Bit 01: Supports XSAVEC and the compacted form of XRSTOR
// - Bit 02: Supports XGETBV
// - Bit 03: Supports XSAVES/XRSTORS and IA32_XSS
CpuidRegisterModifier {
register: CpuidRegister::Eax,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0000_0000_0000_0000_1110,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
],
},
CpuidLeafModifier {
leaf: 0x80000001,
subleaf: 0x0,
flags: KvmCpuidFlags(0),
modifiers: vec![
// ECX:
// - Bit 08: PREFETCHW
// - Bit 29: MONITORX and MWAITX
CpuidRegisterModifier {
register: CpuidRegister::Ecx,
bitmap: RegisterValueFilter {
filter: 0b0010_0000_0000_0000_0000_0001_0000_0000,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
// EDX:
// - Bit 26: 1-GByte pages
CpuidRegisterModifier {
register: CpuidRegister::Edx,
bitmap: RegisterValueFilter {
filter: 0b0000_0100_0000_0000_0000_0000_0000_0000,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
],
},
CpuidLeafModifier {
leaf: 0x80000008,
subleaf: 0x0,
flags: KvmCpuidFlags(0),
modifiers: vec![
// EBX:
// - Bit 09: WBNOINVD
CpuidRegisterModifier {
register: CpuidRegister::Ebx,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0000_0000_0010_0000_0000,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
],
},
],
msr_modifiers: vec![
// IA32_ARCH_CAPABILITIES:
// - Bit 00: RDCL_NO
// - Bit 01: IBRS_ALL
// - Bit 02: RSBA
// - Bit 03: SKIP_L1DFL_VMENTRY
// - Bit 04: SSB_NO
// - Bit 05: MDS_NO
// - Bit 06: IF_PSCHANGE_MC_NO
// - Bit 07: TSX_CTRL
// - Bit 08: TAA_NO
// - Bit 09: MCU_CONTROL
// - Bit 10: MISC_PACKAGE_CTLS
// - Bit 11: ENERGY_FILTERING_CTL
// - Bit 12: DOITM
// - Bit 13: SBDR_SSDP_NO
// - Bit 14: FBSDP_NO
// - Bit 15: PSDP_NO
// - Bit 16: Reserved
// - Bit 17: FB_CLEAR
// - Bit 18: FB_CLEAR_CTRL
// - Bit 19: RRSBA
// - Bit 20: BHI_NO
// - Bit 21: XAPIC_DISABLE_STATUS
// - Bit 22: Reserved
// - Bit 23: OVERCLOCKING_STATUS
// - Bit 24: PBRSB_NO
// - Bit 26: GDS_NO
// - BIT 27: RFDS_NO
// - Bits 63-25: Reserved
RegisterModifier {
addr: 0x10a,
bitmap: RegisterValueFilter {
filter: 0b1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_1100_0000_1000_0000_1100_0100_1100,
},
}],
..Default::default()
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/x86_64/static_cpu_templates/mod.rs | src/vmm/src/cpu_config/x86_64/static_cpu_templates/mod.rs | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use derive_more::Display;
use serde::{Deserialize, Serialize};
use crate::arch::x86_64::cpu_model::{
CASCADE_LAKE_FMS, CpuModel, ICE_LAKE_FMS, MILAN_FMS, SKYLAKE_FMS,
};
use crate::cpu_config::x86_64::cpuid::{VENDOR_ID_AMD, VENDOR_ID_INTEL};
/// Module with C3 CPU template for x86_64
pub mod c3;
/// Module with T2 CPU template for x86_64
pub mod t2;
/// Module with T2A CPU template for x86_64
pub mod t2a;
/// Module with T2CL CPU template for x86_64
pub mod t2cl;
/// Module with T2S CPU template for x86_64
pub mod t2s;
/// Template types available for configuring the x86 CPU features that map
/// to EC2 instances.
#[derive(Debug, Default, Display, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum StaticCpuTemplate {
/// C3 Template.
#[display("C3")]
C3,
/// T2 Template.
#[display("T2")]
T2,
/// T2S Template.
#[display("T2S")]
T2S,
/// No CPU template is used.
#[default]
#[display("None")]
None,
/// T2CL Template.
#[display("T2CL")]
T2CL,
/// T2A Template.
#[display("T2A")]
T2A,
}
impl StaticCpuTemplate {
/// Check if no template specified
pub fn is_none(&self) -> bool {
self == &StaticCpuTemplate::None
}
/// Return the supported vendor for the CPU template.
pub fn get_supported_vendor(&self) -> &'static [u8; 12] {
match self {
StaticCpuTemplate::C3 => VENDOR_ID_INTEL,
StaticCpuTemplate::T2 => VENDOR_ID_INTEL,
StaticCpuTemplate::T2S => VENDOR_ID_INTEL,
StaticCpuTemplate::T2CL => VENDOR_ID_INTEL,
StaticCpuTemplate::T2A => VENDOR_ID_AMD,
StaticCpuTemplate::None => unreachable!(), // Should be handled in advance
}
}
/// Return supported CPU models for the CPU template.
pub fn get_supported_cpu_models(&self) -> &'static [CpuModel] {
match self {
StaticCpuTemplate::C3 => &[SKYLAKE_FMS, CASCADE_LAKE_FMS, ICE_LAKE_FMS],
StaticCpuTemplate::T2 => &[SKYLAKE_FMS, CASCADE_LAKE_FMS, ICE_LAKE_FMS],
StaticCpuTemplate::T2S => &[SKYLAKE_FMS, CASCADE_LAKE_FMS],
StaticCpuTemplate::T2CL => &[CASCADE_LAKE_FMS, ICE_LAKE_FMS],
StaticCpuTemplate::T2A => &[MILAN_FMS],
StaticCpuTemplate::None => unreachable!(), // Should be handled in advance
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::cpu_config::test_utils::get_json_template;
#[test]
fn verify_consistency_with_json_templates() {
let static_templates = [
(c3::c3(), "C3.json"),
(t2::t2(), "T2.json"),
(t2s::t2s(), "T2S.json"),
(t2cl::t2cl(), "T2CL.json"),
(t2a::t2a(), "T2A.json"),
];
for (hardcoded_template, filename) in static_templates {
let json_template = get_json_template(filename);
assert_eq!(hardcoded_template, json_template);
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/x86_64/static_cpu_templates/c3.rs | src/vmm/src/cpu_config/x86_64/static_cpu_templates/c3.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use crate::cpu_config::templates::{CustomCpuTemplate, RegisterValueFilter};
use crate::cpu_config::x86_64::cpuid::KvmCpuidFlags;
use crate::cpu_config::x86_64::custom_cpu_template::{
CpuidLeafModifier, CpuidRegister, CpuidRegisterModifier,
};
/// C3 CPU template.
///
/// Mask CPUID to make exposed CPU features as close as possbile to AWS C3 instance.
///
/// CPUID dump taken in c3.large on 2023-06-15:
/// =====
/// $ cpuid -1 -r
/// Disclaimer: cpuid may not support decoding of all cpuid registers.
/// CPU:
/// 0x00000000 0x00: eax=0x0000000d ebx=0x756e6547 ecx=0x6c65746e edx=0x49656e69
/// 0x00000001 0x00: eax=0x000306e4 ebx=0x01020800 ecx=0xffba2203 edx=0x178bfbff
/// 0x00000002 0x00: eax=0x76036301 ebx=0x00f0b2ff ecx=0x00000000 edx=0x00ca0000
/// 0x00000003 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x00000004 0x00: eax=0x00004121 ebx=0x01c0003f ecx=0x0000003f edx=0x00000000
/// 0x00000004 0x01: eax=0x00004122 ebx=0x01c0003f ecx=0x0000003f edx=0x00000000
/// 0x00000004 0x02: eax=0x00004143 ebx=0x01c0003f ecx=0x000001ff edx=0x00000000
/// 0x00000004 0x03: eax=0x00004163 ebx=0x04c0003f ecx=0x00004fff edx=0x00000006
/// 0x00000005 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x00000006 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x00000007 0x00: eax=0x00000000 ebx=0x00000281 ecx=0x00000000 edx=0x00000000
/// 0x00000008 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x00000009 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x0000000a 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x0000000b 0x00: eax=0x00000001 ebx=0x00000002 ecx=0x00000100 edx=0x00000000
/// 0x0000000b 0x01: eax=0x00000005 ebx=0x00000001 ecx=0x00000201 edx=0x00000000
/// 0x0000000c 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x0000000d 0x00: eax=0x00000007 ebx=0x00000340 ecx=0x00000340 edx=0x00000000
/// 0x0000000d 0x01: eax=0x00000001 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x0000000d 0x02: eax=0x00000100 ebx=0x00000240 ecx=0x00000000 edx=0x00000000
/// 0x40000000 0x00: eax=0x40000005 ebx=0x566e6558 ecx=0x65584d4d edx=0x4d4d566e
/// 0x40000001 0x00: eax=0x0004000b ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x40000002 0x00: eax=0x00000001 ebx=0x40000000 ecx=0x00000000 edx=0x00000000
/// 0x40000003 0x00: eax=0x00000006 ebx=0x00000002 ecx=0x002a9f50 edx=0x00000001
/// 0x40000003 0x02: eax=0x1387329d ebx=0x00f6b809 ecx=0xb74bc70a edx=0xffffffff
/// 0x40000004 0x00: eax=0x0000001c ebx=0x00000000 ecx=0x00002b86 edx=0x00000000
/// 0x40000005 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x80000000 0x00: eax=0x80000008 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x80000001 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000001 edx=0x28100800
/// 0x80000002 0x00: eax=0x20202020 ebx=0x6e492020 ecx=0x286c6574 edx=0x58202952
/// 0x80000003 0x00: eax=0x286e6f65 ebx=0x43202952 ecx=0x45205550 edx=0x36322d35
/// 0x80000004 0x00: eax=0x76203038 ebx=0x20402032 ecx=0x30382e32 edx=0x007a4847
/// 0x80000005 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x80000006 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x01006040 edx=0x00000000
/// 0x80000007 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x80000008 0x00: eax=0x0000302e ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0x80860000 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// 0xc0000000 0x00: eax=0x00000000 ebx=0x00000000 ecx=0x00000000 edx=0x00000000
/// =====
///
/// References:
/// - Intel SDM: <https://cdrdv2.intel.com/v1/dl/getContent/671200>
#[allow(clippy::unusual_byte_groupings)]
pub fn c3() -> CustomCpuTemplate {
CustomCpuTemplate {
cpuid_modifiers: vec![
CpuidLeafModifier {
leaf: 0x1,
subleaf: 0x0,
flags: KvmCpuidFlags(0),
modifiers: vec![
// EAX: Version Information
// - Bits 03-00: Stepping ID.
// - Bits 07-04: Model.
// - Bits 11-08: Family.
// - Bits 13-12: Processor Type.
// - Bits 19-16: Extended Model ID.
// - Bits 27-20: Extended Family ID.
CpuidRegisterModifier {
register: CpuidRegister::Eax,
bitmap: RegisterValueFilter {
filter: 0b0000_11111111_1111_00_11_1111_1111_1111,
value: 0b0000_00000000_0011_00_00_0110_1110_0100,
},
},
// ECX: Feature Information
// - Bit 02: DTES64
// - Bit 03: MONITOR
// - Bit 04: DS-CPL
// - Bit 05: VMX
// - Bit 08: TM2
// - Bit 10: CNXT-ID
// - Bit 11: SDBG
// - Bit 12: FMA
// - Bit 14: xTPR Update Control
// - Bit 15: PDCM
// - Bit 22: MOVBE
CpuidRegisterModifier {
register: CpuidRegister::Ecx,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0100_0000_1101_1101_0011_1100,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
// EDX: Feature Information
// - Bit 07: MCE
// - Bit 12: MTRR
// - Bit 18: PSN
// - Bit 21: DS
// - Bit 22: ACPI
// - Bit 27: SS
// - Bit 29: TM
// - Bit 31: PBE
CpuidRegisterModifier {
register: CpuidRegister::Edx,
bitmap: RegisterValueFilter {
filter: 0b1010_1000_0110_0100_0001_0000_1000_0000,
value: 0b0000_0000_0000_0000_0001_0000_1000_0000,
},
},
],
},
CpuidLeafModifier {
leaf: 0x7,
subleaf: 0x0,
flags: KvmCpuidFlags(1),
modifiers: vec![
// EBX:
// - Bit 02: SGX
// - Bit 03: BMI1
// - Bit 04: HLE
// - Bit 05: AVX2
// - Bit 08: BMI2
// - Bit 10: INVPCID
// - Bit 11: RTM
// - Bit 12: RDT-M
// - Bit 14: MPX
// - Bit 15: RDT-A
// - Bit 16: AVX512F
// - Bit 17: AVX512DQ
// - Bit 18: RDSEED
// - Bit 19: ADX
// - Bit 21: AVX512_IFMA
// - Bit 23: CLFLUSHOPT
// - Bit 24: CLWB
// - Bit 25: Intel Processor Trace
// - Bit 26: AVX512PF
// - Bit 27: AVX512ER
// - Bit 28: AVX512CD
// - Bit 29: SHA
// - Bit 30: AVX512BW
// - Bit 31: AVX512VL
CpuidRegisterModifier {
register: CpuidRegister::Ebx,
bitmap: RegisterValueFilter {
filter: 0b1111_1111_1010_1111_1101_1101_0011_1100,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
// ECX:
// - Bit 01: AVX512_VBMI
// - Bit 02: UMIP
// - Bit 03: PKU
// - Bit 04: OSPKE
// - Bit 11: AVX512_VNNI
// - Bit 14: AVX512_VPOPCNTDQ
// - Bit 16: LA57
// - Bit 22: RDPID
// - Bit 30: SGX_LC
CpuidRegisterModifier {
register: CpuidRegister::Ecx,
bitmap: RegisterValueFilter {
filter: 0b0100_0000_0100_0001_0100_1000_0001_1110,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
// EDX:
// - Bit 02: AVX512_4VNNIW
// - Bit 03: AVX512_4FMAPS
CpuidRegisterModifier {
register: CpuidRegister::Edx,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0000_0000_0000_0000_1100,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
],
},
CpuidLeafModifier {
leaf: 0xd,
subleaf: 0x0,
flags: KvmCpuidFlags(1),
modifiers: vec![
// EAX:
// - Bits 04-03: MPX state
// - Bits 07-05: AVX-512 state
// - Bit 09: PKRU state
CpuidRegisterModifier {
register: CpuidRegister::Eax,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0000_0000_00_1_0_111_11_000,
value: 0b0000_0000_0000_0000_0000_00_0_0_000_00_000,
},
},
],
},
CpuidLeafModifier {
leaf: 0xd,
subleaf: 0x1,
flags: KvmCpuidFlags(1),
modifiers: vec![
// EAX:
// - Bit 01: Supports XSAVEC and the compacted form of XRSTOR
// - Bit 02: Supports XGETBV
// - Bit 03: Supports XSAVES/XRSTORS and IA32_XSS
CpuidRegisterModifier {
register: CpuidRegister::Eax,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0000_0000_0000_0000_1110,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
],
},
CpuidLeafModifier {
leaf: 0x80000001,
subleaf: 0x0,
flags: KvmCpuidFlags(0),
modifiers: vec![
// ECX:
// - Bit 05: LZCNT
// - Bit 08: PREFETCHW
CpuidRegisterModifier {
register: CpuidRegister::Ecx,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0000_0000_0001_0010_0000,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
// EDX:
// - Bit 26: 1-GByte pages
CpuidRegisterModifier {
register: CpuidRegister::Edx,
bitmap: RegisterValueFilter {
filter: 0b0000_0100_0000_0000_0000_0000_0000_0000,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
],
},
],
msr_modifiers: vec![],
..Default::default()
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/x86_64/static_cpu_templates/t2a.rs | src/vmm/src/cpu_config/x86_64/static_cpu_templates/t2a.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use crate::cpu_config::templates::{CustomCpuTemplate, RegisterValueFilter};
use crate::cpu_config::x86_64::cpuid::KvmCpuidFlags;
use crate::cpu_config::x86_64::custom_cpu_template::{
CpuidLeafModifier, CpuidRegister, CpuidRegisterModifier,
};
/// T2A template
///
/// Provide instruction set feature partity with Intel Cascade Lake or later using T2CL template.
///
/// References:
/// - Intel SDM: <https://cdrdv2.intel.com/v1/dl/getContent/671200>
/// - AMD APM: <https://www.amd.com/system/files/TechDocs/40332.pdf>
/// - CPUID Enumeration and Architectural MSRs: <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/cpuid-enumeration-and-architectural-msrs.html>
#[allow(clippy::unusual_byte_groupings)]
pub fn t2a() -> CustomCpuTemplate {
CustomCpuTemplate {
cpuid_modifiers: vec![
CpuidLeafModifier {
leaf: 0x1,
subleaf: 0x0,
flags: KvmCpuidFlags(0),
modifiers: vec![
// EAX: Version Information
// - Bits 03-00: Stepping (AMD APM) / Stepping ID (Intel SDM)
// - Bits 07-04: BaseModel (AMD APM) / Model (Intel SDM)
// - Bits 11-08: BaseFamily (AMD APM) / Family (Intel SDM)
// - Bits 13-12: Reserved (AMD APM) / Processor Type (Intel SDM)
// - Bits 19-16: ExtModel (AMD APM) / Extended Model ID (Intel SDM)
// - Bits 27-20: ExtFamily (AMD APM) / Extended Family ID (Intel SDM)
CpuidRegisterModifier {
register: CpuidRegister::Eax,
bitmap: RegisterValueFilter {
filter: 0b0000_11111111_1111_00_11_1111_1111_1111,
value: 0b0000_00000000_0011_00_00_0110_1111_0010,
},
},
// ECX: Feature Information
// - Bit 02: Reserved (AMD APM) / DTES64 (Intel SDM)
// - Bit 03: MONITOR (AMD APM) / MONITOR (Intel SDM)
// - Bit 04: Reserved (AMD APM) / DS-CPL (Intel SDM)
// - Bit 05: Reserved (AMD APM) / VMX (Intel SDM)
// - Bit 06: Reserved (AMD APM) / SMX (Intel SDM)
// - Bit 07: Reserved (AMD APM) / EIST (Intel SDM)
// - Bit 08: Reserved (AMD APM) / TM2 (Intel SDM)
// - Bit 10: Reserved (AMD APM) / CNXT-ID (Intel SDM)
// - Bit 11: Reserved (AMD APM) / SDBG (Intel SDM)
// - Bit 14: Reserved (AMD APM) / xTPR Update Control (Intel SDM)
// - Bit 15: Reserved (AMD APM) / PDCM (Intel SDM)
// - Bit 18: Reserevd (AMD APM) / DCA (Intel SDM)
CpuidRegisterModifier {
register: CpuidRegister::Ecx,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0100_1100_1101_1111_1100,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
// EDX: Feature Information
// - Bit 07: MCE (AMD APM) / MCE (Intel SDM)
// - Bit 12: MTRR (AMD APM) / MTRR (Intel SDM)
// - Bit 18: Reserved (AMD APM) / PSN (Intel SDM)
// - Bit 21: Reserved (AMD APM) / DS (Intel SDM)
// - Bit 22: Reserved (AMD APM) / ACPI (Intel SDM)
// - Bit 27: Reserved (AMD APM) / SS (Intel SDM)
// - Bit 29: Reserved (AMD APM) / TM (Intel SDM)
// - Bit 30: Reserved (AMD APM) / IA-64 (deprecated) https://www.intel.com/content/dam/www/public/us/en/documents/manuals/itanium-architecture-vol-4-manual.pdf
// - Bit 31: Reserved (AMD APM) / PBE (Intel SDM)
CpuidRegisterModifier {
register: CpuidRegister::Edx,
bitmap: RegisterValueFilter {
filter: 0b1110_1000_0110_0100_0001_0000_1000_0000,
value: 0b0000_0000_0000_0000_0001_0000_1000_0000,
},
},
],
},
CpuidLeafModifier {
leaf: 0x7,
subleaf: 0x0,
flags: KvmCpuidFlags(1),
modifiers: vec![
// EBX:
// - Bit 02: Reserved (AMD APM) / SGX (Intel SDM)
// - Bit 04: Reserved (AMD APM) / HLE (Intel SDM)
// - Bit 09: Reserved (AMD APM) / Enhanced REP MOVSB/STOSB (Intel SDM)
// - Bit 11: Reserved (AMD APM) / RTM (Intel SDM)
// - Bit 12: PQM (AMD APM) / RDT-M (Intel SDM)
// - Bit 14: Reserved (AMD APM) / MPX (Intel SDM)
// - Bit 15: PQE (AMD APM) / RDT-A (Intel SDM)
// - Bit 16: Reserved (AMD APM) / AVX512F (Intel SDM)
// - Bit 17: Reserved (AMD APM) / AVX512DQ (Intel SDM)
// - Bit 18: RDSEED (AMD APM) / RDSEED (Intel SDM)
// - Bit 19: ADX (AMD APM) / ADX (Intel SDM)
// - Bit 21: Reserved (AMD APM) / AVX512_IFMA (Intel SDM)
// - Bit 22: RDPID (AMD APM) / Reserved (Intel SDM)
// On kernel codebase and Intel SDM, RDPID is enumerated at CPUID.07h:ECX.RDPID[bit 22].
// https://elixir.bootlin.com/linux/v6.3.8/source/arch/x86/include/asm/cpufeatures.h#L389
// - Bit 23: CLFLUSHOPT (AMD APM) / CLFLUSHOPT (Intel SDM)
// - Bit 24: CLWB (AMD APM) / CLWB (Intel SDM)
// - Bit 25: Reserved (AMD APM) / Intel Processor Trace (Intel SDM)
// - Bit 26: Reserved (AMD APM) / AVX512PF (Intel SDM)
// - Bit 27: Reserved (AMD APM) / AVX512ER (Intel SDM)
// - Bit 28: Reserved (AMD APM) / AVX512CD (Intel SDM)
// - Bit 29: SHA (AMD APM) / SHA (Intel SDM)
// - Bit 30: Reserved (AMD APM) / AVX512BW (Intel SDM)
// - Bit 31: Reserved (AMD APM) / AVX512VL (Intel SDM)
CpuidRegisterModifier {
register: CpuidRegister::Ebx,
bitmap: RegisterValueFilter {
filter: 0b1111_1111_1110_1111_1101_1010_0001_0100,
value: 0b0000_0000_0000_0000_0000_0010_0000_0000,
},
},
// ECX:
// - Bit 01: Reserved (AMD APM) / AVX512_VBMI (Intel SDM)
// - Bit 02: UMIP (AMD APM) / UMIP (Intel SDM)
// - Bit 03: PKU (AMD APM) / PKU (Intel SDM)
// - Bit 04: OSPKE (AMD APM) / OSPKE (Intel SDM)
// - Bit 06: Reserved (AMD APM) / AVX512_VBMI2 (Intel SDM)
// - Bit 08: Reserved (AMD APM) / GFNI (Intel SDM)
// - Bit 09: VAES (AMD APM) / VAES (Intel SDM)
// - Bit 10: VPCLMULQDQ (AMD APM) / VPCLMULQDQ (Intel SDM)
// - Bit 11: Reserved (AMD APM) / AVX512_VNNI (Intel SDM)
// - Bit 12: Reserved (AMD APM) / AVX512_BITALG (Intel SDM)
// - Bit 14: Reserved (AMD APM) / AVX512_VPOPCNTDQ (Intel SDM)
// - Bit 16: LA57 (AMD APM) / LA57 (Intel SDM)
// - Bit 22: Reserved (AMD APM) / RDPID and IA32_TSC_AUX (Intel SDM)
// - Bit 30: Reserved (AMD APM) / SGX_LC (Intel SDM)
CpuidRegisterModifier {
register: CpuidRegister::Ecx,
bitmap: RegisterValueFilter {
filter: 0b0100_0000_0100_0001_0101_1111_0101_1110,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
// EDX:
// - Bit 02: Reserved (AMD APM) / AVX512_4VNNIW (Intel SDM)
// - Bit 03: Reserved (AMD APM) / AVX512_4FMAPS (Intel SDM)
// - Bit 04: Reserved (AMD APM) / Fast Short REP MOV (Intel SDM)
// - Bit 08: Reserved (AMD APM) / AVX512_VP2INTERSECT (Intel SDM)
CpuidRegisterModifier {
register: CpuidRegister::Edx,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0000_0000_0001_0001_1100,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
],
},
CpuidLeafModifier {
leaf: 0xd,
subleaf: 0x0,
flags: KvmCpuidFlags(1),
modifiers: vec![
// EAX:
// - Bits 04-03: Reserved (AMD APM) / MPX state (Intel SDM)
// - Bits 07-05: Reserved (AMD APM) / AVX-512 state (Intel SDM)
// - Bit 09: MPK (AMD APM) / PKRU state (Intel SDM)
CpuidRegisterModifier {
register: CpuidRegister::Eax,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0000_0000_00_1_0_111_11_000,
value: 0b0000_0000_0000_0000_0000_00_0_0_000_00_000,
},
},
],
},
CpuidLeafModifier {
leaf: 0xd,
subleaf: 0x1,
flags: KvmCpuidFlags(1),
modifiers: vec![
// EAX:
// - Bit 01: XSAVEC (AMD APM) / Supports XSAVEC and the compacted form of
// XRSTOR (Intel SDM)
// - Bit 02: XGETBV (AMD APM) / Supports XGETBV (Intel SDM)
// - Bit 03: XSAVES (AMD APM) / Supports XSAVES/XRSTORS and IA32_XSS (Intel
// SDM)
CpuidRegisterModifier {
register: CpuidRegister::Eax,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0000_0000_0000_0000_1110,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
],
},
CpuidLeafModifier {
leaf: 0x80000001,
subleaf: 0x0,
flags: KvmCpuidFlags(0),
modifiers: vec![
// ECX:
// - Bit 02: SVM (AMD APM) / Reserved (Intel SDM)
// - Bit 06: SSE4A (AMD APM) / Reserved (Intel SDM)
// - Bit 07: MisAlignSse (AMD APM) / Reserved (Intel SDM)
// - Bit 08: 3DNowPrefetch (AMD APM) / PREFETCHW (Intel SDM)
// - Bit 29: MONITORX (AMD APM) / MONITORX and MWAITX (Intel SDM)
CpuidRegisterModifier {
register: CpuidRegister::Ecx,
bitmap: RegisterValueFilter {
filter: 0b0010_0000_0000_0000_0000_0001_1100_0100,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
// EDX:
// - Bit 22: MmxExt (AMD APM) / Reserved (Intel SDM)
// - Bit 25: FFXSR (AMD APM) / Reserved (Intel SDM)
// - Bit 26: Page1GB (AMD APM) / 1-GByte pages (Intel SDM)
CpuidRegisterModifier {
register: CpuidRegister::Edx,
bitmap: RegisterValueFilter {
filter: 0b0000_0110_0100_0000_0000_0000_0000_0000,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
],
},
CpuidLeafModifier {
leaf: 0x80000008,
subleaf: 0x0,
flags: KvmCpuidFlags(0),
modifiers: vec![
// EBX:
// - Bit 00: CLZERO (AMD APM) / Reserved (Intel SDM)
// - Bit 02: RstrFpErrPtrs (AMD APM) / Reserved (Intel SDM)
// - Bit 09: WBNOINVD (AMD APM) / WBNOINVD (Intel SDM)
// - Bit 18: IbrsPreferred (ADM APM) / Reserved (Intel SDm)
// - Bit 19: IbrsSameMode (AMD APM) / Reserved (Intel SDM)
// - Bit 20: EferLmsleUnsupported (AMD APM) / Reserved (Intel SDM)
CpuidRegisterModifier {
register: CpuidRegister::Ebx,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0001_1100_0000_0010_0000_0101,
value: 0b0000_0000_0001_1100_0000_0000_0000_0100,
},
},
],
},
],
msr_modifiers: vec![],
..Default::default()
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/x86_64/static_cpu_templates/t2cl.rs | src/vmm/src/cpu_config/x86_64/static_cpu_templates/t2cl.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use crate::cpu_config::templates::{CustomCpuTemplate, RegisterValueFilter};
use crate::cpu_config::x86_64::cpuid::KvmCpuidFlags;
use crate::cpu_config::x86_64::custom_cpu_template::{
CpuidLeafModifier, CpuidRegister, CpuidRegisterModifier, RegisterModifier,
};
/// T2CL template
///
/// Mask CPUID to make exposed CPU features as close as possbile to Intel Cascade Lake and provide
/// instruction set feature partity with AMD Milan using T2A template.
///
/// References:
/// - Intel SDM: <https://cdrdv2.intel.com/v1/dl/getContent/671200>
/// - AMD APM: <https://www.amd.com/system/files/TechDocs/40332.pdf>
/// - CPUID Enumeration and Architectural MSRs: <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/cpuid-enumeration-and-architectural-msrs.html>
#[allow(clippy::unusual_byte_groupings)]
pub fn t2cl() -> CustomCpuTemplate {
CustomCpuTemplate {
cpuid_modifiers: vec![
CpuidLeafModifier {
leaf: 0x1,
subleaf: 0x0,
flags: KvmCpuidFlags(0),
modifiers: vec![
// EAX: Version Information
// - Bits 03-00: Stepping ID (Intel SDM) / Stepping (AMD APM)
// - Bits 07-04: Model (Intel SDM) / BaseModel (AMD APM)
// - Bits 11-08: Family (Intel SDM) / BaseFamily (AMD APM)
// - Bits 13-12: Processor Type (Intel SDM) / Reserved (AMD APM)
// - Bits 19-16: Extended Model ID (Intel SDM) / ExtModel (AMD APM)
// - Bits 27-20: Extended Family ID (Intel SDM) / ExtFamily (AMD APM)
CpuidRegisterModifier {
register: CpuidRegister::Eax,
bitmap: RegisterValueFilter {
filter: 0b0000_11111111_1111_00_11_1111_1111_1111,
value: 0b0000_00000000_0011_00_00_0110_1111_0010,
},
},
// ECX: Feature Information
// - Bit 02: DTES64 (Intel SDM) / Reserved (AMD APM)
// - Bit 03: MONITOR (Intel SDM) / MONITOR (AMD APM)
// - Bit 04: DS-CPL (Intel SDM) / Reserved (AMD APM)
// - Bit 05: VMX (Intel SDM) / Reserved (AMD APM)
// - Bit 06: SMX (Intel SDM) / Reserved (AMD APM)
// - Bit 07: EIST (Intel SDM) / Reserved (AMD APM)
// - Bit 08: TM2 (Intel SDM) / Reserved (AMD APM)
// - Bit 10: CNXT-ID (Intel SDM) / Reserved (AMD APM)
// - Bit 11: SDBG (Intel SDM) / Reserved (AMD APM)
// - Bit 14: xTPR Update Control (Intel SDM) / Reserved (AMD APM)
// - Bit 15: PDCM (Intel SDM) / Reserved (AMD APM)
// - Bit 18: DCA (Intel SDM) / Reserevd (AMD APM)
CpuidRegisterModifier {
register: CpuidRegister::Ecx,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0100_1100_1101_1111_1100,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
// EDX: Feature Information
// - Bit 07: MCE (Intel SDM) / MCE (AMD APM)
// - Bit 12: MTRR (Intel SDM) / MTRR (AMD APM)
// - Bit 18: PSN (Intel SDM) / Reserved (AMD APM)
// - Bit 21: DS (Intel SDM) / Reserved (AMD APM)PC
// - Bit 22: ACPI (Intel SDM) / Reserved (AMD APM)
// - Bit 27: SS (Intel SDM) / Reserved (AMD APM)
// - Bit 29: TM (Intel SDM) / Reserved (AMD APM)
// - Bit 30: IA64 (deprecated) / Reserved (AMD APM) https://www.intel.com/content/dam/www/public/us/en/documents/manuals/itanium-architecture-vol-4-manual.pdf
// - Bit 31: PBE (Intel SDM) / Reserved (AMD APM)
CpuidRegisterModifier {
register: CpuidRegister::Edx,
bitmap: RegisterValueFilter {
filter: 0b1110_1000_0110_0100_0001_0000_1000_0000,
value: 0b0000_0000_0000_0000_0001_0000_1000_0000,
},
},
],
},
CpuidLeafModifier {
leaf: 0x7,
subleaf: 0x0,
flags: KvmCpuidFlags(1),
modifiers: vec![
// EBX:
// - Bit 02: SGX (Intel SDM) / Reserved (AMD APM)
// - Bit 04: HLE (Intel SDM) / Reserved (AMD APM)
// - Bit 09: Enhanced REP MOVSB/STOSB (Intel SDM) / Reserved (AMD APM)
// - Bit 11: RTM (Intel SDM) / Reserved (AMD APM)
// - Bit 12: RDT-M (Intel SDM) / PQM (AMD APM)
// - Bit 14: MPX (Intel SDM) / Reserved (AMD APM)
// - Bit 15: RDT-A (Intel SDM) / PQE (AMD APM)
// - Bit 16: AVX512F (Intel SDM) / Reserved (AMD APM)
// - Bit 17: AVX512DQ (Intel SDM) / Reserved (AMD APM)
// - Bit 18: RDSEED (Intel SDM) / RDSEED (AMD APM)
// - Bit 19: ADX (Intel SDM) / ADX (AMD APM)
// - Bit 21: AVX512_IFMA (Intel SDM) / Reserved (AMD APM)
// - Bit 22: Reserved (Intel SDM) / RDPID (AMD APM)
// On kernel codebase and Intel SDM, RDPID is enumerated at CPUID.07h:ECX.RDPID[bit 22].
// https://elixir.bootlin.com/linux/v6.3.8/source/arch/x86/include/asm/cpufeatures.h#L389
// - Bit 23: CLFLUSHOPT (Intel SDM) / CLFLUSHOPT (AMD APM)
// - Bit 24: CLWB (Intel SDM) / CLWB (AMD APM)
// - Bit 25: Intel Processor Trace (Intel SDM) / Reserved (AMD APM)
// - Bit 26: AVX512PF (Intel SDM) / Reserved (AMD APM)
// - Bit 27: AVX512ER (Intel SDM) / Reserved (AMD APM)
// - Bit 28: AVX512CD (Intel SDM) / Reserved (AMD APM)
// - Bit 29: SHA (Intel SDM) / SHA (AMD APM)
// - Bit 30: AVX512BW (Intel SDM) / Reserved (AMD APM)
// - Bit 31: AVX512VL (Intel SDM) / Reserved (AMD APM)
CpuidRegisterModifier {
register: CpuidRegister::Ebx,
bitmap: RegisterValueFilter {
filter: 0b1111_1111_1110_1111_1101_1010_0001_0100,
value: 0b0000_0000_0000_0000_0000_0010_0000_0000,
},
},
// ECX:
// - Bit 01: AVX512_VBMI (Intel SDM) / Reserved (AMD APM)
// - Bit 02: UMIP (Intel SDM) / UMIP (AMD APM)
// - Bit 03: PKU (Intel SDM) / PKU (AMD APM)
// - Bit 04: OSPKE (Intel SDM) / OSPKE (AMD APM)
// - Bit 06: AVX512_VBMI2 (Intel SDM) / Reserved (AMD APM)
// - Bit 08: GFNI (Intel SDM) / Reserved (AMD APM)
// - Bit 09: VAES (Intel SDM) / VAES (AMD APM)
// - Bit 10: VPCLMULQDQ (Intel SDM) / VPCLMULQDQ (AMD APM)
// - Bit 11: AVX512_VNNI (Intel SDM) / Reserved (AMD APM)
// - Bit 12: AVX512_BITALG (Intel SDM) / Reserved (AMD APM)
// - Bit 14: AVX512_VPOPCNTDQ (Intel SDM) / Reserved (AMD APM)
// - Bit 16: LA57 (Intel SDM) / LA57 (AMD APM)
// - Bit 22: RDPID and IA32_TSC_AUX (Intel SDM) / Reserved (AMD APM)
// - Bit 30: SGX_LC (Intel SDM) / Reserved (AMD APM)
CpuidRegisterModifier {
register: CpuidRegister::Ecx,
bitmap: RegisterValueFilter {
filter: 0b0100_0000_0100_0001_0101_1111_0101_1110,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
// EDX:
// - Bit 02: AVX512_4VNNIW (Intel SDM) / Reserved (AMD APM)
// - Bit 03: AVX512_4FMAPS (Intel SDM) / Reserved (AMD APM)
// - Bit 04: Fast Short REP MOV (Intel SDM) / Reserved (AMD APM)
// - Bit 08: AVX512_VP2INTERSECT (Intel SDM) / Reserved (AMD APM)
CpuidRegisterModifier {
register: CpuidRegister::Edx,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0000_0000_0001_0001_1100,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
],
},
CpuidLeafModifier {
leaf: 0xd,
subleaf: 0x0,
flags: KvmCpuidFlags(1),
modifiers: vec![
// EAX:
// - Bits 04-03: MPX state (Intel SDM) / Reserved (AMD APM)
// - Bits 07-05: AVX-512 state (Intel SDM) / Reserved (AMD APM)
// - Bit 09: PKRU state (Intel SDM) / MPK (AMD APM)
CpuidRegisterModifier {
register: CpuidRegister::Eax,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0000_0000_00_1_0_111_11_000,
value: 0b0000_0000_0000_0000_0000_00_0_0_000_00_000,
},
},
],
},
CpuidLeafModifier {
leaf: 0xd,
subleaf: 0x1,
flags: KvmCpuidFlags(1),
modifiers: vec![
// EAX:
// - Bit 01: Supports XSAVEC and the compacted form of XRSTOR (Intel SDM) /
// XSAVEC (AMD APM)
// - Bit 02: Supports XGETBV (Intel SDM) / XGETBV (AMD APM)
// - Bit 03: Supports XSAVES/XRSTORS and IA32_XSS (Intel SDM) / XSAVES (AMD
// APM)
CpuidRegisterModifier {
register: CpuidRegister::Eax,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0000_0000_0000_0000_1110,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
],
},
CpuidLeafModifier {
leaf: 0x80000001,
subleaf: 0x0,
flags: KvmCpuidFlags(0),
modifiers: vec![
// ECX:
// - Bit 06: Reserved (Intel SDM) / SSE4A (AMD APM)
// - Bit 07: Reserved (Intel SDM) / MisAlignSse (AMD APM)
// - Bit 08: PREFETCHW (Intel SDM) / 3DNowPrefetch (AMD APM)
// - Bit 29: MONITORX and MWAITX (Intel SDM) / MONITORX (AMD APM)
CpuidRegisterModifier {
register: CpuidRegister::Ecx,
bitmap: RegisterValueFilter {
filter: 0b0010_0000_0000_0000_0000_0001_1100_0000,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
// EDX:
// - Bit 22: Reserved (Intel SDM) / MmxExt (AMD APM)
// - Bit 23: Reserved (Intel SDM) / MMX (AMD APM)
// - Bit 24: Reserved (Intel SDM) / FSXR (AMD APM)
// - Bit 25: Reserved (Intel SDM) / FFXSR (AMD APM)
// - Bit 26: 1-GByte pages (Intel SDM) / Page1GB (AMD APM)
CpuidRegisterModifier {
register: CpuidRegister::Edx,
bitmap: RegisterValueFilter {
filter: 0b0000_0111_1100_0000_0000_0000_0000_0000,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
],
},
CpuidLeafModifier {
leaf: 0x80000008,
subleaf: 0x0,
flags: KvmCpuidFlags(0),
modifiers: vec![
// EBX:
// - Bit 09: WBNOINVD (Intel SDM) / WBNOINVD (AMD APM)
CpuidRegisterModifier {
register: CpuidRegister::Ebx,
bitmap: RegisterValueFilter {
filter: 0b0000_0000_0000_0000_0000_0010_0000_0000,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000,
},
},
],
},
],
msr_modifiers: vec![
// IA32_ARCH_CAPABILITIES:
// - Bit 09: MCU_CONTROL
// - Bit 10: MISC_PACKAGE_CTLS
// - Bit 11: ENERGY_FILTERING_CTL
// - Bit 12: DOITM
// - Bit 16: Reserved
// - Bit 18: FB_CLEAR_CTRL
// - Bit 20: BHI_NO
// - Bit 21: XAPIC_DISABLE_STATUS
// - Bit 22: Reserved
// - Bit 23: OVERCLOCKING_STATUS
// - Bit 25: GDS_CTRL
// - Bits 63-27: Reserved (Intel SDM)
//
// As T2CL template does not aim to provide an ability to migrate securely guests across
// different processors, there is no need to mask hardware security mitigation bits off
// only to make it appear to the guest as if it's running on the most vulnerable of the
// supported processors. Guests might be able to benefit from performance improvements
// by making the most use of available mitigations on the processor. Thus, T2CL template
// passes through security mitigation bits that KVM thinks are able to be passed
// through. The list of such bits are found in the following link.
// https://elixir.bootlin.com/linux/v6.8.2/source/arch/x86/kvm/x86.c#L1621
// - Bit 00: RDCL_NO
// - Bit 01: IBRS_ALL
// - Bit 02: RSBA
// - Bit 03: SKIP_L1DFL_VMENTRY
// - Bit 04: SSB_NO
// - Bit 05: MDS_NO
// - Bit 06: IF_PSCHANGE_MC_NO
// - Bit 07: TSX_CTRL
// - Bit 08: TAA_NO
// - Bit 13: SBDR_SSDP_NO
// - Bit 14: FBSDP_NO
// - Bit 15: PSDP_NO
// - Bit 17: FB_CLEAR
// - Bit 19: RRSBA
// - Bit 24: PBRSB_NO
// - Bit 26: GDS_NO
// - Bit 27: RFDS_NO
// - Bit 28: RFDS_CLEAR
//
// Note that this MSR is specific to Intel processors.
RegisterModifier {
addr: 0x10a,
bitmap: RegisterValueFilter {
filter: 0b1111_1111_1111_1111_1111_1111_1111_1111_1110_0010_1111_0101_0001_1110_0000_0000,
value: 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000,
},
},
],
..Default::default()
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/aarch64/custom_cpu_template.rs | src/vmm/src/cpu_config/aarch64/custom_cpu_template.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/// Guest config sub-module specifically for
/// config templates.
use std::borrow::Cow;
use serde::de::Error;
use serde::{Deserialize, Serialize};
use crate::arch::aarch64::regs::{RegSize, reg_size};
use crate::cpu_config::aarch64::static_cpu_templates::v1n1;
use crate::cpu_config::templates::{
CpuTemplateType, GetCpuTemplate, GetCpuTemplateError, KvmCapability, RegisterValueFilter,
StaticCpuTemplate,
};
use crate::cpu_config::templates_serde::*;
impl GetCpuTemplate for Option<CpuTemplateType> {
    /// Resolve the effective CPU template for this configuration.
    ///
    /// Returns a borrowed template when a custom one was supplied directly,
    /// an owned expansion for the known V1N1 static template, an error for
    /// any other static template, and an owned empty template when nothing
    /// was configured.
    fn get_cpu_template(&self) -> Result<Cow<'_, CustomCpuTemplate>, GetCpuTemplateError> {
        match self {
            None => Ok(Cow::Owned(CustomCpuTemplate::default())),
            Some(CpuTemplateType::Custom(template)) => Ok(Cow::Borrowed(template)),
            // TODO: Check if the CPU model is Neoverse-V1.
            Some(CpuTemplateType::Static(StaticCpuTemplate::V1N1)) => {
                Ok(Cow::Owned(v1n1::v1n1()))
            }
            Some(CpuTemplateType::Static(other)) => {
                Err(GetCpuTemplateError::InvalidStaticCpuTemplate(*other))
            }
        }
    }
}
/// Wrapper type containing aarch64 CPU config modifiers.
#[derive(Debug, Default, Clone, Eq, PartialEq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct CustomCpuTemplate {
    /// Additional kvm capabilities to check before
    /// configuring vcpus.
    #[serde(default)]
    pub kvm_capabilities: Vec<KvmCapability>,
    /// Modifiers of enabled vcpu features for vcpu.
    #[serde(default)]
    pub vcpu_features: Vec<VcpuFeatures>,
    /// Modifiers for registers on Aarch64 CPUs.
    #[serde(default)]
    pub reg_modifiers: Vec<RegisterModifier>,
}
impl CustomCpuTemplate {
    /// Get a list of register IDs that are modified by the CPU template.
    pub fn reg_list(&self) -> Vec<u64> {
        self.reg_modifiers
            .iter()
            .map(|modifier| modifier.addr)
            .collect()
    }

    /// Validate the correctness of the template.
    ///
    /// For every register modifier, checks that the bitmap `filter` and
    /// `value` fit within the width of the target register (32 or 64 bits).
    /// 128-bit registers need no range check since the bitmap type itself is
    /// `u128`. Any other register width is rejected with an error.
    pub fn validate(&self) -> Result<(), serde_json::Error> {
        for modifier in self.reg_modifiers.iter() {
            // Register width in bytes, derived from the register ID encoding.
            let reg_size = reg_size(modifier.addr);
            match RegSize::from(reg_size) {
                RegSize::U32 | RegSize::U64 => {
                    // Safe to unwrap because the number of bits is limited
                    let limit = 2u128.pow(u32::try_from(reg_size).unwrap() * 8) - 1;
                    if limit < modifier.bitmap.value || limit < modifier.bitmap.filter {
                        return Err(serde_json::Error::custom(format!(
                            "Invalid size of bitmap for register {:#x}, should be <= {} bits",
                            modifier.addr,
                            reg_size * 8
                        )));
                    }
                }
                RegSize::U128 => {}
                _ => {
                    return Err(serde_json::Error::custom(format!(
                        "Invalid aarch64 register address: {:#x} - Only 32, 64 and 128 bit wide \
                         registers are supported",
                        modifier.addr
                    )));
                }
            }
        }
        Ok(())
    }
}
/// Struct for defining enabled vcpu features.
#[derive(Debug, Default, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct VcpuFeatures {
    /// Index in the `kvm_bindings::kvm_vcpu_init.features` array.
    pub index: u32,
    /// Modifier for the value in the `kvm_bindings::kvm_vcpu_init.features` array.
    pub bitmap: RegisterValueFilter<u32>,
}
/// Wrapper of a mask defined as a bitmap to apply
/// changes to a given register's value.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Hash)]
pub struct RegisterModifier {
    /// Pointer of the location to be bit mapped.
    ///
    /// Serialized to JSON as a hex string (e.g. "0x0030000000000000") and
    /// deserialized from a prefixed numeric string (see the serde attributes).
    #[serde(
        deserialize_with = "deserialize_from_str_u64",
        serialize_with = "serialize_to_hex_str"
    )]
    pub addr: u64,
    /// Bit mapping to be applied as a modifier to the
    /// register's value at the address provided.
    pub bitmap: RegisterValueFilter<u128>,
}
#[cfg(test)]
mod tests {
use serde_json::Value;
use super::*;
use crate::cpu_config::templates::test_utils::{TEST_TEMPLATE_JSON, build_test_template};
#[test]
fn test_get_cpu_template_with_no_template() {
// Test `get_cpu_template()` when no template is provided. The empty owned
// `CustomCpuTemplate` should be returned.
let cpu_template = None;
assert_eq!(
cpu_template.get_cpu_template().unwrap(),
Cow::Owned(CustomCpuTemplate::default()),
);
}
#[test]
fn test_get_cpu_template_with_v1n1_static_template() {
// Test `get_cpu_template()` when V1N1 static CPU template is specified. The owned
// `CustomCpuTemplate` should be returned.
let cpu_template = Some(CpuTemplateType::Static(StaticCpuTemplate::V1N1));
assert_eq!(
cpu_template.get_cpu_template().unwrap(),
Cow::Owned(v1n1::v1n1())
);
}
#[test]
fn test_get_cpu_tempalte_with_none_static_template() {
// Test `get_cpu_template()` when no static CPU template is provided.
// `InvalidStaticCpuTemplate` error should be returned because it is no longer valid and
// was replaced with `None` of `Option<CpuTemplateType>`.
let cpu_template = Some(CpuTemplateType::Static(StaticCpuTemplate::None));
assert_eq!(
cpu_template.get_cpu_template().unwrap_err(),
GetCpuTemplateError::InvalidStaticCpuTemplate(StaticCpuTemplate::None)
);
}
#[test]
fn test_get_cpu_template_with_custom_template() {
// Test `get_cpu_template()` when a custom CPU template is provided. The borrowed
// `CustomCpuTemplate` should be returned.
let inner_cpu_template = CustomCpuTemplate::default();
let cpu_template = Some(CpuTemplateType::Custom(inner_cpu_template.clone()));
assert_eq!(
cpu_template.get_cpu_template().unwrap(),
Cow::Borrowed(&inner_cpu_template)
);
}
#[test]
fn test_correct_json() {
let cpu_config_result = serde_json::from_str::<CustomCpuTemplate>(
r#"{
"kvm_capabilities": ["1", "!2"],
"vcpu_features":[{"index":0,"bitmap":"0b1100000"}],
"reg_modifiers": [
{
"addr": "0x0030000000000000",
"bitmap": "0bx00100x0x1xxxx01xxx1xxxxxxxxxxx1"
}
]
}"#,
);
cpu_config_result.unwrap();
}
#[test]
fn test_malformed_json() {
// Malformed kvm capabilities
let cpu_config_result = serde_json::from_str::<CustomCpuTemplate>(
r#"{
"kvm_capabilities": ["1", "!a2"],
"vcpu_features":[{"index":0,"bitmap":"0b1100000"}]
}"#,
);
cpu_config_result.unwrap_err();
// Malformed vcpu features
let cpu_config_result = serde_json::from_str::<CustomCpuTemplate>(
r#"{
"kvm_capabilities": ["1", "!2"],
"vcpu_features":[{"index":0,"bitmap":"0b11abc00"}]
}"#,
);
cpu_config_result.unwrap_err();
// Malformed register address
let cpu_config_result = serde_json::from_str::<CustomCpuTemplate>(
r#"{
"reg_modifiers": [
{
"addr": "j",
"bitmap": "0bx00100xxx1xxxx00xxx1xxxxxxxxxxx1"
}
]
}"#,
);
let error_msg: String = cpu_config_result.unwrap_err().to_string();
// Formatted error expected clarifying the number system prefix is missing
assert!(
error_msg.contains("No supported number system prefix found in value"),
"{}",
error_msg
);
// Malformed address as binary
let cpu_config_result = serde_json::from_str::<CustomCpuTemplate>(
r#"{
"reg_modifiers": [
{
"addr": "0bK",
"bitmap": "0bx00100xxx1xxxx00xxx1xxxxxxxxxxx1"
}
]
}"#,
);
assert!(
cpu_config_result
.unwrap_err()
.to_string()
.contains("Failed to parse string [0bK] as a number for CPU template")
);
// Malformed 64-bit bitmap - filter failed
let cpu_config_result = serde_json::from_str::<CustomCpuTemplate>(
r#"{
"reg_modifiers": [
{
"addr": "0x0030000000000000",
"bitmap": "0bx0?1_0_0x_?x1xxxx00xxx1xxxxxxxxxxx1"
}
]
}"#,
);
assert!(cpu_config_result.unwrap_err().to_string().contains(
"Failed to parse string [0bx0?1_0_0x_?x1xxxx00xxx1xxxxxxxxxxx1] as a bitmap"
));
// Malformed 64-bit bitmap - value failed
let cpu_config_result = serde_json::from_str::<CustomCpuTemplate>(
r#"{
"reg_modifiers": [
{
"addr": "0x0030000000000000",
"bitmap": "0bx00100x0x1xxxx05xxx1xxxxxxxxxxx1"
}
]
}"#,
);
assert!(
cpu_config_result.unwrap_err().to_string().contains(
"Failed to parse string [0bx00100x0x1xxxx05xxx1xxxxxxxxxxx1] as a bitmap"
)
);
}
#[test]
fn test_deserialization_lifecycle() {
let cpu_config = serde_json::from_str::<CustomCpuTemplate>(TEST_TEMPLATE_JSON)
.expect("Failed to deserialize custom CPU template.");
assert_eq!(2, cpu_config.reg_modifiers.len());
}
#[test]
fn test_serialization_lifecycle() {
let template = build_test_template();
let template_json_str_result = serde_json::to_string_pretty(&template);
let template_json = template_json_str_result.unwrap();
let deserialization_result = serde_json::from_str::<CustomCpuTemplate>(&template_json);
assert_eq!(template, deserialization_result.unwrap());
}
/// Test to confirm that templates for different CPU architectures have
/// a size bitmask that is supported by the architecture when serialized to JSON.
#[test]
fn test_bitmap_width() {
let mut checked = false;
let template = build_test_template();
let aarch64_template_str =
serde_json::to_string(&template).expect("Error serializing aarch64 template");
let json_tree: Value = serde_json::from_str(&aarch64_template_str)
.expect("Error deserializing aarch64 template JSON string");
// Check that bitmap for aarch64 masks are serialized to 128-bits
if let Some(modifiers_root) = json_tree.get("reg_modifiers") {
let mod_node = &modifiers_root.as_array().unwrap()[0];
if let Some(bit_map_str) = mod_node.get("bitmap") {
// 128-bit width with a "0b" prefix for binary-formatted numbers
assert_eq!(bit_map_str.as_str().unwrap().len(), 130);
assert!(bit_map_str.as_str().unwrap().starts_with("0b"));
checked = true;
}
}
assert!(
checked,
"Bitmap width in a aarch64 template was not tested."
);
}
#[test]
fn test_cpu_template_validate() {
// 32, 64 and 128 bit regs with correct filters and values
let template = CustomCpuTemplate {
reg_modifiers: vec![
RegisterModifier {
addr: 0x0020000000000000,
bitmap: RegisterValueFilter {
filter: 0x1,
value: 0x2,
},
},
RegisterModifier {
addr: 0x0030000000000000,
bitmap: RegisterValueFilter {
filter: 0x1,
value: 0x2,
},
},
RegisterModifier {
addr: 0x0040000000000000,
bitmap: RegisterValueFilter {
filter: 0x1,
value: 0x2,
},
},
],
..Default::default()
};
template.validate().unwrap();
// 32 bit reg with too long filter
let template = CustomCpuTemplate {
reg_modifiers: vec![RegisterModifier {
addr: 0x0020000000000000,
bitmap: RegisterValueFilter {
filter: 0x100000000,
value: 0x2,
},
}],
..Default::default()
};
template.validate().unwrap_err();
// 32 bit reg with too long value
let template = CustomCpuTemplate {
reg_modifiers: vec![RegisterModifier {
addr: 0x0020000000000000,
bitmap: RegisterValueFilter {
filter: 0x1,
value: 0x100000000,
},
}],
..Default::default()
};
template.validate().unwrap_err();
// 16 bit unsupporteed reg
let template = CustomCpuTemplate {
reg_modifiers: vec![RegisterModifier {
addr: 0x0010000000000000,
bitmap: RegisterValueFilter {
filter: 0x1,
value: 0x2,
},
}],
..Default::default()
};
template.validate().unwrap_err();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/aarch64/test_utils.rs | src/vmm/src/cpu_config/aarch64/test_utils.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use crate::arch::aarch64::regs::{ID_AA64ISAR0_EL1, ID_AA64PFR0_EL1};
use crate::cpu_config::aarch64::custom_cpu_template::RegisterModifier;
use crate::cpu_config::templates::{CustomCpuTemplate, RegisterValueFilter};
/// Test CPU template in JSON format
pub const TEST_TEMPLATE_JSON: &str = r#"{
"reg_modifiers": [
{
"addr": "0x0030000000000011",
"bitmap": "0b1xx1"
},
{
"addr": "0x0030000000000022",
"bitmap": "0b1x00"
}
]
}"#;
/// Test CPU template in JSON format but has an invalid field for the architecture.
/// "msr_modifiers" is the field name for the model specific registers for
/// defined by x86 CPUs.
pub const TEST_INVALID_TEMPLATE_JSON: &str = r#"{
"msr_modifiers": [
{
"addr": "0x0AAC",
"bitmap": "0b1xx1"
}
]
}"#;
/// Builds a sample custom CPU template.
///
/// The template carries two register modifiers, one for `ID_AA64PFR0_EL1`
/// and one for `ID_AA64ISAR0_EL1`, each with a small filter/value bitmap;
/// all other template fields keep their default values.
pub fn build_test_template() -> CustomCpuTemplate {
    CustomCpuTemplate {
        reg_modifiers: vec![
            RegisterModifier {
                addr: ID_AA64PFR0_EL1,
                bitmap: RegisterValueFilter {
                    filter: 0b100010001,
                    value: 0b100000001,
                },
            },
            RegisterModifier {
                addr: ID_AA64ISAR0_EL1,
                bitmap: RegisterValueFilter {
                    filter: 0b1110,
                    value: 0b0110,
                },
            },
        ],
        ..Default::default()
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/aarch64/mod.rs | src/vmm/src/cpu_config/aarch64/mod.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/// Module for custom CPU templates
pub mod custom_cpu_template;
/// Module for static CPU templates
pub mod static_cpu_templates;
/// Module with test utils for custom CPU templates
pub mod test_utils;
use super::templates::CustomCpuTemplate;
use crate::Vcpu;
use crate::arch::aarch64::regs::{Aarch64RegisterVec, RegSize};
use crate::arch::aarch64::vcpu::{VcpuArchError, get_registers};
use crate::vstate::vcpu::KvmVcpuError;
/// Errors thrown while configuring templates.
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum CpuConfigurationError {
/// Error initializing the vcpu: {0}
VcpuInit(#[from] KvmVcpuError),
/// Error reading vcpu registers: {0}
VcpuGetRegs(#[from] VcpuArchError),
}
/// CPU configuration for aarch64
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct CpuConfiguration {
/// Vector of CPU registers
pub regs: Aarch64RegisterVec,
}
impl CpuConfiguration {
    /// Create new CpuConfiguration.
    ///
    /// Initializes every vCPU with the vcpu features requested by the
    /// template, then reads the current values of all template-modified
    /// registers from the first vCPU.
    ///
    /// NOTE(review): indexes `vcpus[0]`, so this panics on an empty slice —
    /// callers are expected to pass at least one vCPU; confirm at call sites.
    pub fn new(
        cpu_template: &CustomCpuTemplate,
        vcpus: &mut [Vcpu],
    ) -> Result<Self, CpuConfigurationError> {
        for vcpu in vcpus.iter_mut() {
            vcpu.kvm_vcpu.init(&cpu_template.vcpu_features)?;
        }
        let mut regs = Aarch64RegisterVec::default();
        get_registers(&vcpus[0].kvm_vcpu.fd, &cpu_template.reg_list(), &mut regs)?;
        Ok(CpuConfiguration { regs })
    }

    /// Creates new guest CPU config based on the provided template.
    ///
    /// Applies each modifier's bitmap to the corresponding register value,
    /// truncating the 128-bit result back to the register's actual width.
    ///
    /// NOTE(review): modifiers are paired with registers positionally via
    /// `zip`; this relies on `self.regs` having been fetched from
    /// `template.reg_list()` so the order matches — confirm before reusing
    /// with registers from another source.
    pub fn apply_template(mut self, template: &CustomCpuTemplate) -> Self {
        for (modifier, mut reg) in template.reg_modifiers.iter().zip(self.regs.iter_mut()) {
            match reg.size() {
                RegSize::U32 => {
                    reg.set_value(
                        (modifier.bitmap.apply(u128::from(reg.value::<u32, 4>())) & 0xFFFF_FFFF)
                            as u32,
                    );
                }
                RegSize::U64 => {
                    reg.set_value(
                        (modifier.bitmap.apply(u128::from(reg.value::<u64, 8>()))
                            & 0xFFFF_FFFF_FFFF_FFFF) as u64,
                    );
                }
                RegSize::U128 => {
                    reg.set_value(modifier.bitmap.apply(reg.value::<u128, 16>()));
                }
                _ => unreachable!("Only 32, 64 and 128 bit wide registers are supported"),
            }
        }
        self
    }

    /// Returns ids of registers that are changed
    /// by this template.
    pub fn register_ids(&self) -> Vec<u64> {
        self.regs.iter().map(|reg| reg.id).collect()
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/aarch64/static_cpu_templates/mod.rs | src/vmm/src/cpu_config/aarch64/static_cpu_templates/mod.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use serde::{Deserialize, Serialize};
/// Module with V1N1 CPU template for aarch64
pub mod v1n1;
/// Templates available for configuring the supported ARM CPU types.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum StaticCpuTemplate {
/// Template to mask Neoverse-V1 as Neoverse-N1
V1N1,
/// No CPU template is used.
#[default]
None,
}
impl StaticCpuTemplate {
    /// Returns `true` when no static CPU template has been selected,
    /// i.e. this value is [`StaticCpuTemplate::None`].
    pub fn is_none(&self) -> bool {
        matches!(self, StaticCpuTemplate::None)
    }
}
impl std::fmt::Display for StaticCpuTemplate {
    /// Formats the template as its variant name ("V1N1" or "None").
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let name = match self {
            StaticCpuTemplate::V1N1 => "V1N1",
            StaticCpuTemplate::None => "None",
        };
        write!(f, "{}", name)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::cpu_config::test_utils::get_json_template;
#[test]
fn verify_consistency_with_json_templates() {
let static_templates = [(v1n1::v1n1(), "V1N1.json")];
for (hardcoded_template, filename) in static_templates {
let json_template = get_json_template(filename);
assert_eq!(hardcoded_template, json_template);
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/cpu_config/aarch64/static_cpu_templates/v1n1.rs | src/vmm/src/cpu_config/aarch64/static_cpu_templates/v1n1.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use crate::arch::aarch64::regs::{
ID_AA64ISAR0_EL1, ID_AA64ISAR1_EL1, ID_AA64MMFR2_EL1, ID_AA64PFR0_EL1,
};
use crate::cpu_config::aarch64::custom_cpu_template::{CustomCpuTemplate, RegisterModifier};
use crate::cpu_config::templates::RegisterValueFilter;
// Arm Armv8-A Architecture Registers documentation
// https://developer.arm.com/documentation/ddi0595/2021-12/AArch64-Registers?lang=en
/// Template to mask Neoverse-V1 as Neoverse-N1
/// Masks: dgh, asimdfhm, bf16, dcpodp, flagm, i8mm, sha3, sha512, sm3, sm4
/// sve, svebf16, svei8mm, uscat, fcma, jscvt, dit, ilrcpc, rng
pub fn v1n1() -> CustomCpuTemplate {
CustomCpuTemplate {
reg_modifiers: vec![
RegisterModifier {
// Disabling sve CPU feature. Setting to 0b0000.
// This disables sve, svebf16, svei8mm
// sve occupies bits [35:32] in ID_AA64PFR0_EL1.
//
// Disabling dit CPU feature. Setting to 0b0000.
// dit occupies bits [51:48] in ID_AA64PFR0_EL1.
addr: ID_AA64PFR0_EL1,
bitmap: RegisterValueFilter {
filter: 0x000F000F00000000,
value: 0x0000000000000000,
},
},
RegisterModifier {
// Disabling sha3 CPU feature. Setting sha3 to 0b0000.
// Disabling sha512 CPU feature. Setting sha2 to 0b0001.
// sha3 occupies bits [35:32] in ID_AA64ISAR0_EL1.
// sha2 occupies bits [15:12] in ID_AA64ISAR0_EL1.
//
// Note from the documentation:
// If the value of SHA2 field is 0b0010,
// ID_AA64ISAR0_EL1. SHA3 must have the value 0b0001
//
// Disabling sm3 and sm4 CPU features. Setting to 0b0000.
// sm3 occupies bits [39:36] in ID_AA64ISAR0_EL1.
// sm4 occupies bits [43:40] in ID_AA64ISAR0_EL1.
//
// Note from the documentation:
// "This field (sm3) must have the same value as ID_AA64ISAR0_EL1.SM4."
//
// Disabling asimdfhm (fhm) CPU feature. Setting to 0b0000.
// fhm occupies bits [51:48] in ID_AA64ISAR0_EL1.
//
// Disabling flagm (ts) CPU feature. Setting to 0b0000.
// ts occupies bits [55:52] in ID_AA64ISAR0_EL1.
//
// Disabling rnd (rndr) CPU feature. Setting to 0b0000.
// rndr occupies bits [63:60] in ID_AA64ISAR0_EL1.
addr: ID_AA64ISAR0_EL1,
bitmap: RegisterValueFilter {
filter: 0xF0FF0FFF0000F000,
value: 0x0000000000001000,
},
},
RegisterModifier {
// Disabling dcpodp (dpb) CPU feature. Setting to 0b0001.
// dpb occupies bits [3:0] in ID_AA64ISAR1_EL1.
//
// Disabling jscvt CPU feature. Setting to 0b0000.
// jscvt occupies bits [15:12] in ID_AA64ISAR1_EL1.
//
// Disabling fcma CPU feature. Setting to 0b0000.
// fcma occupies bits [19:16] in ID_AA64ISAR1_EL1.
//
// Disabling ilrcpc CPU feature. Setting to 0b0001.
// lrcpc occupies bits [23:20] in ID_AA64ISAR1_EL1.
//
// Disabling bf16 CPU feature. Setting to 0b0000.
// bf16 occupies bits [47:44] in ID_AA64ISAR1_EL1.
//
// Disabling dgh CPU feature. Setting to 0b0000.
// dgh occupies bits [51:48] in ID_AA64ISAR1_EL1.
//
// Disabling i8mm CPU feature. Setting to 0b0000.
// i8mm occupies bits [55:52] in ID_AA64ISAR1_EL1.
addr: ID_AA64ISAR1_EL1,
bitmap: RegisterValueFilter {
filter: 0x00FFF00000FFF00F,
value: 0x0000000000100001,
},
},
RegisterModifier {
// Disable uscat (at) CPU feature. Setting to 0b0000.
// at occupies bits [35:28] in ID_AA64MMFR2_EL1.
addr: ID_AA64MMFR2_EL1,
bitmap: RegisterValueFilter {
filter: 0x0000000F00000000,
value: 0x0000000000000000,
},
},
],
..Default::default()
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/mod.rs | src/vmm/src/arch/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fmt;
use std::sync::LazyLock;
use log::warn;
use serde::{Deserialize, Serialize};
use vm_memory::GuestAddress;
/// Module for aarch64 related functionality.
#[cfg(target_arch = "aarch64")]
pub mod aarch64;
#[cfg(target_arch = "aarch64")]
pub use aarch64::kvm::{Kvm, KvmArchError, OptionalCapabilities};
#[cfg(target_arch = "aarch64")]
pub use aarch64::vcpu::*;
#[cfg(target_arch = "aarch64")]
pub use aarch64::vm::{ArchVm, ArchVmError, VmState};
#[cfg(target_arch = "aarch64")]
pub use aarch64::{
ConfigurationError, arch_memory_regions, configure_system_for_boot, get_kernel_start,
initrd_load_addr, layout::*, load_kernel,
};
/// Module for x86_64 related functionality.
#[cfg(target_arch = "x86_64")]
pub mod x86_64;
#[cfg(target_arch = "x86_64")]
pub use x86_64::kvm::{Kvm, KvmArchError};
#[cfg(target_arch = "x86_64")]
pub use x86_64::vcpu::*;
#[cfg(target_arch = "x86_64")]
pub use x86_64::vm::{ArchVm, ArchVmError, VmState};
#[cfg(target_arch = "x86_64")]
pub use crate::arch::x86_64::{
ConfigurationError, arch_memory_regions, configure_system_for_boot, get_kernel_start,
initrd_load_addr, layout::*, load_kernel,
};
/// Types of devices that can get attached to this platform.
#[derive(Clone, Debug, PartialEq, Eq, Hash, Copy, Serialize, Deserialize)]
pub enum DeviceType {
/// Device Type: Virtio.
Virtio(u32),
/// Device Type: Serial.
#[cfg(target_arch = "aarch64")]
Serial,
/// Device Type: RTC.
#[cfg(target_arch = "aarch64")]
Rtc,
/// Device Type: BootTimer.
BootTimer,
}
/// Default page size for the guest OS.
pub const GUEST_PAGE_SIZE: usize = 4096;
/// Get the size of the host page size.
///
/// Queried from the kernel once via `sysconf(_SC_PAGESIZE)` and cached in a
/// `LazyLock`, so repeated calls only read the cached value. Falls back to
/// 4K (with a warning) if the sysconf result cannot be converted to `usize`,
/// i.e. when sysconf reports an error with a negative return value.
pub fn host_page_size() -> usize {
    /// Default page size for the host OS.
    static PAGE_SIZE: LazyLock<usize> = LazyLock::new(|| {
        // # Safety: Value always valid
        let r = unsafe { libc::sysconf(libc::_SC_PAGESIZE) };
        usize::try_from(r).unwrap_or_else(|_| {
            warn!("Could not get host page size with sysconf, assuming default 4K host pages");
            4096
        })
    });
    *PAGE_SIZE
}
impl fmt::Display for DeviceType {
    /// Displays the device type using its derived `Debug` representation.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(self, f)
    }
}
/// Supported boot protocols for
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum BootProtocol {
/// Linux 64-bit boot protocol
LinuxBoot,
#[cfg(target_arch = "x86_64")]
/// PVH boot protocol (x86/HVM direct boot ABI)
PvhBoot,
}
impl fmt::Display for BootProtocol {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
match self {
BootProtocol::LinuxBoot => write!(f, "Linux 64-bit boot protocol"),
#[cfg(target_arch = "x86_64")]
BootProtocol::PvhBoot => write!(f, "PVH boot protocol"),
}
}
}
#[derive(Debug, Copy, Clone)]
/// Specifies the entry point address where the guest must start
/// executing code, as well as which boot protocol is to be used
/// to configure the guest initial state.
pub struct EntryPoint {
/// Address in guest memory where the guest must start execution
pub entry_addr: GuestAddress,
/// Specifies which boot protocol to use
pub protocol: BootProtocol,
}
/// Adds in [`regions`] the valid memory regions suitable for RAM taking into account a gap in the
/// available address space and returns the remaining region (if any) past this gap.
///
/// The returned `(start, size)` pair, when `Some`, always begins at or after
/// the first address past the gap.
fn arch_memory_regions_with_gap(
    regions: &mut Vec<(GuestAddress, usize)>,
    region_start: usize,
    region_size: usize,
    gap_start: usize,
    gap_size: usize,
) -> Option<(usize, usize)> {
    // 0-sized gaps don't really make sense. We should never receive such a gap.
    assert!(gap_size > 0);
    let first_addr_past_gap = gap_start + gap_size;
    // `checked_sub` doubles as the "does the region reach the gap?" test:
    // `None` (or `Some(0)`) means the region ends at or before `gap_start`.
    match (region_start + region_size).checked_sub(gap_start) {
        // case0: region fits all before gap
        None | Some(0) => {
            regions.push((GuestAddress(region_start as u64), region_size));
            None
        }
        // case1: region starts before the gap and goes past it; push the part
        // before the gap and return the overflow, relocated after the gap.
        Some(remaining) if region_start < gap_start => {
            regions.push((GuestAddress(region_start as u64), gap_start - region_start));
            Some((first_addr_past_gap, remaining))
        }
        // case2: region starts past the gap (or inside it, in which case its
        // start is bumped to the first address past the gap while keeping the
        // full size).
        Some(_) => Some((first_addr_past_gap.max(region_start), region_size)),
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/x86_64/vm.rs | src/vmm/src/arch/x86_64/vm.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fmt;
use std::sync::{Arc, Mutex};
use kvm_bindings::{
KVM_CLOCK_TSC_STABLE, KVM_IRQCHIP_IOAPIC, KVM_IRQCHIP_PIC_MASTER, KVM_IRQCHIP_PIC_SLAVE,
KVM_PIT_SPEAKER_DUMMY, MsrList, kvm_clock_data, kvm_irqchip, kvm_pit_config, kvm_pit_state2,
};
use kvm_ioctls::Cap;
use serde::{Deserialize, Serialize};
use crate::arch::x86_64::msr::MsrError;
use crate::snapshot::Persist;
use crate::utils::u64_to_usize;
use crate::vstate::bus::Bus;
use crate::vstate::memory::{GuestMemoryExtension, GuestMemoryState};
use crate::vstate::resources::ResourceAllocator;
use crate::vstate::vm::{VmCommon, VmError};
/// Error type for [`Vm::restore_state`]
#[allow(missing_docs)]
#[cfg(target_arch = "x86_64")]
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum ArchVmError {
    // Fixed: `(0)` was a literal typo — displaydoc placeholders use braces.
    // `{0:?}` interpolates the capability via `Debug` (`Cap` has no `Display`).
    /// Failed to check KVM capability {0:?}: {1}
    CheckCapability(Cap, kvm_ioctls::Error),
    /// Set PIT2 error: {0}
    SetPit2(kvm_ioctls::Error),
    /// Set clock error: {0}
    SetClock(kvm_ioctls::Error),
    /// Set IrqChipPicMaster error: {0}
    SetIrqChipPicMaster(kvm_ioctls::Error),
    /// Set IrqChipPicSlave error: {0}
    SetIrqChipPicSlave(kvm_ioctls::Error),
    /// Set IrqChipIoAPIC error: {0}
    SetIrqChipIoAPIC(kvm_ioctls::Error),
    /// Failed to get KVM vm pit state: {0}
    VmGetPit2(kvm_ioctls::Error),
    /// Failed to get KVM vm clock: {0}
    VmGetClock(kvm_ioctls::Error),
    /// Failed to get KVM vm irqchip: {0}
    VmGetIrqChip(kvm_ioctls::Error),
    /// Failed to set KVM vm irqchip: {0}
    VmSetIrqChip(kvm_ioctls::Error),
    /// Failed to get MSR index list to save into snapshots: {0}
    GetMsrsToSave(MsrError),
    /// Failed during KVM_SET_TSS_ADDRESS: {0}
    SetTssAddress(kvm_ioctls::Error),
}
/// Structure representing the current architecture's understanding of what a "virtual machine" is.
#[derive(Debug)]
pub struct ArchVm {
    /// Architecture independent parts of a vm
    pub common: VmCommon,
    /// MSR index list to save into snapshots, queried from KVM once at VM
    /// creation and exposed via [`ArchVm::msrs_to_save`].
    msrs_to_save: MsrList,
    /// Size in bytes required to hold the dynamically-sized `kvm_xsave` struct.
    ///
    /// `None` if `KVM_CAP_XSAVE2` not supported.
    xsave2_size: Option<usize>,
    /// Port IO bus
    pub pio_bus: Arc<Bus>,
}
impl ArchVm {
/// Create a new `Vm` struct.
pub fn new(kvm: &crate::vstate::kvm::Kvm) -> Result<ArchVm, VmError> {
let common = Self::create_common(kvm)?;
let msrs_to_save = kvm.msrs_to_save().map_err(ArchVmError::GetMsrsToSave)?;
// `KVM_CAP_XSAVE2` was introduced to support dynamically-sized XSTATE buffer in kernel
// v5.17. `KVM_GET_EXTENSION(KVM_CAP_XSAVE2)` returns the required size in byte if
// supported; otherwise returns 0.
// https://github.com/torvalds/linux/commit/be50b2065dfa3d88428fdfdc340d154d96bf6848
//
// Cache the value in order not to call it at each vCPU creation.
let xsave2_size = match common.fd.check_extension_int(Cap::Xsave2) {
// Catch all negative values just in case although the possible negative return value
// of ioctl() is only -1.
..=-1 => {
return Err(VmError::Arch(ArchVmError::CheckCapability(
Cap::Xsave2,
vmm_sys_util::errno::Error::last(),
)));
}
0 => None,
// SAFETY: Safe because negative values are handled above.
ret => Some(usize::try_from(ret).unwrap()),
};
common
.fd
.set_tss_address(u64_to_usize(crate::arch::x86_64::layout::KVM_TSS_ADDRESS))
.map_err(ArchVmError::SetTssAddress)?;
let pio_bus = Arc::new(Bus::new());
Ok(ArchVm {
common,
msrs_to_save,
xsave2_size,
pio_bus,
})
}
/// Pre-vCPU creation setup.
pub fn arch_pre_create_vcpus(&mut self, _: u8) -> Result<(), ArchVmError> {
// For x86_64 we need to create the interrupt controller before calling `KVM_CREATE_VCPUS`
self.setup_irqchip()
}
/// Post-vCPU creation setup.
pub fn arch_post_create_vcpus(&mut self, _: u8) -> Result<(), ArchVmError> {
Ok(())
}
/// Restores the KVM VM state.
///
/// # Errors
///
/// When:
/// - [`kvm_ioctls::VmFd::set_pit`] errors.
/// - [`kvm_ioctls::VmFd::set_clock`] errors.
/// - [`kvm_ioctls::VmFd::set_irqchip`] errors.
/// - [`kvm_ioctls::VmFd::set_irqchip`] errors.
/// - [`kvm_ioctls::VmFd::set_irqchip`] errors.
pub fn restore_state(&mut self, state: &VmState) -> Result<(), ArchVmError> {
self.fd()
.set_pit2(&state.pitstate)
.map_err(ArchVmError::SetPit2)?;
self.fd()
.set_clock(&state.clock)
.map_err(ArchVmError::SetClock)?;
self.fd()
.set_irqchip(&state.pic_master)
.map_err(ArchVmError::SetIrqChipPicMaster)?;
self.fd()
.set_irqchip(&state.pic_slave)
.map_err(ArchVmError::SetIrqChipPicSlave)?;
self.fd()
.set_irqchip(&state.ioapic)
.map_err(ArchVmError::SetIrqChipIoAPIC)?;
self.common.resource_allocator = Mutex::new(state.resource_allocator.clone());
Ok(())
}
/// Creates the irq chip and an in-kernel device model for the PIT.
pub fn setup_irqchip(&self) -> Result<(), ArchVmError> {
self.fd()
.create_irq_chip()
.map_err(ArchVmError::VmSetIrqChip)?;
// We need to enable the emulation of a dummy speaker port stub so that writing to port 0x61
// (i.e. KVM_SPEAKER_BASE_ADDRESS) does not trigger an exit to user space.
let pit_config = kvm_pit_config {
flags: KVM_PIT_SPEAKER_DUMMY,
..Default::default()
};
self.fd()
.create_pit2(pit_config)
.map_err(ArchVmError::VmSetIrqChip)
}
/// Saves and returns the Kvm Vm state.
pub fn save_state(&self) -> Result<VmState, ArchVmError> {
let pitstate = self.fd().get_pit2().map_err(ArchVmError::VmGetPit2)?;
let mut clock = self.fd().get_clock().map_err(ArchVmError::VmGetClock)?;
// This bit is not accepted in SET_CLOCK, clear it.
clock.flags &= !KVM_CLOCK_TSC_STABLE;
let mut pic_master = kvm_irqchip {
chip_id: KVM_IRQCHIP_PIC_MASTER,
..Default::default()
};
self.fd()
.get_irqchip(&mut pic_master)
.map_err(ArchVmError::VmGetIrqChip)?;
let mut pic_slave = kvm_irqchip {
chip_id: KVM_IRQCHIP_PIC_SLAVE,
..Default::default()
};
self.fd()
.get_irqchip(&mut pic_slave)
.map_err(ArchVmError::VmGetIrqChip)?;
let mut ioapic = kvm_irqchip {
chip_id: KVM_IRQCHIP_IOAPIC,
..Default::default()
};
self.fd()
.get_irqchip(&mut ioapic)
.map_err(ArchVmError::VmGetIrqChip)?;
Ok(VmState {
memory: self.common.guest_memory.describe(),
resource_allocator: self.resource_allocator().save(),
pitstate,
clock,
pic_master,
pic_slave,
ioapic,
})
}
/// Gets the list of MSRs to save when creating snapshots
pub fn msrs_to_save(&self) -> &[u32] {
self.msrs_to_save.as_slice()
}
/// Gets the size (in bytes) of the `kvm_xsave` struct.
pub fn xsave2_size(&self) -> Option<usize> {
self.xsave2_size
}
}
#[derive(Default, Deserialize, Serialize)]
/// Structure holding VM kvm state.
pub struct VmState {
/// guest memory state
pub memory: GuestMemoryState,
/// resource allocator
pub resource_allocator: ResourceAllocator,
pitstate: kvm_pit_state2,
clock: kvm_clock_data,
// TODO: rename this field to adopt inclusive language once Linux updates it, too.
pic_master: kvm_irqchip,
// TODO: rename this field to adopt inclusive language once Linux updates it, too.
pic_slave: kvm_irqchip,
ioapic: kvm_irqchip,
}
impl fmt::Debug for VmState {
    /// Manual `Debug` impl: the three irqchip structures are elided as "?"
    /// rather than dumped field by field, and the memory/resource-allocator
    /// fields are omitted entirely, matching the derived-free original.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = f.debug_struct("VmState");
        dbg.field("pitstate", &self.pitstate);
        dbg.field("clock", &self.clock);
        for elided in ["pic_master", "pic_slave", "ioapic"] {
            dbg.field(elided, &"?");
        }
        dbg.finish()
    }
}
#[cfg(test)]
mod tests {
use kvm_bindings::{
KVM_CLOCK_TSC_STABLE, KVM_IRQCHIP_IOAPIC, KVM_IRQCHIP_PIC_MASTER, KVM_IRQCHIP_PIC_SLAVE,
KVM_PIT_SPEAKER_DUMMY,
};
use crate::snapshot::Snapshot;
use crate::vstate::vm::VmState;
use crate::vstate::vm::tests::{setup_vm, setup_vm_with_memory};
#[cfg(target_arch = "x86_64")]
#[test]
fn test_vm_save_restore_state() {
let (_, vm) = setup_vm();
// Irqchips, clock and pitstate are not configured so trying to save state should fail.
vm.save_state().unwrap_err();
let (_, vm) = setup_vm_with_memory(0x1000);
vm.setup_irqchip().unwrap();
let vm_state = vm.save_state().unwrap();
assert_eq!(
vm_state.pitstate.flags | KVM_PIT_SPEAKER_DUMMY,
KVM_PIT_SPEAKER_DUMMY
);
assert_eq!(vm_state.clock.flags & KVM_CLOCK_TSC_STABLE, 0);
assert_eq!(vm_state.pic_master.chip_id, KVM_IRQCHIP_PIC_MASTER);
assert_eq!(vm_state.pic_slave.chip_id, KVM_IRQCHIP_PIC_SLAVE);
assert_eq!(vm_state.ioapic.chip_id, KVM_IRQCHIP_IOAPIC);
let (_, mut vm) = setup_vm_with_memory(0x1000);
vm.setup_irqchip().unwrap();
vm.restore_state(&vm_state).unwrap();
}
#[cfg(target_arch = "x86_64")]
#[test]
fn test_vm_save_restore_state_bad_irqchip() {
use kvm_bindings::KVM_NR_IRQCHIPS;
let (_, vm) = setup_vm_with_memory(0x1000);
vm.setup_irqchip().unwrap();
let mut vm_state = vm.save_state().unwrap();
let (_, mut vm) = setup_vm_with_memory(0x1000);
vm.setup_irqchip().unwrap();
// Try to restore an invalid PIC Master chip ID
let orig_master_chip_id = vm_state.pic_master.chip_id;
vm_state.pic_master.chip_id = KVM_NR_IRQCHIPS;
vm.restore_state(&vm_state).unwrap_err();
vm_state.pic_master.chip_id = orig_master_chip_id;
// Try to restore an invalid PIC Slave chip ID
let orig_slave_chip_id = vm_state.pic_slave.chip_id;
vm_state.pic_slave.chip_id = KVM_NR_IRQCHIPS;
vm.restore_state(&vm_state).unwrap_err();
vm_state.pic_slave.chip_id = orig_slave_chip_id;
// Try to restore an invalid IOPIC chip ID
vm_state.ioapic.chip_id = KVM_NR_IRQCHIPS;
vm.restore_state(&vm_state).unwrap_err();
}
#[cfg(target_arch = "x86_64")]
#[test]
fn test_vmstate_serde() {
let mut snapshot_data = vec![0u8; 10000];
let (_, mut vm) = setup_vm_with_memory(0x1000);
vm.setup_irqchip().unwrap();
let state = vm.save_state().unwrap();
Snapshot::new(state)
.save(&mut snapshot_data.as_mut_slice())
.unwrap();
let restored_state: VmState = Snapshot::load_without_crc_check(snapshot_data.as_slice())
.unwrap()
.data;
vm.restore_state(&restored_state).unwrap();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/x86_64/cpu_model.rs | src/vmm/src/arch/x86_64/cpu_model.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::arch::x86_64::__cpuid as host_cpuid;
use std::cmp::{Eq, PartialEq};
/// Structure representing x86_64 CPU model.
#[derive(Debug, Eq, PartialEq)]
pub struct CpuModel {
/// Extended family.
pub extended_family: u8,
/// Extended model.
pub extended_model: u8,
/// Family.
pub family: u8,
/// Model.
pub model: u8,
/// Stepping.
pub stepping: u8,
}
/// Family / Model / Stepping for Intel Skylake
pub const SKYLAKE_FMS: CpuModel = CpuModel {
extended_family: 0x0,
extended_model: 0x5,
family: 0x6,
model: 0x5,
stepping: 0x4,
};
/// Family / Model / Stepping for Intel Cascade Lake
pub const CASCADE_LAKE_FMS: CpuModel = CpuModel {
extended_family: 0x0,
extended_model: 0x5,
family: 0x6,
model: 0x5,
stepping: 0x7,
};
/// Family / Model / Stepping for Intel Ice Lake
pub const ICE_LAKE_FMS: CpuModel = CpuModel {
extended_family: 0x0,
extended_model: 0x6,
family: 0x6,
model: 0xa,
stepping: 0x6,
};
/// Family / Model / Stepping for AMD Milan
pub const MILAN_FMS: CpuModel = CpuModel {
extended_family: 0xa,
extended_model: 0x0,
family: 0xf,
model: 0x1,
stepping: 0x1,
};
impl CpuModel {
/// Get CPU model from current machine.
pub fn get_cpu_model() -> Self {
// SAFETY: This operation is safe as long as the processor implements this CPUID function.
// 0x1 is the defined code for getting the processor version information.
let eax = unsafe { host_cpuid(0x1) }.eax;
CpuModel::from(&eax)
}
}
impl From<&u32> for CpuModel {
fn from(eax: &u32) -> Self {
CpuModel {
extended_family: ((eax >> 20) & 0xff) as u8,
extended_model: ((eax >> 16) & 0xf) as u8,
family: ((eax >> 8) & 0xf) as u8,
model: ((eax >> 4) & 0xf) as u8,
stepping: (eax & 0xf) as u8,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn cpu_model_from() {
let skylake_eax = 0x00050654;
assert_eq!(CpuModel::from(&skylake_eax), SKYLAKE_FMS);
let cascade_lake_eax = 0x00050657;
assert_eq!(CpuModel::from(&cascade_lake_eax), CASCADE_LAKE_FMS);
let ice_lake_eax = 0x000606a6;
assert_eq!(CpuModel::from(&ice_lake_eax), ICE_LAKE_FMS);
let milan_eax = 0x00a00f11;
assert_eq!(CpuModel::from(&milan_eax), MILAN_FMS);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/x86_64/interrupts.rs | src/vmm/src/arch/x86_64/interrupts.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use kvm_bindings::kvm_lapic_state;
use kvm_ioctls::VcpuFd;
use zerocopy::IntoBytes;
use crate::utils::byte_order;
/// Errors thrown while configuring the LAPIC.
#[derive(Debug, thiserror::Error, displaydoc::Display, PartialEq, Eq)]
pub enum InterruptError {
/// Failure in getting the LAPIC configuration: {0}
GetLapic(kvm_ioctls::Error),
/// Failure in setting the LAPIC configuration: {0}
SetLapic(kvm_ioctls::Error),
}
// Defines poached from apicdef.h kernel header.
const APIC_LVT0: usize = 0x350;
const APIC_LVT1: usize = 0x360;
const APIC_MODE_NMI: u32 = 0x4;
const APIC_MODE_EXTINT: u32 = 0x7;
fn get_klapic_reg(klapic: &kvm_lapic_state, reg_offset: usize) -> u32 {
let range = reg_offset..reg_offset + 4;
let reg = klapic.regs.get(range).expect("get_klapic_reg range");
byte_order::read_le_u32(reg.as_bytes())
}
fn set_klapic_reg(klapic: &mut kvm_lapic_state, reg_offset: usize, value: u32) {
let range = reg_offset..reg_offset + 4;
let reg = klapic.regs.get_mut(range).expect("set_klapic_reg range");
byte_order::write_le_u32(reg.as_mut_bytes(), value);
}
fn set_apic_delivery_mode(reg: u32, mode: u32) -> u32 {
((reg) & !0x700) | ((mode) << 8)
}
/// Configures LAPICs. LAPIC0 is set for external interrupts, LAPIC1 is set for NMI.
///
/// # Arguments
/// * `vcpu` - The VCPU object to configure.
pub fn set_lint(vcpu: &VcpuFd) -> Result<(), InterruptError> {
let mut klapic = vcpu.get_lapic().map_err(InterruptError::GetLapic)?;
let lvt_lint0 = get_klapic_reg(&klapic, APIC_LVT0);
set_klapic_reg(
&mut klapic,
APIC_LVT0,
set_apic_delivery_mode(lvt_lint0, APIC_MODE_EXTINT),
);
let lvt_lint1 = get_klapic_reg(&klapic, APIC_LVT1);
set_klapic_reg(
&mut klapic,
APIC_LVT1,
set_apic_delivery_mode(lvt_lint1, APIC_MODE_NMI),
);
vcpu.set_lapic(&klapic).map_err(InterruptError::SetLapic)
}
#[cfg(test)]
mod tests {
use kvm_ioctls::Kvm;
use super::*;
const KVM_APIC_REG_SIZE: usize = 0x400;
#[test]
fn test_set_and_get_klapic_reg() {
let reg_offset = 0x340;
let mut klapic = kvm_lapic_state::default();
set_klapic_reg(&mut klapic, reg_offset, 3);
let value = get_klapic_reg(&klapic, reg_offset);
assert_eq!(value, 3);
}
#[test]
fn test_set_and_get_klapic_reg_overflow() {
let reg_offset = 0x340;
let mut klapic = kvm_lapic_state::default();
set_klapic_reg(
&mut klapic,
reg_offset,
u32::try_from(i32::MAX).unwrap() + 1u32,
);
let value = get_klapic_reg(&klapic, reg_offset);
assert_eq!(value, u32::try_from(i32::MAX).unwrap() + 1u32);
}
#[test]
#[should_panic]
fn test_set_and_get_klapic_out_of_bounds() {
let reg_offset = KVM_APIC_REG_SIZE + 10;
let mut klapic = kvm_lapic_state::default();
set_klapic_reg(&mut klapic, reg_offset, 3);
}
#[test]
fn test_apic_delivery_mode() {
let mut v: Vec<u32> = (0..20)
.map(|_| vmm_sys_util::rand::xor_pseudo_rng_u32())
.collect();
v.iter_mut()
.for_each(|x| *x = set_apic_delivery_mode(*x, 2));
let after: Vec<u32> = v.iter().map(|x| ((*x & !0x700) | ((2) << 8))).collect();
assert_eq!(v, after);
}
#[test]
fn test_setlint() {
let kvm = Kvm::new().unwrap();
assert!(kvm.check_extension(kvm_ioctls::Cap::Irqchip));
let vm = kvm.create_vm().unwrap();
// the get_lapic ioctl will fail if there is no irqchip created beforehand.
vm.create_irq_chip().unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
let klapic_before: kvm_lapic_state = vcpu.get_lapic().unwrap();
// Compute the value that is expected to represent LVT0 and LVT1.
let lint0 = get_klapic_reg(&klapic_before, APIC_LVT0);
let lint1 = get_klapic_reg(&klapic_before, APIC_LVT1);
let lint0_mode_expected = set_apic_delivery_mode(lint0, APIC_MODE_EXTINT);
let lint1_mode_expected = set_apic_delivery_mode(lint1, APIC_MODE_NMI);
set_lint(&vcpu).unwrap();
// Compute the value that represents LVT0 and LVT1 after set_lint.
let klapic_actual: kvm_lapic_state = vcpu.get_lapic().unwrap();
let lint0_mode_actual = get_klapic_reg(&klapic_actual, APIC_LVT0);
let lint1_mode_actual = get_klapic_reg(&klapic_actual, APIC_LVT1);
assert_eq!(lint0_mode_expected, lint0_mode_actual);
assert_eq!(lint1_mode_expected, lint1_mode_actual);
}
#[test]
fn test_setlint_fails() {
let kvm = Kvm::new().unwrap();
let vm = kvm.create_vm().unwrap();
let vcpu = vm.create_vcpu(0).unwrap();
// 'get_lapic' ioctl triggered by the 'set_lint' function will fail if there is no
// irqchip created beforehand.
set_lint(&vcpu).unwrap_err();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/x86_64/kvm.rs | src/vmm/src/arch/x86_64/kvm.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use kvm_bindings::{CpuId, KVM_MAX_CPUID_ENTRIES, MsrList};
use kvm_ioctls::Kvm as KvmFd;
use crate::arch::x86_64::xstate::{XstateError, request_dynamic_xstate_features};
use crate::cpu_config::templates::KvmCapability;
/// Architecture specific error for KVM initialization
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum KvmArchError {
/// Failed to get supported cpuid: {0}
GetSupportedCpuId(kvm_ioctls::Error),
/// Failed to request permission for dynamic XSTATE features: {0}
XstateFeatures(XstateError),
}
/// Struct with kvm fd and kvm associated parameters.
#[derive(Debug)]
pub struct Kvm {
/// KVM fd.
pub fd: KvmFd,
/// Additional capabilities that were specified in cpu template.
pub kvm_cap_modifiers: Vec<KvmCapability>,
/// Supported CpuIds.
pub supported_cpuid: CpuId,
}
impl Kvm {
pub(crate) const DEFAULT_CAPABILITIES: [u32; 14] = [
kvm_bindings::KVM_CAP_IRQCHIP,
kvm_bindings::KVM_CAP_IOEVENTFD,
kvm_bindings::KVM_CAP_IRQFD,
kvm_bindings::KVM_CAP_USER_MEMORY,
kvm_bindings::KVM_CAP_SET_TSS_ADDR,
kvm_bindings::KVM_CAP_PIT2,
kvm_bindings::KVM_CAP_PIT_STATE2,
kvm_bindings::KVM_CAP_ADJUST_CLOCK,
kvm_bindings::KVM_CAP_DEBUGREGS,
kvm_bindings::KVM_CAP_MP_STATE,
kvm_bindings::KVM_CAP_VCPU_EVENTS,
kvm_bindings::KVM_CAP_XCRS,
kvm_bindings::KVM_CAP_XSAVE,
kvm_bindings::KVM_CAP_EXT_CPUID,
];
/// Initialize [`Kvm`] type for x86_64 architecture
pub fn init_arch(
fd: KvmFd,
kvm_cap_modifiers: Vec<KvmCapability>,
) -> Result<Self, KvmArchError> {
request_dynamic_xstate_features().map_err(KvmArchError::XstateFeatures)?;
let supported_cpuid = fd
.get_supported_cpuid(KVM_MAX_CPUID_ENTRIES)
.map_err(KvmArchError::GetSupportedCpuId)?;
Ok(Kvm {
fd,
kvm_cap_modifiers,
supported_cpuid,
})
}
/// Msrs needed to be saved on snapshot creation.
pub fn msrs_to_save(&self) -> Result<MsrList, crate::arch::x86_64::msr::MsrError> {
crate::arch::x86_64::msr::get_msrs_to_save(&self.fd)
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/x86_64/xstate.rs | src/vmm/src/arch/x86_64/xstate.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vmm_sys_util::syscall::SyscallReturnCode;
use crate::arch::x86_64::generated::arch_prctl;
use crate::logger::info;
const INTEL_AMX_MASK: u64 = 1u64 << arch_prctl::ARCH_XCOMP_TILEDATA;
/// Errors assocaited with x86_64's dynamic XSAVE state features.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum XstateError {
/// Failed to get supported XSTATE features: {0}
GetSupportedFeatures(std::io::Error),
/// Failed to request permission for XSTATE feature ({0}): {1}
RequestFeaturePermission(u32, std::io::Error),
}
/// Request permission for all dynamic XSTATE features.
///
/// Some XSTATE features are not permitted by default, because they may require a larger area to
/// save their states than the tranditional 4096-byte area. Instead, the permission for them can be
/// requested via arch_prctl().
/// https://github.com/torvalds/linux/blob/master/Documentation/arch/x86/xstate.rst
///
/// Firecracker requests permission for them by default if available in order to retrieve the
/// full supported feature set via KVM_GET_SUPPORTED_CPUID.
/// https://docs.kernel.org/virt/kvm/api.html#kvm-get-supported-cpuid
///
/// Note that requested features can be masked by a CPU template.
pub fn request_dynamic_xstate_features() -> Result<(), XstateError> {
let supported_xfeatures =
match get_supported_xfeatures().map_err(XstateError::GetSupportedFeatures)? {
Some(supported_xfeatures) => supported_xfeatures,
// Exit early if dynamic XSTATE feature enabling is not supported on the kernel.
None => return Ok(()),
};
// Intel AMX's TILEDATA
//
// Unless requested, on kernels prior to v6.4, KVM_GET_SUPPORTED_CPUID returns an
// inconsistent state where TILECFG is set but TILEDATA isn't. Such a half-enabled state
// causes guest crash during boot because a guest calls XSETBV instruction with all
// XSAVE feature bits enumerated on CPUID and XSETBV only accepts either of both Intel
// AMX bits enabled or disabled; otherwise resulting in general protection fault.
// https://lore.kernel.org/all/20230405004520.421768-1-seanjc@google.com/
if supported_xfeatures & INTEL_AMX_MASK == INTEL_AMX_MASK {
request_xfeature_permission(arch_prctl::ARCH_XCOMP_TILEDATA).map_err(|err| {
XstateError::RequestFeaturePermission(arch_prctl::ARCH_XCOMP_TILEDATA, err)
})?;
}
Ok(())
}
/// Get supported XSTATE features
///
/// Returns Ok(None) if dynamic XSTATE feature enabling is not supported.
fn get_supported_xfeatures() -> Result<Option<u64>, std::io::Error> {
let mut supported_xfeatures: u64 = 0;
// SAFETY: Safe because the third input (`addr`) is a valid `c_ulong` pointer.
// https://man7.org/linux/man-pages/man2/arch_prctl.2.html
match SyscallReturnCode(unsafe {
libc::syscall(
libc::SYS_arch_prctl,
arch_prctl::ARCH_GET_XCOMP_SUPP,
&mut supported_xfeatures as *mut libc::c_ulong,
)
})
.into_empty_result()
{
Ok(()) => Ok(Some(supported_xfeatures)),
// EINVAL is returned if the dynamic XSTATE feature enabling is not supported (e.g. kernel
// version prior to v5.16).
// https://github.com/torvalds/linux/commit/db8268df0983adc2bb1fb48c9e5f7bfbb5f617f3
Err(err) if err.raw_os_error() == Some(libc::EINVAL) => {
info!("Dynamic XSTATE feature enabling is not supported.");
Ok(None)
}
Err(err) => Err(err),
}
}
/// Request permission for a dynamic XSTATE feature.
///
/// This should be called after `get_supported_xfeatures()` that retrieves supported dynamic XSTATE
/// features.
///
/// Returns Ok(()) if the permission request succeeded or dynamic XSTATE feature enabling for
/// "guest" is not supported.
fn request_xfeature_permission(xfeature: u32) -> Result<(), std::io::Error> {
// SAFETY: Safe because the third input (`addr`) is a valid `c_ulong` value.
// https://man7.org/linux/man-pages/man2/arch_prctl.2.html
match SyscallReturnCode(unsafe {
libc::syscall(
libc::SYS_arch_prctl,
arch_prctl::ARCH_REQ_XCOMP_GUEST_PERM as libc::c_ulong,
xfeature as libc::c_ulong,
)
})
.into_empty_result()
{
Ok(()) => Ok(()),
// EINVAL is returned if the dynamic XSTATE feature enabling for "guest" is not supported
// although that for "userspace application" is supported (e.g. kernel versions >= 5.16 and
// < 5.17).
// https://github.com/torvalds/linux/commit/980fe2fddcff21937c93532b4597c8ea450346c1
//
// Note that XFEATURE_MASK_XTILE (= XFEATURE_MASK_XTILE_DATA | XFEATURE_MASK_XTILE_CFG) was
// also added to KVM_SUPPORTED_XCR0 in kernel v5.17. KVM_SUPPORTED_XCR0 is used to
// initialize the guest-supported XCR0. Thus, KVM_GET_SUPPORTED_CPUID doesn't
// return AMX-half-enabled state, where XTILE_CFG is set but XTILE_DATA is unset, on such
// kernels.
// https://github.com/torvalds/linux/commit/86aff7a4799286635efd94dab17b513544703cad
// https://github.com/torvalds/linux/blame/f443e374ae131c168a065ea1748feac6b2e76613/arch/x86/kvm/x86.c#L8850-L8853
// https://github.com/firecracker-microvm/firecracker/pull/5065
Err(err) if err.raw_os_error() == Some(libc::EINVAL) => {
info!("Dynamic XSTATE feature enabling is not supported for guest.");
Ok(())
}
Err(err) => Err(err),
}
}
#[cfg(test)]
mod tests {
use super::*;
// Get permitted XSTATE features.
fn get_permitted_xstate_features() -> Result<u64, std::io::Error> {
let mut permitted_xfeatures: u64 = 0;
// SAFETY: Safe because the third input (`addr`) is a valid `c_ulong` pointer.
match SyscallReturnCode(unsafe {
libc::syscall(
libc::SYS_arch_prctl,
arch_prctl::ARCH_GET_XCOMP_GUEST_PERM,
&mut permitted_xfeatures as *mut libc::c_ulong,
)
})
.into_empty_result()
{
Ok(()) => Ok(permitted_xfeatures),
Err(err) => Err(err),
}
}
#[test]
fn test_request_xstate_feature_permission() {
request_dynamic_xstate_features().unwrap();
let supported_xfeatures = match get_supported_xfeatures().unwrap() {
Some(supported_xfeatures) => supported_xfeatures,
// Nothing to test if dynamic XSTATE feature enabling is not supported on the kernel.
None => return,
};
// Check each dynamic feature is enabled. (currently only Intel AMX TILEDATA)
if supported_xfeatures & INTEL_AMX_MASK == INTEL_AMX_MASK {
let permitted_xfeatures = get_permitted_xstate_features().unwrap();
assert_eq!(permitted_xfeatures & INTEL_AMX_MASK, INTEL_AMX_MASK);
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/x86_64/vcpu.rs | src/vmm/src/arch/x86_64/vcpu.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::collections::BTreeMap;
use std::fmt::Debug;
use std::sync::Arc;
use kvm_bindings::{
CpuId, KVM_MAX_CPUID_ENTRIES, KVM_MAX_MSR_ENTRIES, Msrs, Xsave, kvm_debugregs, kvm_lapic_state,
kvm_mp_state, kvm_regs, kvm_sregs, kvm_vcpu_events, kvm_xcrs, kvm_xsave, kvm_xsave2,
};
use kvm_ioctls::{VcpuExit, VcpuFd};
use log::{error, warn};
use serde::{Deserialize, Serialize};
use vmm_sys_util::fam::{self, FamStruct};
use crate::arch::EntryPoint;
use crate::arch::x86_64::generated::msr_index::{MSR_IA32_TSC, MSR_IA32_TSC_DEADLINE};
use crate::arch::x86_64::interrupts;
use crate::arch::x86_64::msr::{MsrError, create_boot_msr_entries};
use crate::arch::x86_64::regs::{SetupFpuError, SetupRegistersError, SetupSpecialRegistersError};
use crate::cpu_config::x86_64::{CpuConfiguration, cpuid};
use crate::logger::{IncMetric, METRICS};
use crate::vstate::bus::Bus;
use crate::vstate::memory::GuestMemoryMmap;
use crate::vstate::vcpu::{VcpuConfig, VcpuEmulation, VcpuError};
use crate::vstate::vm::Vm;
// Tolerance for TSC frequency expected variation.
// The value of 250 parts per million is based on
// the QEMU approach, more details here:
// https://bugzilla.redhat.com/show_bug.cgi?id=1839095
const TSC_KHZ_TOL_NUMERATOR: i64 = 250;
const TSC_KHZ_TOL_DENOMINATOR: i64 = 1_000_000;
/// A set of MSRs that should be restored separately after all other MSRs have already been restored
const DEFERRED_MSRS: [u32; 1] = [
// MSR_IA32_TSC_DEADLINE must be restored after MSR_IA32_TSC, otherwise we risk "losing" timer
// interrupts across the snapshot restore boundary (due to KVM querying MSR_IA32_TSC upon
// writes to the TSC_DEADLINE MSR to determine whether it needs to prime a timer - if
// MSR_IA32_TSC is not initialized correctly, it can wrongly assume no timer needs to be
// primed, or the timer can be initialized with a wrong expiry).
MSR_IA32_TSC_DEADLINE,
];
/// Errors associated with the wrappers over KVM ioctls.
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum KvmVcpuError {
/// Failed to convert `kvm_bindings::CpuId` to `Cpuid`: {0}
ConvertCpuidType(#[from] cpuid::CpuidTryFromKvmCpuid),
/// Failed FamStructWrapper operation: {0}
Fam(#[from] vmm_sys_util::fam::Error),
/// Failed to get dumpable MSR index list: {0}
GetMsrsToDump(#[from] crate::arch::x86_64::msr::MsrError),
/// Cannot open the VCPU file descriptor: {0}
VcpuFd(kvm_ioctls::Error),
/// Failed to get KVM vcpu debug regs: {0}
VcpuGetDebugRegs(kvm_ioctls::Error),
/// Failed to get KVM vcpu lapic: {0}
VcpuGetLapic(kvm_ioctls::Error),
/// Failed to get KVM vcpu mp state: {0}
VcpuGetMpState(kvm_ioctls::Error),
/// Failed to get KVM vcpu msr: {0:#x}
VcpuGetMsr(u32),
/// Failed to get KVM vcpu msrs: {0}
VcpuGetMsrs(kvm_ioctls::Error),
/// Failed to get KVM vcpu regs: {0}
VcpuGetRegs(kvm_ioctls::Error),
/// Failed to get KVM vcpu sregs: {0}
VcpuGetSregs(kvm_ioctls::Error),
/// Failed to get KVM vcpu event: {0}
VcpuGetVcpuEvents(kvm_ioctls::Error),
/// Failed to get KVM vcpu xcrs: {0}
VcpuGetXcrs(kvm_ioctls::Error),
/// Failed to get KVM vcpu xsave via KVM_GET_XSAVE: {0}
VcpuGetXsave(kvm_ioctls::Error),
/// Failed to get KVM vcpu xsave via KVM_GET_XSAVE2: {0}
VcpuGetXsave2(kvm_ioctls::Error),
/// Failed to get KVM vcpu cpuid: {0}
VcpuGetCpuid(kvm_ioctls::Error),
/// Failed to get KVM TSC frequency: {0}
VcpuGetTsc(kvm_ioctls::Error),
/// Failed to set KVM vcpu cpuid: {0}
VcpuSetCpuid(kvm_ioctls::Error),
/// Failed to set KVM vcpu debug regs: {0}
VcpuSetDebugRegs(kvm_ioctls::Error),
/// Failed to set KVM vcpu lapic: {0}
VcpuSetLapic(kvm_ioctls::Error),
/// Failed to set KVM vcpu mp state: {0}
VcpuSetMpState(kvm_ioctls::Error),
/// Failed to set KVM vcpu msrs: {0}
VcpuSetMsrs(kvm_ioctls::Error),
/// Failed to set all KVM MSRs for this vCPU. Only a partial write was done.
VcpuSetMsrsIncomplete,
/// Failed to set KVM vcpu regs: {0}
VcpuSetRegs(kvm_ioctls::Error),
/// Failed to set KVM vcpu sregs: {0}
VcpuSetSregs(kvm_ioctls::Error),
/// Failed to set KVM vcpu event: {0}
VcpuSetVcpuEvents(kvm_ioctls::Error),
/// Failed to set KVM vcpu xcrs: {0}
VcpuSetXcrs(kvm_ioctls::Error),
/// Failed to set KVM vcpu xsave: {0}
VcpuSetXsave(kvm_ioctls::Error),
}
/// Error type for [`KvmVcpu::get_tsc_khz`] and [`KvmVcpu::is_tsc_scaling_required`].
#[derive(Debug, thiserror::Error, derive_more::From, Eq, PartialEq)]
#[error("{0}")]
pub struct GetTscError(vmm_sys_util::errno::Error);
/// Error type for [`KvmVcpu::set_tsc_khz`].
#[derive(Debug, thiserror::Error, Eq, PartialEq)]
#[error("{0}")]
pub struct SetTscError(#[from] kvm_ioctls::Error);
/// Error type for [`KvmVcpu::configure`].
#[derive(Debug, thiserror::Error, displaydoc::Display, Eq, PartialEq)]
pub enum KvmVcpuConfigureError {
/// Failed to convert `Cpuid` to `kvm_bindings::CpuId`: {0}
ConvertCpuidType(#[from] vmm_sys_util::fam::Error),
/// Failed to apply modifications to CPUID: {0}
NormalizeCpuidError(#[from] cpuid::NormalizeCpuidError),
/// Failed to set CPUID: {0}
SetCpuid(#[from] vmm_sys_util::errno::Error),
/// Failed to set MSRs: {0}
SetMsrs(#[from] MsrError),
/// Failed to setup registers: {0}
SetupRegisters(#[from] SetupRegistersError),
/// Failed to setup FPU: {0}
SetupFpu(#[from] SetupFpuError),
/// Failed to setup special registers: {0}
SetupSpecialRegisters(#[from] SetupSpecialRegistersError),
/// Failed to configure LAPICs: {0}
SetLint(#[from] interrupts::InterruptError),
}
/// A wrapper around creating and using a kvm x86_64 vcpu.
#[derive(Debug)]
pub struct KvmVcpu {
/// Index of vcpu.
pub index: u8,
/// KVM vcpu fd.
pub fd: VcpuFd,
/// Vcpu peripherals, such as buses
pub peripherals: Peripherals,
/// The list of MSRs to include in a VM snapshot, in the same order as KVM returned them
/// from KVM_GET_MSR_INDEX_LIST
msrs_to_save: Vec<u32>,
/// Size in bytes requiring to hold the dynamically-sized `kvm_xsave` struct.
///
/// `None` if `KVM_CAP_XSAVE2` not supported.
xsave2_size: Option<usize>,
}
/// Vcpu peripherals
#[derive(Default, Debug)]
pub struct Peripherals {
/// Pio bus.
pub pio_bus: Option<Arc<Bus>>,
/// Mmio bus.
pub mmio_bus: Option<Arc<Bus>>,
}
impl KvmVcpu {
/// Constructs a new kvm vcpu with arch specific functionality.
///
/// # Arguments
///
/// * `index` - Represents the 0-based CPU index between [0, max vcpus).
/// * `vm` - The vm to which this vcpu will get attached.
pub fn new(index: u8, vm: &Vm) -> Result<Self, KvmVcpuError> {
let kvm_vcpu = vm
.fd()
.create_vcpu(index.into())
.map_err(KvmVcpuError::VcpuFd)?;
Ok(KvmVcpu {
index,
fd: kvm_vcpu,
peripherals: Default::default(),
msrs_to_save: vm.msrs_to_save().to_vec(),
xsave2_size: vm.xsave2_size(),
})
}
/// Configures a x86_64 specific vcpu for booting Linux and should be called once per vcpu.
///
/// # Arguments
///
/// * `guest_mem` - The guest memory used by this microvm.
/// * `kernel_entry_point` - Specifies the boot protocol and offset from `guest_mem` at which
/// the kernel starts.
/// * `vcpu_config` - The vCPU configuration.
/// * `cpuid` - The capabilities exposed by this vCPU.
pub fn configure(
&mut self,
guest_mem: &GuestMemoryMmap,
kernel_entry_point: EntryPoint,
vcpu_config: &VcpuConfig,
) -> Result<(), KvmVcpuConfigureError> {
let mut cpuid = vcpu_config.cpu_config.cpuid.clone();
// Apply machine specific changes to CPUID.
cpuid.normalize(
// The index of the current logical CPU in the range [0..cpu_count].
self.index,
// The total number of logical CPUs.
vcpu_config.vcpu_count,
// The number of bits needed to enumerate logical CPUs per core.
u8::from(vcpu_config.vcpu_count > 1 && vcpu_config.smt),
)?;
// Set CPUID.
let kvm_cpuid = kvm_bindings::CpuId::try_from(cpuid)?;
// Set CPUID in the KVM
self.fd
.set_cpuid2(&kvm_cpuid)
.map_err(KvmVcpuConfigureError::SetCpuid)?;
// Clone MSR entries that are modified by CPU template from `VcpuConfig`.
let mut msrs = vcpu_config.cpu_config.msrs.clone();
self.msrs_to_save.extend(msrs.keys());
// Apply MSR modification to comply the linux boot protocol.
create_boot_msr_entries().into_iter().for_each(|entry| {
msrs.insert(entry.index, entry.data);
});
// TODO - Add/amend MSRs for vCPUs based on cpu_config
// By this point the Guest CPUID is established. Some CPU features require MSRs
// to configure and interact with those features. If a MSR is writable from
// inside the Guest, or is changed by KVM or Firecracker on behalf of the Guest,
// then we will need to save it every time we take a snapshot, and restore its
// value when we restore the microVM since the Guest may need that value.
// Since CPUID tells us what features are enabled for the Guest, we can infer
// the extra MSRs that we need to save based on a dependency map.
let extra_msrs = cpuid::common::msrs_to_save_by_cpuid(&kvm_cpuid);
self.msrs_to_save.extend(extra_msrs);
// TODO: Some MSRs depend on values of other MSRs. This dependency will need to
// be implemented.
// By this point we know that at snapshot, the list of MSRs we need to
// save is `architectural MSRs` + `MSRs inferred through CPUID` + `other
// MSRs defined by the template`
let kvm_msrs = msrs
.into_iter()
.map(|entry| kvm_bindings::kvm_msr_entry {
index: entry.0,
data: entry.1,
..Default::default()
})
.collect::<Vec<_>>();
crate::arch::x86_64::msr::set_msrs(&self.fd, &kvm_msrs)?;
crate::arch::x86_64::regs::setup_regs(&self.fd, kernel_entry_point)?;
crate::arch::x86_64::regs::setup_fpu(&self.fd)?;
crate::arch::x86_64::regs::setup_sregs(guest_mem, &self.fd, kernel_entry_point.protocol)?;
crate::arch::x86_64::interrupts::set_lint(&self.fd)?;
Ok(())
}
/// Sets a Port Mapped IO bus for this vcpu.
pub fn set_pio_bus(&mut self, pio_bus: Arc<Bus>) {
self.peripherals.pio_bus = Some(pio_bus);
}
/// Calls KVM_KVMCLOCK_CTRL to avoid guest soft lockup watchdog panics on resume.
/// See https://docs.kernel.org/virt/kvm/api.html .
pub fn kvmclock_ctrl(&self) {
// We do not want to fail if the call is not successful, because that may be acceptable
// depending on the workload. For example, EINVAL is returned if kvm-clock is not
// activated (e.g., no-kvmclock is specified in the guest kernel parameter).
// https://elixir.bootlin.com/linux/v6.17.5/source/arch/x86/kvm/x86.c#L5736-L5737
if let Err(err) = self.fd.kvmclock_ctrl() {
METRICS.vcpu.kvmclock_ctrl_fails.inc();
warn!("KVM_KVMCLOCK_CTRL call failed {}", err);
}
}
/// Get the current XSAVE state for this vCPU.
///
/// The C `kvm_xsave` struct was extended by adding a flexible array member (FAM) in the end
/// to support variable-sized XSTATE buffer.
///
/// https://elixir.bootlin.com/linux/v6.13.6/source/arch/x86/include/uapi/asm/kvm.h#L381
/// ```c
/// struct kvm_xsave {
/// __u32 region[1024];
/// __u32 extra[];
/// };
/// ```
///
/// As shown above, the C `kvm_xsave` struct does not have any field for the size of itself or
/// the length of its FAM. The required size (in bytes) of `kvm_xsave` struct can be retrieved
/// via `KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)`.
///
/// kvm-bindings defines `kvm_xsave2` struct that wraps the `kvm_xsave` struct to have `len`
/// field that indicates the number of FAM entries (i.e. `extra`), it also defines `Xsave` as
/// a `FamStructWrapper` of `kvm_xsave2`.
///
/// https://github.com/rust-vmm/kvm/blob/68fff5491703bf32bd35656f7ba994a4cae9ea7d/kvm-bindings/src/x86_64/fam_wrappers.rs#L106
/// ```rs
/// pub struct kvm_xsave2 {
/// pub len: usize,
/// pub xsave: kvm_xsave,
/// }
/// ```
fn get_xsave(&self) -> Result<Xsave, KvmVcpuError> {
match self.xsave2_size {
// if `KVM_CAP_XSAVE2` supported
Some(xsave2_size) => {
// Convert the `kvm_xsave` size in bytes to the length of FAM (i.e. `extra`).
let fam_len =
// Calculate the size of FAM (`extra`) area in bytes. Note that the subtraction
// never underflows because `KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)` always returns
// at least 4096 bytes that is the size of `kvm_xsave` without FAM area.
(xsave2_size - std::mem::size_of::<kvm_xsave>())
// Divide by the size of FAM (`extra`) entry (i.e. `__u32`).
.div_ceil(std::mem::size_of::<<kvm_xsave2 as FamStruct>::Entry>());
let mut xsave = Xsave::new(fam_len).map_err(KvmVcpuError::Fam)?;
// SAFETY: Safe because `xsave` is allocated with enough size to save XSTATE.
unsafe { self.fd.get_xsave2(&mut xsave) }.map_err(KvmVcpuError::VcpuGetXsave2)?;
Ok(xsave)
}
// if `KVM_CAP_XSAVE2` not supported
None => Ok(
// SAFETY: The content is correctly laid out.
unsafe {
Xsave::from_raw(vec![kvm_xsave2 {
// Note that `len` is the number of FAM (`extra`) entries that didn't exist
// on older kernels not supporting `KVM_CAP_XSAVE2`. Thus, it's always zero.
len: 0,
xsave: self.fd.get_xsave().map_err(KvmVcpuError::VcpuGetXsave)?,
}])
},
),
}
}
/// Get the current TSC frequency for this vCPU.
///
/// # Errors
///
/// When [`kvm_ioctls::VcpuFd::get_tsc_khz`] errors.
pub fn get_tsc_khz(&self) -> Result<u32, GetTscError> {
let res = self.fd.get_tsc_khz()?;
Ok(res)
}
/// Get CPUID for this vCPU.
///
/// Opposed to KVM_GET_SUPPORTED_CPUID, KVM_GET_CPUID2 does not update "nent" with valid number
/// of entries on success. Thus, when it passes "num_entries" greater than required, zeroed
/// entries follow after valid entries. This function removes such zeroed empty entries.
///
/// # Errors
///
/// * When [`kvm_ioctls::VcpuFd::get_cpuid2`] returns errors.
fn get_cpuid(&self) -> Result<kvm_bindings::CpuId, KvmVcpuError> {
let mut cpuid = self
.fd
.get_cpuid2(KVM_MAX_CPUID_ENTRIES)
.map_err(KvmVcpuError::VcpuGetCpuid)?;
// As CPUID.0h:EAX should have the largest CPUID standard function, we don't need to check
// EBX, ECX and EDX to confirm whether it is a valid entry.
cpuid.retain(|entry| {
!(entry.function == 0 && entry.index == 0 && entry.flags == 0 && entry.eax == 0)
});
Ok(cpuid)
}
/// If the IA32_TSC_DEADLINE MSR value is zero, update it
/// with the IA32_TSC value to guarantee that
/// the vCPU will continue receiving interrupts after restoring from a snapshot.
///
/// Rationale: we observed that sometimes when taking a snapshot,
/// the IA32_TSC_DEADLINE MSR is cleared, but the interrupt is not
/// delivered to the guest, leading to a situation where one
/// of the vCPUs never receives TSC interrupts after restoring,
/// until the MSR is updated externally, eg by setting the system time.
fn fix_zero_tsc_deadline_msr(msr_chunks: &mut [Msrs]) {
// We do not expect more than 1 TSC MSR entry, but if there are multiple, pick the maximum.
let max_tsc_value = msr_chunks
.iter()
.flat_map(|msrs| msrs.as_slice())
.filter(|msr| msr.index == MSR_IA32_TSC)
.map(|msr| msr.data)
.max();
if let Some(tsc_value) = max_tsc_value {
msr_chunks
.iter_mut()
.flat_map(|msrs| msrs.as_mut_slice())
.filter(|msr| msr.index == MSR_IA32_TSC_DEADLINE && msr.data == 0)
.for_each(|msr| {
warn!(
"MSR_IA32_TSC_DEADLINE is 0, replacing with {:#x}.",
tsc_value
);
msr.data = tsc_value;
});
}
}
/// Looks for MSRs from the [`DEFERRED_MSRS`] array and removes them from `msr_chunks`.
/// Returns a new [`Msrs`] object containing all the removed MSRs.
///
/// We use this to capture some causal dependencies between MSRs where the relative order
/// of restoration matters (e.g. MSR_IA32_TSC must be restored before MSR_IA32_TSC_DEADLINE).
fn extract_deferred_msrs(msr_chunks: &mut [Msrs]) -> Result<Msrs, fam::Error> {
// Use 0 here as FamStructWrapper doesn't really give an equivalent of `Vec::with_capacity`,
// and if we specify something N != 0 here, then it will create a FamStructWrapper with N
// elements pre-allocated and zero'd out. Unless we then actually "fill" all those N values,
// KVM will later yell at us about invalid MSRs.
let mut deferred_msrs = Msrs::new(0)?;
for msrs in msr_chunks {
msrs.retain(|msr| {
if DEFERRED_MSRS.contains(&msr.index) {
deferred_msrs
.push(*msr)
.inspect_err(|err| {
error!(
"Failed to move MSR {} into later chunk: {:?}",
msr.index, err
)
})
.is_err()
} else {
true
}
});
}
Ok(deferred_msrs)
}
/// Get MSR chunks for the given MSR index list.
///
/// KVM only supports getting `KVM_MAX_MSR_ENTRIES` at a time, so we divide
/// the list of MSR indices into chunks, call `KVM_GET_MSRS` for each
/// chunk, and collect into a [`Vec<Msrs>`].
///
/// # Arguments
///
/// * `msr_index_iter`: Iterator over MSR indices.
///
/// # Errors
///
/// * When [`kvm_bindings::Msrs::new`] returns errors.
/// * When [`kvm_ioctls::VcpuFd::get_msrs`] returns errors.
/// * When the return value of [`kvm_ioctls::VcpuFd::get_msrs`] (the number of entries that
/// could be gotten) is less than expected.
fn get_msr_chunks(
&self,
mut msr_index_iter: impl ExactSizeIterator<Item = u32>,
) -> Result<Vec<Msrs>, KvmVcpuError> {
let num_chunks = msr_index_iter.len().div_ceil(KVM_MAX_MSR_ENTRIES);
// + 1 for the chunk of deferred MSRs
let mut msr_chunks: Vec<Msrs> = Vec::with_capacity(num_chunks + 1);
for _ in 0..num_chunks {
let chunk_len = msr_index_iter.len().min(KVM_MAX_MSR_ENTRIES);
let chunk = self.get_msr_chunk(&mut msr_index_iter, chunk_len)?;
msr_chunks.push(chunk);
}
Self::fix_zero_tsc_deadline_msr(&mut msr_chunks);
let deferred = Self::extract_deferred_msrs(&mut msr_chunks)?;
msr_chunks.push(deferred);
Ok(msr_chunks)
}
/// Get single MSR chunk for the given MSR index iterator with
/// specified length. Iterator should have enough elements
/// to fill the chunk with indices, otherwise KVM will
/// return an error when processing half filled chunk.
///
/// # Arguments
///
/// * `msr_index_iter`: Iterator over MSR indices.
/// * `chunk_size`: Length of a chunk.
///
/// # Errors
///
/// * When [`kvm_bindings::Msrs::new`] returns errors.
/// * When [`kvm_ioctls::VcpuFd::get_msrs`] returns errors.
/// * When the return value of [`kvm_ioctls::VcpuFd::get_msrs`] (the number of entries that
/// could be gotten) is less than expected.
pub fn get_msr_chunk(
&self,
msr_index_iter: impl Iterator<Item = u32>,
chunk_size: usize,
) -> Result<Msrs, KvmVcpuError> {
let chunk_iter = msr_index_iter.take(chunk_size);
let mut msrs = Msrs::new(chunk_size)?;
let msr_entries = msrs.as_mut_slice();
for (pos, msr_index) in chunk_iter.enumerate() {
msr_entries[pos].index = msr_index;
}
let nmsrs = self
.fd
.get_msrs(&mut msrs)
.map_err(KvmVcpuError::VcpuGetMsrs)?;
// GET_MSRS returns a number of successfully set msrs.
// If number of set msrs is not equal to the length of
// `msrs`, then the value returned by GET_MSRS can act
// as an index to the problematic msr.
if nmsrs != chunk_size {
Err(KvmVcpuError::VcpuGetMsr(msrs.as_slice()[nmsrs].index))
} else {
Ok(msrs)
}
}
/// Get MSRs for the given MSR index list.
///
/// # Arguments
///
/// * `msr_index_list`: List of MSR indices
///
/// # Errors
///
/// * When `KvmVcpu::get_msr_chunks()` returns errors.
pub fn get_msrs(
&self,
msr_index_iter: impl ExactSizeIterator<Item = u32>,
) -> Result<BTreeMap<u32, u64>, KvmVcpuError> {
let mut msrs = BTreeMap::new();
self.get_msr_chunks(msr_index_iter)?
.iter()
.for_each(|msr_chunk| {
msr_chunk.as_slice().iter().for_each(|msr| {
msrs.insert(msr.index, msr.data);
});
});
Ok(msrs)
}
/// Save the KVM internal state.
pub fn save_state(&self) -> Result<VcpuState, KvmVcpuError> {
// Ordering requirements:
//
// KVM_GET_MP_STATE calls kvm_apic_accept_events(), which might modify
// vCPU/LAPIC state. As such, it must be done before most everything
// else, otherwise we cannot restore everything and expect it to work.
//
// KVM_GET_VCPU_EVENTS/KVM_SET_VCPU_EVENTS is unsafe if other vCPUs are
// still running.
//
// KVM_GET_LAPIC may change state of LAPIC before returning it.
//
// GET_VCPU_EVENTS should probably be last to save. The code looks as
// it might as well be affected by internal state modifications of the
// GET ioctls.
//
// SREGS saves/restores a pending interrupt, similar to what
// VCPU_EVENTS also does.
let mp_state = self
.fd
.get_mp_state()
.map_err(KvmVcpuError::VcpuGetMpState)?;
let regs = self.fd.get_regs().map_err(KvmVcpuError::VcpuGetRegs)?;
let sregs = self.fd.get_sregs().map_err(KvmVcpuError::VcpuGetSregs)?;
let xsave = self.get_xsave()?;
let xcrs = self.fd.get_xcrs().map_err(KvmVcpuError::VcpuGetXcrs)?;
let debug_regs = self
.fd
.get_debug_regs()
.map_err(KvmVcpuError::VcpuGetDebugRegs)?;
let lapic = self.fd.get_lapic().map_err(KvmVcpuError::VcpuGetLapic)?;
let tsc_khz = self.get_tsc_khz().ok().or_else(|| {
// v0.25 and newer snapshots without TSC will only work on
// the same CPU model as the host on which they were taken.
// TODO: Add negative test for this warning failure.
warn!("TSC freq not available. Snapshot cannot be loaded on a different CPU model.");
None
});
let cpuid = self.get_cpuid()?;
let saved_msrs = self.get_msr_chunks(self.msrs_to_save.iter().copied())?;
let vcpu_events = self
.fd
.get_vcpu_events()
.map_err(KvmVcpuError::VcpuGetVcpuEvents)?;
Ok(VcpuState {
cpuid,
saved_msrs,
debug_regs,
lapic,
mp_state,
regs,
sregs,
vcpu_events,
xcrs,
xsave,
tsc_khz,
})
}
/// Dumps CPU configuration (CPUID and MSRs).
///
/// Opposed to `save_state()`, this dumps all the supported and dumpable MSRs not limited to
/// serializable ones.
pub fn dump_cpu_config(&self) -> Result<CpuConfiguration, KvmVcpuError> {
let cpuid = cpuid::Cpuid::try_from(self.get_cpuid()?)?;
let kvm = kvm_ioctls::Kvm::new().unwrap();
let msr_index_list = crate::arch::x86_64::msr::get_msrs_to_dump(&kvm)?;
let msrs = self.get_msrs(msr_index_list.as_slice().iter().copied())?;
Ok(CpuConfiguration { cpuid, msrs })
}
/// Checks whether the TSC needs scaling when restoring a snapshot.
///
/// # Errors
///
/// When
pub fn is_tsc_scaling_required(&self, state_tsc_freq: u32) -> Result<bool, GetTscError> {
// Compare the current TSC freq to the one found
// in the state. If they are different, we need to
// scale the TSC to the freq found in the state.
// We accept values within a tolerance of 250 parts
// per million because it is common for TSC frequency
// to differ due to calibration at boot time.
let diff = (i64::from(self.get_tsc_khz()?) - i64::from(state_tsc_freq)).abs();
// Cannot overflow since u32::MAX * 250 < i64::MAX
Ok(diff > i64::from(state_tsc_freq) * TSC_KHZ_TOL_NUMERATOR / TSC_KHZ_TOL_DENOMINATOR)
}
/// Scale the TSC frequency of this vCPU to the one provided as a parameter.
pub fn set_tsc_khz(&self, tsc_freq: u32) -> Result<(), SetTscError> {
self.fd.set_tsc_khz(tsc_freq).map_err(SetTscError)
}
/// Use provided state to populate KVM internal state.
pub fn restore_state(&self, state: &VcpuState) -> Result<(), KvmVcpuError> {
// Ordering requirements:
//
// KVM_GET_VCPU_EVENTS/KVM_SET_VCPU_EVENTS is unsafe if other vCPUs are
// still running.
//
// Some SET ioctls (like set_mp_state) depend on kvm_vcpu_is_bsp(), so
// if we ever change the BSP, we have to do that before restoring anything.
// The same seems to be true for CPUID stuff.
//
// SREGS saves/restores a pending interrupt, similar to what
// VCPU_EVENTS also does.
//
// SET_REGS clears pending exceptions unconditionally, thus, it must be
// done before SET_VCPU_EVENTS, which restores it.
//
// SET_LAPIC must come after SET_SREGS, because the latter restores
// the apic base msr.
//
// SET_LAPIC must come before SET_MSRS, because the TSC deadline MSR
// only restores successfully, when the LAPIC is correctly configured.
self.fd
.set_cpuid2(&state.cpuid)
.map_err(KvmVcpuError::VcpuSetCpuid)?;
self.fd
.set_mp_state(state.mp_state)
.map_err(KvmVcpuError::VcpuSetMpState)?;
self.fd
.set_regs(&state.regs)
.map_err(KvmVcpuError::VcpuSetRegs)?;
self.fd
.set_sregs(&state.sregs)
.map_err(KvmVcpuError::VcpuSetSregs)?;
// SAFETY: Safe unless the snapshot is corrupted.
unsafe {
// kvm-ioctl's `set_xsave2()` can be called even on kernel versions not supporting
// `KVM_CAP_XSAVE2`, because it internally calls `KVM_SET_XSAVE` API that was extended
// by Linux kernel. Thus, `KVM_SET_XSAVE2` API does not exist as a KVM interface.
// However, kvm-ioctl added `set_xsave2()` to allow users to pass `Xsave` instead of the
// older `kvm_xsave`.
self.fd
.set_xsave2(&state.xsave)
.map_err(KvmVcpuError::VcpuSetXsave)?;
}
self.fd
.set_xcrs(&state.xcrs)
.map_err(KvmVcpuError::VcpuSetXcrs)?;
self.fd
.set_debug_regs(&state.debug_regs)
.map_err(KvmVcpuError::VcpuSetDebugRegs)?;
self.fd
.set_lapic(&state.lapic)
.map_err(KvmVcpuError::VcpuSetLapic)?;
for msrs in &state.saved_msrs {
let nmsrs = self.fd.set_msrs(msrs).map_err(KvmVcpuError::VcpuSetMsrs)?;
if nmsrs < msrs.as_fam_struct_ref().nmsrs as usize {
return Err(KvmVcpuError::VcpuSetMsrsIncomplete);
}
}
self.fd
.set_vcpu_events(&state.vcpu_events)
.map_err(KvmVcpuError::VcpuSetVcpuEvents)?;
self.kvmclock_ctrl();
Ok(())
}
}
impl Peripherals {
/// Runs the vCPU in KVM context and handles the kvm exit reason.
///
/// Returns error or enum specifying whether emulation was handled or interrupted.
pub fn run_arch_emulation(&self, exit: VcpuExit) -> Result<VcpuEmulation, VcpuError> {
match exit {
VcpuExit::IoIn(addr, data) => {
if let Some(pio_bus) = &self.pio_bus {
let _metric = METRICS.vcpu.exit_io_in_agg.record_latency_metrics();
if let Err(err) = pio_bus.read(u64::from(addr), data) {
warn!("vcpu: IO read @ {addr:#x}:{:#x} failed: {err}", data.len());
}
METRICS.vcpu.exit_io_in.inc();
}
Ok(VcpuEmulation::Handled)
}
VcpuExit::IoOut(addr, data) => {
if let Some(pio_bus) = &self.pio_bus {
let _metric = METRICS.vcpu.exit_io_out_agg.record_latency_metrics();
if let Err(err) = pio_bus.write(u64::from(addr), data) {
warn!("vcpu: IO write @ {addr:#x}:{:#x} failed: {err}", data.len());
}
METRICS.vcpu.exit_io_out.inc();
}
Ok(VcpuEmulation::Handled)
}
unexpected_exit => {
METRICS.vcpu.failures.inc();
// TODO: Are we sure we want to finish running a vcpu upon
// receiving a vm exit that is not necessarily an error?
error!("Unexpected exit reason on vcpu run: {:?}", unexpected_exit);
Err(VcpuError::UnhandledKvmExit(format!(
"{:?}",
unexpected_exit
)))
}
}
}
}
/// Structure holding VCPU kvm state.
#[derive(Serialize, Deserialize)]
pub struct VcpuState {
/// CpuId.
pub cpuid: CpuId,
/// Saved msrs.
pub saved_msrs: Vec<Msrs>,
/// Debug regs.
pub debug_regs: kvm_debugregs,
/// Lapic.
pub lapic: kvm_lapic_state,
/// Mp state
pub mp_state: kvm_mp_state,
/// Kvm regs.
pub regs: kvm_regs,
/// Sregs.
pub sregs: kvm_sregs,
/// Vcpu events
pub vcpu_events: kvm_vcpu_events,
/// Xcrs.
pub xcrs: kvm_xcrs,
/// Xsave.
pub xsave: Xsave,
/// Tsc khz.
pub tsc_khz: Option<u32>,
}
impl Debug for VcpuState {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut debug_kvm_regs: Vec<kvm_bindings::kvm_msrs> = Vec::new();
for kvm_msrs in self.saved_msrs.iter() {
debug_kvm_regs = kvm_msrs.clone().into_raw();
debug_kvm_regs.sort_by_key(|msr| (msr.nmsrs, msr.pad));
}
f.debug_struct("VcpuState")
.field("cpuid", &self.cpuid)
.field("saved_msrs", &debug_kvm_regs)
.field("debug_regs", &self.debug_regs)
.field("lapic", &self.lapic)
.field("mp_state", &self.mp_state)
.field("regs", &self.regs)
.field("sregs", &self.sregs)
.field("vcpu_events", &self.vcpu_events)
.field("xcrs", &self.xcrs)
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/x86_64/gdt.rs | src/vmm/src/arch/x86_64/gdt.rs | // Copyright © 2020, Oracle and/or its affiliates.
//
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
// For GDT details see arch/x86/include/asm/segment.h
use kvm_bindings::kvm_segment;
/// Constructor for a conventional segment GDT (or LDT) entry. Derived from the kernel's segment.h.
pub fn gdt_entry(flags: u16, base: u32, limit: u32) -> u64 {
((u64::from(base) & 0xff00_0000u64) << (56 - 24))
| ((u64::from(flags) & 0x0000_f0ffu64) << 40)
| ((u64::from(limit) & 0x000f_0000u64) << (48 - 16))
| ((u64::from(base) & 0x00ff_ffffu64) << 16)
| (u64::from(limit) & 0x0000_ffffu64)
}
fn get_base(entry: u64) -> u64 {
(((entry) & 0xFF00_0000_0000_0000) >> 32)
| (((entry) & 0x0000_00FF_0000_0000) >> 16)
| (((entry) & 0x0000_0000_FFFF_0000) >> 16)
}
// Extract the segment limit from the GDT segment descriptor.
//
// In a segment descriptor, the limit field is 20 bits, so it can directly describe
// a range from 0 to 0xFFFFF (1 MB). When G flag is set (4-KByte page granularity) it
// scales the value in the limit field by a factor of 2^12 (4 Kbytes), making the effective
// limit range from 0xFFF (4 KBytes) to 0xFFFF_FFFF (4 GBytes).
//
// However, the limit field in the VMCS definition is a 32 bit field, and the limit value is not
// automatically scaled using the G flag. This means that for a desired range of 4GB for a
// given segment, its limit must be specified as 0xFFFF_FFFF. Therefore the method of obtaining
// the limit from the GDT entry is not sufficient, since it only provides 20 bits when 32 bits
// are necessary. Fortunately, we can check if the G flag is set when extracting the limit since
// the full GDT entry is passed as an argument, and perform the scaling of the limit value to
// return the full 32 bit value.
//
// The scaling mentioned above is required when using PVH boot, since the guest boots in protected
// (32-bit) mode and must be able to access the entire 32-bit address space. It does not cause
// issues for the case of direct boot to 64-bit (long) mode, since in 64-bit mode the processor does
// not perform runtime limit checking on code or data segments.
//
// (For more information concerning the formats of segment descriptors, VMCS fields, et cetera,
// please consult the Intel Software Developer Manual.)
fn get_limit(entry: u64) -> u32 {
#[allow(clippy::cast_possible_truncation)] // clearly, truncation is not possible
let limit: u32 =
((((entry) & 0x000F_0000_0000_0000) >> 32) | ((entry) & 0x0000_0000_0000_FFFF)) as u32;
// Perform manual limit scaling if G flag is set
match get_g(entry) {
0 => limit,
_ => (limit << 12) | 0xFFF, // G flag is either 0 or 1
}
}
fn get_g(entry: u64) -> u8 {
((entry & 0x0080_0000_0000_0000) >> 55) as u8
}
fn get_db(entry: u64) -> u8 {
((entry & 0x0040_0000_0000_0000) >> 54) as u8
}
fn get_l(entry: u64) -> u8 {
((entry & 0x0020_0000_0000_0000) >> 53) as u8
}
fn get_avl(entry: u64) -> u8 {
((entry & 0x0010_0000_0000_0000) >> 52) as u8
}
fn get_p(entry: u64) -> u8 {
((entry & 0x0000_8000_0000_0000) >> 47) as u8
}
fn get_dpl(entry: u64) -> u8 {
((entry & 0x0000_6000_0000_0000) >> 45) as u8
}
fn get_s(entry: u64) -> u8 {
((entry & 0x0000_1000_0000_0000) >> 44) as u8
}
fn get_type(entry: u64) -> u8 {
((entry & 0x0000_0F00_0000_0000) >> 40) as u8
}
/// Automatically build the kvm struct for SET_SREGS from the kernel bit fields.
///
/// # Arguments
///
/// * `entry` - The gdt entry.
/// * `table_index` - Index of the entry in the gdt table.
pub fn kvm_segment_from_gdt(entry: u64, table_index: u8) -> kvm_segment {
kvm_segment {
base: get_base(entry),
limit: get_limit(entry),
selector: u16::from(table_index * 8),
type_: get_type(entry),
present: get_p(entry),
dpl: get_dpl(entry),
db: get_db(entry),
s: get_s(entry),
l: get_l(entry),
g: get_g(entry),
avl: get_avl(entry),
padding: 0,
unusable: match get_p(entry) {
0 => 1,
_ => 0,
},
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn field_parse() {
let gdt = gdt_entry(0xA09B, 0x10_0000, 0xfffff);
let seg = kvm_segment_from_gdt(gdt, 0);
// 0xA09B
// 'A'
assert_eq!(0x1, seg.g);
assert_eq!(0x0, seg.db);
assert_eq!(0x1, seg.l);
assert_eq!(0x0, seg.avl);
// '9'
assert_eq!(0x1, seg.present);
assert_eq!(0x0, seg.dpl);
assert_eq!(0x1, seg.s);
// 'B'
assert_eq!(0xB, seg.type_);
// base and limit
assert_eq!(0x10_0000, seg.base);
assert_eq!(0xffff_ffff, seg.limit);
assert_eq!(0x0, seg.unusable);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/x86_64/mptable.rs | src/vmm/src/arch/x86_64/mptable.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::convert::TryFrom;
use std::fmt::Debug;
use std::mem::{self, size_of};
use libc::c_char;
use log::debug;
use vm_allocator::AllocPolicy;
use crate::arch::GSI_LEGACY_END;
use crate::arch::x86_64::generated::mpspec;
use crate::vstate::memory::{
Address, ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap,
};
use crate::vstate::resources::ResourceAllocator;
// These `mpspec` wrapper types are only data, reading them from data is a safe initialization.
// SAFETY: POD
unsafe impl ByteValued for mpspec::mpc_bus {}
// SAFETY: POD
unsafe impl ByteValued for mpspec::mpc_cpu {}
// SAFETY: POD
unsafe impl ByteValued for mpspec::mpc_intsrc {}
// SAFETY: POD
unsafe impl ByteValued for mpspec::mpc_ioapic {}
// SAFETY: POD
unsafe impl ByteValued for mpspec::mpc_table {}
// SAFETY: POD
unsafe impl ByteValued for mpspec::mpc_lintsrc {}
// SAFETY: POD
unsafe impl ByteValued for mpspec::mpf_intel {}
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum MptableError {
/// There was too little guest memory to store the entire MP table.
NotEnoughMemory,
/// The MP table has too little address space to be stored.
AddressOverflow,
/// Failure while zeroing out the memory for the MP table.
Clear,
/// Number of CPUs exceeds the maximum supported CPUs
TooManyCpus,
/// Number of IRQs exceeds the maximum supported IRQs
TooManyIrqs,
/// Failure to write the MP floating pointer.
WriteMpfIntel,
/// Failure to write MP CPU entry.
WriteMpcCpu,
/// Failure to write MP ioapic entry.
WriteMpcIoapic,
/// Failure to write MP bus entry.
WriteMpcBus,
/// Failure to write MP interrupt source entry.
WriteMpcIntsrc,
/// Failure to write MP local interrupt source entry.
WriteMpcLintsrc,
/// Failure to write MP table header.
WriteMpcTable,
/// Failure to allocate memory for MPTable
AllocateMemory(#[from] vm_allocator::Error),
}
// With APIC/xAPIC, there are only 255 APIC IDs available. And IOAPIC occupies
// one APIC ID, so only 254 CPUs at maximum may be supported. Actually it's
// a large number for FC usecases.
pub const MAX_SUPPORTED_CPUS: u8 = 254;
// Convenience macro for making arrays of diverse character types.
macro_rules! char_array {
($t:ty; $( $c:expr ),*) => ( [ $( $c as $t ),* ] )
}
// Most of these variables are sourced from the Intel MP Spec 1.4.
const SMP_MAGIC_IDENT: [c_char; 4] = char_array!(c_char; '_', 'M', 'P', '_');
const MPC_SIGNATURE: [c_char; 4] = char_array!(c_char; 'P', 'C', 'M', 'P');
const MPC_SPEC: i8 = 4;
const MPC_OEM: [c_char; 8] = char_array!(c_char; 'F', 'C', ' ', ' ', ' ', ' ', ' ', ' ');
const MPC_PRODUCT_ID: [c_char; 12] = ['0' as c_char; 12];
const BUS_TYPE_ISA: [u8; 6] = [b'I', b'S', b'A', b' ', b' ', b' '];
const IO_APIC_DEFAULT_PHYS_BASE: u32 = 0xfec0_0000; // source: linux/arch/x86/include/asm/apicdef.h
const APIC_DEFAULT_PHYS_BASE: u32 = 0xfee0_0000; // source: linux/arch/x86/include/asm/apicdef.h
const APIC_VERSION: u8 = 0x14;
const CPU_STEPPING: u32 = 0x600;
const CPU_FEATURE_APIC: u32 = 0x200;
const CPU_FEATURE_FPU: u32 = 0x001;
fn compute_checksum<T: ByteValued>(v: &T) -> u8 {
let mut checksum: u8 = 0;
for i in v.as_slice() {
checksum = checksum.wrapping_add(*i);
}
checksum
}
fn mpf_intel_compute_checksum(v: &mpspec::mpf_intel) -> u8 {
let checksum = compute_checksum(v).wrapping_sub(v.checksum);
(!checksum).wrapping_add(1)
}
fn compute_mp_size(num_cpus: u8) -> usize {
mem::size_of::<mpspec::mpf_intel>()
+ mem::size_of::<mpspec::mpc_table>()
+ mem::size_of::<mpspec::mpc_cpu>() * (num_cpus as usize)
+ mem::size_of::<mpspec::mpc_ioapic>()
+ mem::size_of::<mpspec::mpc_bus>()
+ mem::size_of::<mpspec::mpc_intsrc>() * (GSI_LEGACY_END as usize + 1)
+ mem::size_of::<mpspec::mpc_lintsrc>() * 2
}
/// Performs setup of the MP table for the given `num_cpus`.
pub fn setup_mptable(
mem: &GuestMemoryMmap,
resource_allocator: &mut ResourceAllocator,
num_cpus: u8,
) -> Result<(), MptableError> {
if num_cpus > MAX_SUPPORTED_CPUS {
return Err(MptableError::TooManyCpus);
}
let mp_size = compute_mp_size(num_cpus);
let mptable_addr =
resource_allocator.allocate_system_memory(mp_size as u64, 1, AllocPolicy::FirstMatch)?;
debug!(
"mptable: Allocated {mp_size} bytes for MPTable {num_cpus} vCPUs at address {:#010x}",
mptable_addr
);
// Used to keep track of the next base pointer into the MP table.
let mut base_mp = GuestAddress(mptable_addr);
let mut mp_num_entries: u16 = 0;
let mut checksum: u8 = 0;
let ioapicid: u8 = num_cpus + 1;
// The checked_add here ensures the all of the following base_mp.unchecked_add's will be without
// overflow.
if let Some(end_mp) = base_mp.checked_add((mp_size - 1) as u64) {
if !mem.address_in_range(end_mp) {
return Err(MptableError::NotEnoughMemory);
}
} else {
return Err(MptableError::AddressOverflow);
}
mem.write_slice(&vec![0; mp_size], base_mp)
.map_err(|_| MptableError::Clear)?;
{
let size = mem::size_of::<mpspec::mpf_intel>() as u64;
let mut mpf_intel = mpspec::mpf_intel {
signature: SMP_MAGIC_IDENT,
physptr: u32::try_from(base_mp.raw_value() + size).unwrap(),
length: 1,
specification: 4,
..mpspec::mpf_intel::default()
};
mpf_intel.checksum = mpf_intel_compute_checksum(&mpf_intel);
mem.write_obj(mpf_intel, base_mp)
.map_err(|_| MptableError::WriteMpfIntel)?;
base_mp = base_mp.unchecked_add(size);
mp_num_entries += 1;
}
// We set the location of the mpc_table here but we can't fill it out until we have the length
// of the entire table later.
let table_base = base_mp;
base_mp = base_mp.unchecked_add(mem::size_of::<mpspec::mpc_table>() as u64);
{
let size = mem::size_of::<mpspec::mpc_cpu>() as u64;
for cpu_id in 0..num_cpus {
let mpc_cpu = mpspec::mpc_cpu {
type_: mpspec::MP_PROCESSOR.try_into().unwrap(),
apicid: cpu_id,
apicver: APIC_VERSION,
cpuflag: u8::try_from(mpspec::CPU_ENABLED).unwrap()
| if cpu_id == 0 {
u8::try_from(mpspec::CPU_BOOTPROCESSOR).unwrap()
} else {
0
},
cpufeature: CPU_STEPPING,
featureflag: CPU_FEATURE_APIC | CPU_FEATURE_FPU,
..Default::default()
};
mem.write_obj(mpc_cpu, base_mp)
.map_err(|_| MptableError::WriteMpcCpu)?;
base_mp = base_mp.unchecked_add(size);
checksum = checksum.wrapping_add(compute_checksum(&mpc_cpu));
mp_num_entries += 1;
}
}
{
let size = mem::size_of::<mpspec::mpc_bus>() as u64;
let mpc_bus = mpspec::mpc_bus {
type_: mpspec::MP_BUS.try_into().unwrap(),
busid: 0,
bustype: BUS_TYPE_ISA,
};
mem.write_obj(mpc_bus, base_mp)
.map_err(|_| MptableError::WriteMpcBus)?;
base_mp = base_mp.unchecked_add(size);
checksum = checksum.wrapping_add(compute_checksum(&mpc_bus));
mp_num_entries += 1;
}
{
let size = mem::size_of::<mpspec::mpc_ioapic>() as u64;
let mpc_ioapic = mpspec::mpc_ioapic {
type_: mpspec::MP_IOAPIC.try_into().unwrap(),
apicid: ioapicid,
apicver: APIC_VERSION,
flags: mpspec::MPC_APIC_USABLE.try_into().unwrap(),
apicaddr: IO_APIC_DEFAULT_PHYS_BASE,
};
mem.write_obj(mpc_ioapic, base_mp)
.map_err(|_| MptableError::WriteMpcIoapic)?;
base_mp = base_mp.unchecked_add(size);
checksum = checksum.wrapping_add(compute_checksum(&mpc_ioapic));
mp_num_entries += 1;
}
// Per kvm_setup_default_irq_routing() in kernel
for i in 0..=u8::try_from(GSI_LEGACY_END).map_err(|_| MptableError::TooManyIrqs)? {
let size = mem::size_of::<mpspec::mpc_intsrc>() as u64;
let mpc_intsrc = mpspec::mpc_intsrc {
type_: mpspec::MP_INTSRC.try_into().unwrap(),
irqtype: mpspec::mp_irq_source_types::mp_INT.try_into().unwrap(),
irqflag: mpspec::MP_IRQPOL_DEFAULT.try_into().unwrap(),
srcbus: 0,
srcbusirq: i,
dstapic: ioapicid,
dstirq: i,
};
mem.write_obj(mpc_intsrc, base_mp)
.map_err(|_| MptableError::WriteMpcIntsrc)?;
base_mp = base_mp.unchecked_add(size);
checksum = checksum.wrapping_add(compute_checksum(&mpc_intsrc));
mp_num_entries += 1;
}
{
let size = mem::size_of::<mpspec::mpc_lintsrc>() as u64;
let mpc_lintsrc = mpspec::mpc_lintsrc {
type_: mpspec::MP_LINTSRC.try_into().unwrap(),
irqtype: mpspec::mp_irq_source_types::mp_ExtINT.try_into().unwrap(),
irqflag: mpspec::MP_IRQPOL_DEFAULT.try_into().unwrap(),
srcbusid: 0,
srcbusirq: 0,
destapic: 0,
destapiclint: 0,
};
mem.write_obj(mpc_lintsrc, base_mp)
.map_err(|_| MptableError::WriteMpcLintsrc)?;
base_mp = base_mp.unchecked_add(size);
checksum = checksum.wrapping_add(compute_checksum(&mpc_lintsrc));
mp_num_entries += 1;
}
{
let size = mem::size_of::<mpspec::mpc_lintsrc>() as u64;
let mpc_lintsrc = mpspec::mpc_lintsrc {
type_: mpspec::MP_LINTSRC.try_into().unwrap(),
irqtype: mpspec::mp_irq_source_types::mp_NMI.try_into().unwrap(),
irqflag: mpspec::MP_IRQPOL_DEFAULT.try_into().unwrap(),
srcbusid: 0,
srcbusirq: 0,
destapic: 0xFF,
destapiclint: 1,
};
mem.write_obj(mpc_lintsrc, base_mp)
.map_err(|_| MptableError::WriteMpcLintsrc)?;
base_mp = base_mp.unchecked_add(size);
checksum = checksum.wrapping_add(compute_checksum(&mpc_lintsrc));
mp_num_entries += 1;
}
// At this point we know the size of the mp_table.
let table_end = base_mp;
{
let mut mpc_table = mpspec::mpc_table {
signature: MPC_SIGNATURE,
// it's safe to use unchecked_offset_from because
// table_end > table_base
length: table_end
.unchecked_offset_from(table_base)
.try_into()
.unwrap(),
spec: MPC_SPEC,
oem: MPC_OEM,
oemcount: mp_num_entries,
productid: MPC_PRODUCT_ID,
lapic: APIC_DEFAULT_PHYS_BASE,
..Default::default()
};
debug_assert_eq!(
mpc_table.length as usize + size_of::<mpspec::mpf_intel>(),
mp_size
);
checksum = checksum.wrapping_add(compute_checksum(&mpc_table));
#[allow(clippy::cast_possible_wrap)]
let checksum_final = (!checksum).wrapping_add(1) as i8;
mpc_table.checksum = checksum_final;
mem.write_obj(mpc_table, table_base)
.map_err(|_| MptableError::WriteMpcTable)?;
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::arch::SYSTEM_MEM_START;
use crate::test_utils::single_region_mem_at;
use crate::vstate::memory::Bytes;
fn table_entry_size(type_: u8) -> usize {
match u32::from(type_) {
mpspec::MP_PROCESSOR => mem::size_of::<mpspec::mpc_cpu>(),
mpspec::MP_BUS => mem::size_of::<mpspec::mpc_bus>(),
mpspec::MP_IOAPIC => mem::size_of::<mpspec::mpc_ioapic>(),
mpspec::MP_INTSRC => mem::size_of::<mpspec::mpc_intsrc>(),
mpspec::MP_LINTSRC => mem::size_of::<mpspec::mpc_lintsrc>(),
_ => panic!("unrecognized mpc table entry type: {}", type_),
}
}
#[test]
fn bounds_check() {
let num_cpus = 4;
let mem = single_region_mem_at(SYSTEM_MEM_START, compute_mp_size(num_cpus));
let mut resource_allocator = ResourceAllocator::new();
setup_mptable(&mem, &mut resource_allocator, num_cpus).unwrap();
}
#[test]
fn bounds_check_fails() {
let num_cpus = 4;
let mem = single_region_mem_at(SYSTEM_MEM_START, compute_mp_size(num_cpus) - 1);
let mut resource_allocator = ResourceAllocator::new();
setup_mptable(&mem, &mut resource_allocator, num_cpus).unwrap_err();
}
#[test]
fn mpf_intel_checksum() {
let num_cpus = 1;
let mem = single_region_mem_at(SYSTEM_MEM_START, compute_mp_size(num_cpus));
let mut resource_allocator = ResourceAllocator::new();
setup_mptable(&mem, &mut resource_allocator, num_cpus).unwrap();
let mpf_intel: mpspec::mpf_intel = mem.read_obj(GuestAddress(SYSTEM_MEM_START)).unwrap();
assert_eq!(mpf_intel_compute_checksum(&mpf_intel), mpf_intel.checksum);
}
#[test]
fn mpc_table_checksum() {
let num_cpus = 4;
let mem = single_region_mem_at(SYSTEM_MEM_START, compute_mp_size(num_cpus));
let mut resource_allocator = ResourceAllocator::new();
setup_mptable(&mem, &mut resource_allocator, num_cpus).unwrap();
let mpf_intel: mpspec::mpf_intel = mem.read_obj(GuestAddress(SYSTEM_MEM_START)).unwrap();
let mpc_offset = GuestAddress(u64::from(mpf_intel.physptr));
let mpc_table: mpspec::mpc_table = mem.read_obj(mpc_offset).unwrap();
let mut buffer = Vec::new();
mem.write_volatile_to(mpc_offset, &mut buffer, mpc_table.length as usize)
.unwrap();
assert_eq!(
buffer
.iter()
.fold(0u8, |accum, &item| accum.wrapping_add(item)),
0
);
}
#[test]
fn mpc_entry_count() {
let num_cpus = 1;
let mem = single_region_mem_at(SYSTEM_MEM_START, compute_mp_size(num_cpus));
let mut resource_allocator = ResourceAllocator::new();
setup_mptable(&mem, &mut resource_allocator, num_cpus).unwrap();
let mpf_intel: mpspec::mpf_intel = mem.read_obj(GuestAddress(SYSTEM_MEM_START)).unwrap();
let mpc_offset = GuestAddress(u64::from(mpf_intel.physptr));
let mpc_table: mpspec::mpc_table = mem.read_obj(mpc_offset).unwrap();
let expected_entry_count =
// Intel floating point
1
// CPU
+ u16::from(num_cpus)
// IOAPIC
+ 1
// ISA Bus
+ 1
// IRQ
+ u16::try_from(GSI_LEGACY_END).unwrap() + 1
// Interrupt source ExtINT
+ 1
// Interrupt source NMI
+ 1;
assert_eq!(mpc_table.oemcount, expected_entry_count);
}
#[test]
fn cpu_entry_count() {
let mem = single_region_mem_at(SYSTEM_MEM_START, compute_mp_size(MAX_SUPPORTED_CPUS));
for i in 0..MAX_SUPPORTED_CPUS {
let mut resource_allocator = ResourceAllocator::new();
setup_mptable(&mem, &mut resource_allocator, i).unwrap();
let mpf_intel: mpspec::mpf_intel =
mem.read_obj(GuestAddress(SYSTEM_MEM_START)).unwrap();
let mpc_offset = GuestAddress(u64::from(mpf_intel.physptr));
let mpc_table: mpspec::mpc_table = mem.read_obj(mpc_offset).unwrap();
let mpc_end = mpc_offset.checked_add(u64::from(mpc_table.length)).unwrap();
let mut entry_offset = mpc_offset
.checked_add(mem::size_of::<mpspec::mpc_table>() as u64)
.unwrap();
let mut cpu_count = 0;
while entry_offset < mpc_end {
let entry_type: u8 = mem.read_obj(entry_offset).unwrap();
entry_offset = entry_offset
.checked_add(table_entry_size(entry_type) as u64)
.unwrap();
assert!(entry_offset <= mpc_end);
if u32::from(entry_type) == mpspec::MP_PROCESSOR {
cpu_count += 1;
}
}
assert_eq!(cpu_count, i);
}
}
#[test]
fn cpu_entry_count_max() {
    // Requesting one vCPU more than the supported maximum must be rejected.
    let too_many = MAX_SUPPORTED_CPUS + 1;
    let mem = single_region_mem_at(SYSTEM_MEM_START, compute_mp_size(too_many));
    let mut allocator = ResourceAllocator::new();
    let err = setup_mptable(&mem, &mut allocator, too_many).unwrap_err();
    assert_eq!(err, MptableError::TooManyCpus);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/x86_64/mod.rs | src/vmm/src/arch/x86_64/mod.rs | // Copyright © 2020, Oracle and/or its affiliates.
//
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
/// Logic for handling x86_64 CPU models.
pub mod cpu_model;
mod gdt;
/// Contains logic for setting up Advanced Programmable Interrupt Controller (local version).
pub mod interrupts;
/// Architecture specific KVM-related code
pub mod kvm;
/// Layout for the x86_64 system.
pub mod layout;
mod mptable;
/// Logic for configuring x86_64 model specific registers (MSRs).
pub mod msr;
/// Logic for configuring x86_64 registers.
pub mod regs;
/// Architecture specific vCPU code
pub mod vcpu;
/// Architecture specific VM state code
pub mod vm;
/// Logic for configuring XSTATE features.
pub mod xstate;
#[allow(missing_docs)]
pub mod generated;
use std::cmp::max;
use std::fs::File;
use kvm::Kvm;
use layout::{
CMDLINE_START, MMIO32_MEM_SIZE, MMIO32_MEM_START, MMIO64_MEM_SIZE, MMIO64_MEM_START,
PCI_MMCONFIG_SIZE, PCI_MMCONFIG_START,
};
use linux_loader::configurator::linux::LinuxBootConfigurator;
use linux_loader::configurator::pvh::PvhBootConfigurator;
use linux_loader::configurator::{BootConfigurator, BootParams};
use linux_loader::loader::bootparam::boot_params;
use linux_loader::loader::elf::Elf as Loader;
use linux_loader::loader::elf::start_info::{
hvm_memmap_table_entry, hvm_modlist_entry, hvm_start_info,
};
use linux_loader::loader::{Cmdline, KernelLoader, PvhBootCapability, load_cmdline};
use log::debug;
use super::EntryPoint;
use crate::acpi::create_acpi_tables;
use crate::arch::{BootProtocol, SYSTEM_MEM_SIZE, SYSTEM_MEM_START, arch_memory_regions_with_gap};
use crate::cpu_config::templates::{CustomCpuTemplate, GuestConfigError};
use crate::cpu_config::x86_64::CpuConfiguration;
use crate::device_manager::DeviceManager;
use crate::initrd::InitrdConfig;
use crate::utils::{align_down, u64_to_usize, usize_to_u64};
use crate::vmm_config::machine_config::MachineConfig;
use crate::vstate::memory::{
Address, GuestAddress, GuestMemory, GuestMemoryMmap, GuestMemoryRegion, GuestRegionType,
};
use crate::vstate::vcpu::KvmVcpuConfigureError;
use crate::{Vcpu, VcpuConfig, Vm, logger};
// Value taken from https://elixir.bootlin.com/linux/v5.10.68/source/arch/x86/include/uapi/asm/e820.h#L31
// Usable normal RAM
const E820_RAM: u32 = 1;
// Reserved area that should be avoided during memory allocations
const E820_RESERVED: u32 = 2;
const MEMMAP_TYPE_RAM: u32 = 1;
/// Errors thrown while configuring x86_64 system.
// NOTE: each variant's doc comment doubles as its `Display` message via
// `displaydoc`, so the text below is user-visible error output — do not edit
// it for style alone.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum ConfigurationError {
    /// Invalid e820 setup params.
    E820Configuration,
    /// Error writing MP table to memory: {0}
    MpTableSetup(#[from] mptable::MptableError),
    /// Error writing the zero page of guest memory.
    ZeroPageSetup,
    /// Error writing module entry to guest memory.
    ModlistSetup,
    /// Error writing memory map table to guest memory.
    MemmapTableSetup,
    /// Error writing hvm_start_info to guest memory.
    StartInfoSetup,
    /// Cannot copy kernel file fd
    KernelFile,
    /// Cannot load kernel due to invalid memory configuration or invalid kernel image: {0}
    KernelLoader(linux_loader::loader::Error),
    /// Cannot load command line string: {0}
    LoadCommandline(linux_loader::loader::Error),
    /// Failed to create guest config: {0}
    CreateGuestConfig(#[from] GuestConfigError),
    /// Error configuring the vcpu for boot: {0}
    VcpuConfigure(#[from] KvmVcpuConfigureError),
    /// Error configuring ACPI: {0}
    Acpi(#[from] crate::acpi::AcpiError),
}
/// Returns a Vec of the valid memory addresses.
/// These should be used to configure the GuestMemoryMmap structure for the platform.
/// For x86_64 all addresses are valid from the start of the kernel except a 1GB
/// carve out at the end of 32bit address space and a second 256GB one at the 256GB limit.
pub fn arch_memory_regions(size: usize) -> Vec<(GuestAddress, usize)> {
    // If we get here with size == 0 something has seriously gone wrong. Firecracker should never
    // try to allocate guest memory of size 0
    assert!(size > 0, "Attempt to allocate guest memory of length 0");
    // Clamp the DRAM size so that, together with the two MMIO carve-outs, the
    // highest region still fits inside the address space.
    let dram_size = std::cmp::min(
        usize::MAX - u64_to_usize(MMIO32_MEM_SIZE) - u64_to_usize(MMIO64_MEM_SIZE),
        size,
    );
    if dram_size != size {
        logger::warn!(
            "Requested memory size {} exceeds architectural maximum (1022GiB). Size has been \
             truncated to {}",
            size,
            dram_size
        );
    }
    let mut regions = vec![];
    // Lay DRAM out around the 32-bit MMIO gap first; if memory remains past it,
    // lay the rest around the 64-bit MMIO gap too. Whatever is still left after
    // both gaps becomes the final region.
    if let Some((start_past_32bit_gap, remaining_past_32bit_gap)) = arch_memory_regions_with_gap(
        &mut regions,
        0,
        dram_size,
        u64_to_usize(MMIO32_MEM_START),
        u64_to_usize(MMIO32_MEM_SIZE),
    ) && let Some((start_past_64bit_gap, remaining_past_64bit_gap)) =
        arch_memory_regions_with_gap(
            &mut regions,
            start_past_32bit_gap,
            remaining_past_32bit_gap,
            u64_to_usize(MMIO64_MEM_START),
            u64_to_usize(MMIO64_MEM_SIZE),
        )
    {
        regions.push((
            GuestAddress(start_past_64bit_gap as u64),
            remaining_past_64bit_gap,
        ));
    }
    regions
}
/// Returns the memory address where the kernel could be loaded.
pub fn get_kernel_start() -> u64 {
    // Kernels are loaded at the start of high memory (HIMEM_START, 1 MiB),
    // leaving the legacy/real-mode area below it untouched.
    layout::HIMEM_START
}
/// Returns the memory address where the initrd could be loaded.
///
/// Places the initrd at the end of the low-memory region (the region starting
/// at guest physical address 0), aligned down to the guest page size. Returns
/// `None` when there is no region at address 0 or it is too small.
pub fn initrd_load_addr(guest_mem: &GuestMemoryMmap, initrd_size: usize) -> Option<u64> {
    let lowmem = guest_mem.find_region(GuestAddress::new(0))?;
    // `checked_sub` yields `None` exactly when the region cannot hold the initrd.
    let offset = u64_to_usize(lowmem.len()).checked_sub(initrd_size)?;
    Some(align_down(
        usize_to_u64(offset),
        usize_to_u64(super::GUEST_PAGE_SIZE),
    ))
}
/// Configures the system for booting Linux.
///
/// Builds and applies the CPU configuration to every vCPU, then writes the
/// kernel command line, the MP table, the boot-protocol specific structures
/// (PVH start info or the Linux "zero page") and the ACPI tables into guest
/// memory.
#[allow(clippy::too_many_arguments)]
pub fn configure_system_for_boot(
    kvm: &Kvm,
    vm: &Vm,
    device_manager: &mut DeviceManager,
    vcpus: &mut [Vcpu],
    machine_config: &MachineConfig,
    cpu_template: &CustomCpuTemplate,
    entry_point: EntryPoint,
    initrd: &Option<InitrdConfig>,
    boot_cmdline: Cmdline,
) -> Result<(), ConfigurationError> {
    // Construct the base CpuConfiguration to apply CPU template onto.
    let cpu_config = CpuConfiguration::new(kvm.supported_cpuid.clone(), cpu_template, &vcpus[0])?;
    // Apply CPU template to the base CpuConfiguration.
    let cpu_config = CpuConfiguration::apply_template(cpu_config, cpu_template)?;
    let vcpu_config = VcpuConfig {
        vcpu_count: machine_config.vcpu_count,
        smt: machine_config.smt,
        cpu_config,
    };
    // Configure vCPUs with normalizing and setting the generated CPU configuration.
    for vcpu in vcpus.iter_mut() {
        vcpu.kvm_vcpu
            .configure(vm.guest_memory(), entry_point, &vcpu_config)?;
    }
    // Write the kernel command line to guest memory. This is x86_64 specific, since on
    // aarch64 the command line will be specified through the FDT.
    let cmdline_size = boot_cmdline
        .as_cstring()
        .map(|cmdline_cstring| cmdline_cstring.as_bytes_with_nul().len())
        .expect("Cannot create cstring from cmdline string");
    load_cmdline(
        vm.guest_memory(),
        GuestAddress(crate::arch::x86_64::layout::CMDLINE_START),
        &boot_cmdline,
    )
    .map_err(ConfigurationError::LoadCommandline)?;
    // Note that this puts the mptable at the last 1k of Linux's 640k base RAM
    mptable::setup_mptable(
        vm.guest_memory(),
        &mut vm.resource_allocator(),
        vcpu_config.vcpu_count,
    )
    .map_err(ConfigurationError::MpTableSetup)?;
    // Write the protocol-specific boot structures; the protocol was decided at
    // kernel-load time (PVH entry note present or not).
    match entry_point.protocol {
        BootProtocol::PvhBoot => {
            configure_pvh(vm.guest_memory(), GuestAddress(CMDLINE_START), initrd)?;
        }
        BootProtocol::LinuxBoot => {
            configure_64bit_boot(
                vm.guest_memory(),
                GuestAddress(CMDLINE_START),
                cmdline_size,
                initrd,
            )?;
        }
    }
    // Create ACPI tables and write them in guest memory
    // For the time being we only support ACPI in x86_64
    create_acpi_tables(
        vm.guest_memory(),
        device_manager,
        &mut vm.resource_allocator(),
        vcpus,
    )?;
    Ok(())
}
/// Writes the PVH boot data (hvm_start_info, memory map, module list) to guest
/// memory, as required by the PVH boot ABI.
fn configure_pvh(
    guest_mem: &GuestMemoryMmap,
    cmdline_addr: GuestAddress,
    initrd: &Option<InitrdConfig>,
) -> Result<(), ConfigurationError> {
    // Magic value the guest checks to recognize a valid hvm_start_info.
    const XEN_HVM_START_MAGIC_VALUE: u32 = 0x336e_c578;
    let himem_start = GuestAddress(layout::HIMEM_START);
    // Vector to hold modules (currently either empty or holding initrd).
    let mut modules: Vec<hvm_modlist_entry> = Vec::new();
    if let Some(initrd_config) = initrd {
        // The initrd has been written to guest memory already, here we just need to
        // create the module structure that describes it.
        modules.push(hvm_modlist_entry {
            paddr: initrd_config.address.raw_value(),
            size: initrd_config.size as u64,
            ..Default::default()
        });
    }
    // Vector to hold the memory maps which needs to be written to guest memory
    // at MEMMAP_START after all of the mappings are recorded.
    let mut memmap: Vec<hvm_memmap_table_entry> = Vec::new();
    // Create the memory map entries: RAM below the system-data carve-out...
    memmap.push(hvm_memmap_table_entry {
        addr: 0,
        size: SYSTEM_MEM_START,
        type_: MEMMAP_TYPE_RAM,
        ..Default::default()
    });
    // ...the system-data region (MPTable, ACPI) as reserved (E820_RESERVED == 2,
    // which matches the PVH memmap "reserved" type value)...
    memmap.push(hvm_memmap_table_entry {
        addr: SYSTEM_MEM_START,
        size: SYSTEM_MEM_SIZE,
        type_: E820_RESERVED,
        ..Default::default()
    });
    // ...and the PCIe MMCONFIG window, likewise reserved.
    memmap.push(hvm_memmap_table_entry {
        addr: PCI_MMCONFIG_START,
        size: PCI_MMCONFIG_SIZE,
        type_: E820_RESERVED,
        ..Default::default()
    });
    for region in guest_mem
        .iter()
        .filter(|region| region.region_type == GuestRegionType::Dram)
    {
        // the first 1MB is reserved for the kernel
        let addr = max(himem_start, region.start_addr());
        memmap.push(hvm_memmap_table_entry {
            addr: addr.raw_value(),
            size: region.last_addr().unchecked_offset_from(addr) + 1,
            type_: MEMMAP_TYPE_RAM,
            ..Default::default()
        });
    }
    // Construct the hvm_start_info structure and serialize it into
    // boot_params. This will be stored at PVH_INFO_START address, and %rbx
    // will be initialized to contain PVH_INFO_START prior to starting the
    // guest, as required by the PVH ABI.
    #[allow(clippy::cast_possible_truncation)] // the vec lengths are single digit integers
    let mut start_info = hvm_start_info {
        magic: XEN_HVM_START_MAGIC_VALUE,
        version: 1,
        cmdline_paddr: cmdline_addr.raw_value(),
        memmap_paddr: layout::MEMMAP_START,
        memmap_entries: memmap.len() as u32,
        nr_modules: modules.len() as u32,
        ..Default::default()
    };
    if !modules.is_empty() {
        start_info.modlist_paddr = layout::MODLIST_START;
    }
    let mut boot_params =
        BootParams::new::<hvm_start_info>(&start_info, GuestAddress(layout::PVH_INFO_START));
    // Copy the vector with the memmap table to the MEMMAP_START address
    // which is already saved in the memmap_paddr field of hvm_start_info struct.
    boot_params.set_sections::<hvm_memmap_table_entry>(&memmap, GuestAddress(layout::MEMMAP_START));
    // Copy the vector with the modules list to the MODLIST_START address.
    // Note that we only set the modlist_paddr address if there is a nonzero
    // number of modules, but serializing an empty list is harmless.
    boot_params.set_modules::<hvm_modlist_entry>(&modules, GuestAddress(layout::MODLIST_START));
    // Write the hvm_start_info struct to guest memory.
    PvhBootConfigurator::write_bootparams(&boot_params, guest_mem)
        .map_err(|_| ConfigurationError::StartInfoSetup)
}
/// Writes the Linux boot protocol "zero page" (`boot_params`) to guest memory,
/// including the e820 memory map and, if present, the initrd location.
fn configure_64bit_boot(
    guest_mem: &GuestMemoryMmap,
    cmdline_addr: GuestAddress,
    cmdline_size: usize,
    initrd: &Option<InitrdConfig>,
) -> Result<(), ConfigurationError> {
    const KERNEL_BOOT_FLAG_MAGIC: u16 = 0xaa55;
    const KERNEL_HDR_MAGIC: u32 = 0x5372_6448;
    const KERNEL_LOADER_OTHER: u8 = 0xff;
    const KERNEL_MIN_ALIGNMENT_BYTES: u32 = 0x0100_0000; // Must be non-zero.
    let himem_start = GuestAddress(layout::HIMEM_START);
    // Set the location of RSDP in Boot Parameters to help the guest kernel find it faster.
    let mut params = boot_params {
        acpi_rsdp_addr: layout::RSDP_ADDR,
        ..Default::default()
    };
    params.hdr.type_of_loader = KERNEL_LOADER_OTHER;
    params.hdr.boot_flag = KERNEL_BOOT_FLAG_MAGIC;
    params.hdr.header = KERNEL_HDR_MAGIC;
    params.hdr.cmd_line_ptr = u32::try_from(cmdline_addr.raw_value()).unwrap();
    params.hdr.cmdline_size = u32::try_from(cmdline_size).unwrap();
    params.hdr.kernel_alignment = KERNEL_MIN_ALIGNMENT_BYTES;
    if let Some(initrd_config) = initrd {
        params.hdr.ramdisk_image = u32::try_from(initrd_config.address.raw_value()).unwrap();
        params.hdr.ramdisk_size = u32::try_from(initrd_config.size).unwrap();
    }
    // We mark the first [0x0, SYSTEM_MEM_START) region as usable RAM and the subsequent
    // [SYSTEM_MEM_START, SYSTEM_MEM_START + SYSTEM_MEM_SIZE) as reserved (note
    // SYSTEM_MEM_START + SYSTEM_MEM_SIZE == RSDP_ADDR, i.e. the reservation ends
    // where the RSDP lives).
    add_e820_entry(&mut params, 0, layout::SYSTEM_MEM_START, E820_RAM)?;
    add_e820_entry(
        &mut params,
        layout::SYSTEM_MEM_START,
        layout::SYSTEM_MEM_SIZE,
        E820_RESERVED,
    )?;
    // The PCIe MMCONFIG window is reserved as well.
    add_e820_entry(
        &mut params,
        PCI_MMCONFIG_START,
        PCI_MMCONFIG_SIZE,
        E820_RESERVED,
    )?;
    for region in guest_mem
        .iter()
        .filter(|region| region.region_type == GuestRegionType::Dram)
    {
        // the first 1MB is reserved for the kernel
        let addr = max(himem_start, region.start_addr());
        add_e820_entry(
            &mut params,
            addr.raw_value(),
            region.last_addr().unchecked_offset_from(addr) + 1,
            E820_RAM,
        )?;
    }
    LinuxBootConfigurator::write_bootparams(
        &BootParams::new(&params, GuestAddress(layout::ZERO_PAGE_START)),
        guest_mem,
    )
    .map_err(|_| ConfigurationError::ZeroPageSetup)
}
/// Add an e820 region to the e820 map.
/// Returns Ok(()) if successful, or an error if there is no space left in the map.
fn add_e820_entry(
    params: &mut boot_params,
    addr: u64,
    size: u64,
    mem_type: u32,
) -> Result<(), ConfigurationError> {
    // The next free slot is indexed by the current entry count; `get_mut`
    // fails exactly when the table is already full.
    let slot = usize::from(params.e820_entries);
    let entry = params
        .e820_table
        .get_mut(slot)
        .ok_or(ConfigurationError::E820Configuration)?;
    entry.addr = addr;
    entry.size = size;
    entry.type_ = mem_type;
    params.e820_entries += 1;
    Ok(())
}
/// Load linux kernel into guest memory.
///
/// Returns the guest-physical entry point together with the boot protocol to
/// use: PVH when the ELF image advertises a PVH entry point, plain Linux boot
/// otherwise.
pub fn load_kernel(
    kernel: &File,
    guest_memory: &GuestMemoryMmap,
) -> Result<EntryPoint, ConfigurationError> {
    // Need to clone the File because reading from it
    // mutates it.
    let mut kernel_file = kernel
        .try_clone()
        .map_err(|_| ConfigurationError::KernelFile)?;
    let entry_addr = Loader::load(
        guest_memory,
        None,
        &mut kernel_file,
        Some(GuestAddress(get_kernel_start())),
    )
    .map_err(ConfigurationError::KernelLoader)?;
    // Default to the regular Linux boot entry point...
    let mut entry_point_addr: GuestAddress = entry_addr.kernel_load;
    let mut boot_prot: BootProtocol = BootProtocol::LinuxBoot;
    if let PvhBootCapability::PvhEntryPresent(pvh_entry_addr) = entry_addr.pvh_boot_cap {
        // Use the PVH kernel entry point to boot the guest
        entry_point_addr = pvh_entry_addr;
        boot_prot = BootProtocol::PvhBoot;
    }
    debug!("Kernel loaded using {boot_prot}");
    Ok(EntryPoint {
        entry_addr: entry_point_addr,
        protocol: boot_prot,
    })
}
#[cfg(kani)]
mod verification {
    use crate::arch::arch_memory_regions;
    use crate::arch::x86_64::layout::{
        FIRST_ADDR_PAST_32BITS, FIRST_ADDR_PAST_64BITS_MMIO, MMIO32_MEM_SIZE, MMIO32_MEM_START,
        MMIO64_MEM_SIZE, MMIO64_MEM_START,
    };
    use crate::utils::u64_to_usize;

    // Proof harness: checks structural invariants of `arch_memory_regions`
    // for an arbitrary non-zero requested length.
    #[kani::proof]
    #[kani::unwind(4)]
    fn verify_arch_memory_regions() {
        let len: u64 = kani::any::<u64>();
        kani::assume(len > 0);
        let regions = arch_memory_regions(len as usize);
        // There are two MMIO gaps, so we can get either 1, 2 or 3 regions
        assert!(regions.len() <= 3);
        assert!(regions.len() >= 1);
        // The first address is always 0
        assert_eq!(regions[0].0.0, 0);
        // The total length of all regions is what we requested
        let actual_size = regions.iter().map(|&(_, len)| len).sum::<usize>();
        assert!(actual_size <= len as usize);
        // Sizes only differ when the request was truncated to the architectural max.
        if actual_size < u64_to_usize(len) {
            assert_eq!(
                actual_size,
                usize::MAX - u64_to_usize(MMIO32_MEM_SIZE) - u64_to_usize(MMIO64_MEM_SIZE)
            );
        }
        // No region overlaps the MMIO gap
        assert!(
            regions
                .iter()
                .all(|&(start, len)| (start.0 >= FIRST_ADDR_PAST_32BITS
                    || start.0 + len as u64 <= MMIO32_MEM_START)
                    && (start.0 >= FIRST_ADDR_PAST_64BITS_MMIO
                        || start.0 + len as u64 <= MMIO64_MEM_START))
        );
        // All regions have non-zero length
        assert!(regions.iter().all(|&(_, len)| len > 0));
        // If there's at least two regions, they perfectly snuggle up to one of the two MMIO gaps
        if regions.len() >= 2 {
            kani::cover!();
            assert_eq!(regions[0].0.0 + regions[0].1 as u64, MMIO32_MEM_START);
            assert_eq!(regions[1].0.0, FIRST_ADDR_PAST_32BITS);
        }
        // If there are three regions, the last two perfectly snuggle up to the 64bit
        // MMIO gap
        if regions.len() == 3 {
            kani::cover!();
            assert_eq!(regions[1].0.0 + regions[1].1 as u64, MMIO64_MEM_START);
            assert_eq!(regions[2].0.0, FIRST_ADDR_PAST_64BITS_MMIO);
        }
    }
}
#[cfg(test)]
mod tests {
    use linux_loader::loader::bootparam::boot_e820_entry;

    use super::*;
    use crate::arch::x86_64::layout::FIRST_ADDR_PAST_32BITS;
    use crate::test_utils::{arch_mem, single_region_mem};
    use crate::utils::mib_to_bytes;
    use crate::vstate::resources::ResourceAllocator;

    // A request below the 32-bit MMIO gap yields a single region starting at 0.
    #[test]
    fn regions_lt_4gb() {
        let regions = arch_memory_regions(1usize << 29);
        assert_eq!(1, regions.len());
        assert_eq!(GuestAddress(0), regions[0].0);
        assert_eq!(1usize << 29, regions[0].1);
    }

    // A request past 4 GiB is split around the 32-bit MMIO gap into two regions.
    #[test]
    fn regions_gt_4gb() {
        const MEMORY_SIZE: usize = (1 << 32) + 0x8000;
        let regions = arch_memory_regions(MEMORY_SIZE);
        assert_eq!(2, regions.len());
        assert_eq!(GuestAddress(0), regions[0].0);
        assert_eq!(GuestAddress(1u64 << 32), regions[1].0);
        assert_eq!(
            regions[1],
            (
                GuestAddress(FIRST_ADDR_PAST_32BITS),
                MEMORY_SIZE - regions[0].1
            )
        )
    }

    #[test]
    fn test_system_configuration() {
        let no_vcpus = 4;
        // Guest memory too small to contain the MP table at SYSTEM_MEM_START.
        let gm = single_region_mem(0x10000);
        let mut resource_allocator = ResourceAllocator::new();
        let err = mptable::setup_mptable(&gm, &mut resource_allocator, 1);
        assert!(matches!(
            err.unwrap_err(),
            mptable::MptableError::NotEnoughMemory
        ));

        // Now assigning some memory that falls before the 32bit memory hole.
        let mem_size = mib_to_bytes(128);
        let gm = arch_mem(mem_size);
        let mut resource_allocator = ResourceAllocator::new();
        mptable::setup_mptable(&gm, &mut resource_allocator, no_vcpus).unwrap();
        configure_64bit_boot(&gm, GuestAddress(0), 0, &None).unwrap();
        configure_pvh(&gm, GuestAddress(0), &None).unwrap();

        // Now assigning some memory that is equal to the start of the 32bit memory hole.
        let mem_size = mib_to_bytes(3328);
        let gm = arch_mem(mem_size);
        let mut resource_allocator = ResourceAllocator::new();
        mptable::setup_mptable(&gm, &mut resource_allocator, no_vcpus).unwrap();
        configure_64bit_boot(&gm, GuestAddress(0), 0, &None).unwrap();
        configure_pvh(&gm, GuestAddress(0), &None).unwrap();

        // Now assigning some memory that falls after the 32bit memory hole.
        let mem_size = mib_to_bytes(3330);
        let gm = arch_mem(mem_size);
        let mut resource_allocator = ResourceAllocator::new();
        mptable::setup_mptable(&gm, &mut resource_allocator, no_vcpus).unwrap();
        configure_64bit_boot(&gm, GuestAddress(0), 0, &None).unwrap();
        configure_pvh(&gm, GuestAddress(0), &None).unwrap();
    }

    #[test]
    fn test_add_e820_entry() {
        let e820_map = [(boot_e820_entry {
            addr: 0x1,
            size: 4,
            type_: 1,
        }); 128];
        let expected_params = boot_params {
            e820_table: e820_map,
            e820_entries: 1,
            ..Default::default()
        };
        let mut params: boot_params = Default::default();
        add_e820_entry(
            &mut params,
            e820_map[0].addr,
            e820_map[0].size,
            e820_map[0].type_,
        )
        .unwrap();
        // Compare the Debug representations of the entries.
        assert_eq!(
            format!("{:?}", params.e820_table[0]),
            format!("{:?}", expected_params.e820_table[0])
        );
        assert_eq!(params.e820_entries, expected_params.e820_entries);
        // Exercise the scenario where the field storing the length of the e820 entry table
        // is bigger than the allocated memory.
        params.e820_entries = u8::try_from(params.e820_table.len()).unwrap() + 1;
        assert!(
            add_e820_entry(
                &mut params,
                e820_map[0].addr,
                e820_map[0].size,
                e820_map[0].type_
            )
            .is_err()
        );
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/x86_64/layout.rs | src/vmm/src/arch/x86_64/layout.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! Magic addresses externally used to lay out x86_64 VMs.
use crate::device_manager::mmio::MMIO_LEN;
use crate::utils::mib_to_bytes;
/// Initial stack for the boot CPU.
pub const BOOT_STACK_POINTER: u64 = 0x8ff0;
/// Kernel command line start address.
pub const CMDLINE_START: u64 = 0x20000;
/// Kernel command line maximum size.
pub const CMDLINE_MAX_SIZE: usize = 2048;
/// Start of the high memory.
pub const HIMEM_START: u64 = 0x0010_0000; // 1 MB.
// Typically, on x86 systems 24 IRQs are used for legacy devices (0-23).
// However, the first 5 are reserved.
// We allocate the remaining GSIs to MSIs.
/// First usable GSI for legacy interrupts (IRQ) on x86_64.
pub const GSI_LEGACY_START: u32 = 5;
/// Last usable GSI for legacy interrupts (IRQ) on x86_64.
pub const GSI_LEGACY_END: u32 = 23;
/// Number of legacy GSI (IRQ) available on x86_64.
pub const GSI_LEGACY_NUM: u32 = GSI_LEGACY_END - GSI_LEGACY_START + 1;
/// First GSI used by MSI after legacy GSI.
pub const GSI_MSI_START: u32 = GSI_LEGACY_END + 1;
/// The highest available GSI in KVM (KVM_MAX_IRQ_ROUTES=4096).
pub const GSI_MSI_END: u32 = 4095;
/// Number of GSI available for MSI.
pub const GSI_MSI_NUM: u32 = GSI_MSI_END - GSI_MSI_START + 1;
/// Address for the TSS setup.
pub const KVM_TSS_ADDRESS: u64 = 0xfffb_d000;
/// Address of the hvm_start_info struct used in PVH boot
pub const PVH_INFO_START: u64 = 0x6000;
/// Starting address of array of modules of hvm_modlist_entry type.
/// Used to enable initrd support using the PVH boot ABI.
pub const MODLIST_START: u64 = 0x6040;
/// Address of memory map table used in PVH boot. Can overlap
/// with the zero page address since they are mutually exclusive.
pub const MEMMAP_START: u64 = 0x7000;
/// The 'zero page', a.k.a linux kernel bootparams.
pub const ZERO_PAGE_START: u64 = 0x7000;
/// APIC address
pub const APIC_ADDR: u32 = 0xfee0_0000;
/// IOAPIC address
pub const IOAPIC_ADDR: u32 = 0xfec0_0000;
/// Location of RSDP pointer in x86 machines
pub const RSDP_ADDR: u64 = 0x000e_0000;
/// Start of memory region we will use for system data (MPTable, ACPI, etc). We are putting its
/// start address where EBDA normally starts, i.e. in the last 1 KiB of the first 640KiB of memory
pub const SYSTEM_MEM_START: u64 = 0x9fc00;
/// Size of memory region for system data.
///
/// We reserve the memory between the start of the EBDA up until the location of RSDP pointer,
/// [0x9fc00, 0xe0000) for system data. This is 257 KiB of memory which is enough for our needs and
/// future proof.
///
/// For ACPI we currently need:
///
/// FADT size: 276 bytes
/// XSDT size: 52 bytes (header: 36 bytes, plus pointers of FADT and MADT)
/// MADT size: 2104 bytes (header: 44 bytes, IO-APIC: 12 bytes, LocalAPIC: 8 * #vCPUS)
/// DSDT size: 1907 bytes (header: 36 bytes, legacy devices: 345, GED: 161, VMGenID: 87, VirtIO
/// devices: 71 bytes per device)
///
/// The above assumes a maximum of 256 vCPUs, because that's what ACPI allows, but currently
/// we have a hard limit of up to 32 vCPUs.
///
/// Moreover, for MPTable we need up to 5304 bytes (284 + 20 * #vCPUS) assuming again
/// a maximum number of 256 vCPUs.
///
/// 257KiB is more than we need, however we reserve this space for potential future use of
/// ACPI features (new tables and/or devices).
pub const SYSTEM_MEM_SIZE: u64 = RSDP_ADDR - SYSTEM_MEM_START;
/// First address that cannot be addressed using 32 bit anymore.
pub const FIRST_ADDR_PAST_32BITS: u64 = 1 << 32;
/// The size of the memory area reserved for MMIO 32-bit accesses.
pub const MMIO32_MEM_SIZE: u64 = mib_to_bytes(1024) as u64;
/// The start of the memory area reserved for MMIO 32-bit accesses.
pub const MMIO32_MEM_START: u64 = FIRST_ADDR_PAST_32BITS - MMIO32_MEM_SIZE;
// We dedicate the last 256 MiB of the 32-bit MMIO address space PCIe for memory-mapped access to
// configuration.
/// Size of MMIO region for PCIe configuration accesses.
pub const PCI_MMCONFIG_SIZE: u64 = 256 << 20;
/// Start of MMIO region for PCIe configuration accesses.
pub const PCI_MMCONFIG_START: u64 = IOAPIC_ADDR as u64 - PCI_MMCONFIG_SIZE;
/// MMIO space per PCIe segment
pub const PCI_MMIO_CONFIG_SIZE_PER_SEGMENT: u64 = 4096 * 256;
// We reserve 768 MiB for devices at the beginning of the MMIO region. This includes space both for
// pure MMIO and PCIe devices.
/// Memory region start for boot device.
pub const BOOT_DEVICE_MEM_START: u64 = MMIO32_MEM_START;
/// Beginning of memory region for device MMIO 32-bit accesses
pub const MEM_32BIT_DEVICES_START: u64 = BOOT_DEVICE_MEM_START + MMIO_LEN;
/// Size of memory region for device MMIO 32-bit accesses
pub const MEM_32BIT_DEVICES_SIZE: u64 = PCI_MMCONFIG_START - MEM_32BIT_DEVICES_START;
// 64-bits region for MMIO accesses
/// The start of the memory area reserved for MMIO 64-bit accesses.
pub const MMIO64_MEM_START: u64 = 256 << 30;
/// The size of the memory area reserved for MMIO 64-bit accesses.
pub const MMIO64_MEM_SIZE: u64 = 256 << 30;
// At the moment, all of this region goes to devices
/// Beginning of memory region for device MMIO 64-bit accesses
pub const MEM_64BIT_DEVICES_START: u64 = MMIO64_MEM_START;
/// Size of memory region for device MMIO 64-bit accesses
pub const MEM_64BIT_DEVICES_SIZE: u64 = MMIO64_MEM_SIZE;
/// First address past the 64-bit MMIO gap
pub const FIRST_ADDR_PAST_64BITS_MMIO: u64 = MMIO64_MEM_START + MMIO64_MEM_SIZE;
/// Size of the memory past 64-bit MMIO gap
pub const PAST_64BITS_MMIO_SIZE: u64 = 512 << 30;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/x86_64/msr.rs | src/vmm/src/arch/x86_64/msr.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/// Model Specific Registers (MSRs) related functionality.
use bitflags::bitflags;
use kvm_bindings::{MsrList, Msrs, kvm_msr_entry};
use kvm_ioctls::{Kvm, VcpuFd};
use crate::arch::x86_64::generated::hyperv::*;
use crate::arch::x86_64::generated::hyperv_tlfs::*;
use crate::arch::x86_64::generated::msr_index::*;
use crate::arch::x86_64::generated::perf_event::*;
use crate::cpu_config::x86_64::cpuid::common::GetCpuidError;
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
/// MSR related errors.
// NOTE: each variant's doc comment doubles as its `Display` message via
// `displaydoc`, so the text below is user-visible error output.
pub enum MsrError {
    /// Failed to create `vmm_sys_util::fam::FamStructWrapper` for MSRs
    Fam(#[from] vmm_sys_util::fam::Error),
    /// Failed to get MSR index list: {0}
    GetMsrIndexList(kvm_ioctls::Error),
    /// Invalid CPU vendor: {0}
    InvalidVendor(#[from] GetCpuidError),
    /// Failed to set MSRs: {0}
    SetMsrs(kvm_ioctls::Error),
    /// Not all given MSRs were set.
    SetMsrsIncomplete,
}
/// MSR range
#[derive(Debug)]
pub struct MsrRange {
    /// Base MSR address
    pub base: u32,
    /// Number of MSRs
    pub nmsrs: u32,
}

impl MsrRange {
    /// Returns whether `msr` is contained in this MSR range.
    ///
    /// The range is half-open: `[base, base + nmsrs)`, so a range with
    /// `nmsrs == 0` contains nothing.
    pub fn contains(&self, msr: u32) -> bool {
        (self.base..self.base + self.nmsrs).contains(&msr)
    }
}
/// Base MSR for APIC
const APIC_BASE_MSR: u32 = 0x800;
/// Number of APIC MSR indexes
const APIC_MSR_INDEXES: u32 = 0x400;
/// Custom MSRs fall in the range 0x4b564d00-0x4b564dff
const MSR_KVM_WALL_CLOCK_NEW: u32 = 0x4b56_4d00;
const MSR_KVM_SYSTEM_TIME_NEW: u32 = 0x4b56_4d01;
const MSR_KVM_ASYNC_PF_EN: u32 = 0x4b56_4d02;
const MSR_KVM_STEAL_TIME: u32 = 0x4b56_4d03;
const MSR_KVM_PV_EOI_EN: u32 = 0x4b56_4d04;
const MSR_KVM_POLL_CONTROL: u32 = 0x4b56_4d05;
const MSR_KVM_ASYNC_PF_INT: u32 = 0x4b56_4d06;
/// Taken from arch/x86/include/asm/msr-index.h
/// Spectre mitigations control MSR
pub const MSR_IA32_SPEC_CTRL: u32 = 0x0000_0048;
/// Architecture capabilities MSR
pub const MSR_IA32_ARCH_CAPABILITIES: u32 = 0x0000_010a;
const MSR_IA32_PRED_CMD: u32 = 0x0000_0049;
bitflags! {
    /// Feature flags enumerated in the IA32_ARCH_CAPABILITIES MSR.
    /// See https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/cpuid-enumeration-and-architectural-msrs.html
    // NOTE: bit positions follow the architectural MSR layout; reserved bits
    // (9, 16, 21:22, 24:63) intentionally have no named flag.
    #[derive(Default)]
    #[repr(C)]
    pub struct ArchCapaMSRFlags: u64 {
        /// The processor is not susceptible to Rogue Data Cache Load (RDCL).
        const RDCL_NO = 1 << 0;
        /// The processor supports enhanced Indirect Branch Restriction Speculation (IBRS)
        const IBRS_ALL = 1 << 1;
        /// The processor supports RSB Alternate. Alternative branch predictors may be used by RET instructions
        /// when the RSB is empty. Software using retpoline may be affected by this behavior.
        const RSBA = 1 << 2;
        /// A value of 1 indicates the hypervisor need not flush the L1D on VM entry.
        const SKIP_L1DFL_VMENTRY = 1 << 3;
        /// Processor is not susceptible to Speculative Store Bypass (SSB).
        const SSB_NO = 1 << 4;
        /// Processor is not susceptible to Microarchitectural Data Sampling (MDS).
        const MDS_NO = 1 << 5;
        /// The processor is not susceptible to a machine check error due to modifying the size of a code page
        /// without TLB invalidation.
        const IF_PSCHANGE_MC_NO = 1 << 6;
        /// The processor supports RTM_DISABLE and TSX_CPUID_CLEAR.
        const TSX_CTRL = 1 << 7;
        /// Processor is not susceptible to Intel® Transactional Synchronization Extensions
        /// (Intel® TSX) Asynchronous Abort (TAA).
        const TAA_NO = 1 << 8;
        // Bit 9 is reserved
        /// Processor supports IA32_MISC_PACKAGE_CTRLS MSR.
        const MISC_PACKAGE_CTRLS = 1 << 10;
        /// Processor supports setting and reading IA32_MISC_PACKAGE_CTLS[0] (ENERGY_FILTERING_ENABLE) bit.
        const ENERGY_FILTERING_CTL = 1 << 11;
        /// The processor supports data operand independent timing mode.
        const DOITM = 1 << 12;
        /// The processor is not affected by either the Shared Buffers Data Read (SBDR) vulnerability or the
        /// Sideband Stale Data Propagator (SSDP).
        const SBDR_SSDP_NO = 1 << 13;
        /// The processor is not affected by the Fill Buffer Stale Data Propagator (FBSDP).
        const FBSDP_NO = 1 << 14;
        /// The processor is not affected by vulnerabilities involving the Primary Stale Data Propagator (PSDP).
        const PSDP_NO = 1 << 15;
        // Bit 16 is reserved
        /// The processor will overwrite fill buffer values as part of MD_CLEAR operations with the VERW instruction.
        /// On these processors, L1D_FLUSH does not overwrite fill buffer values.
        const FB_CLEAR = 1 << 17;
        /// The processor supports read and write to the IA32_MCU_OPT_CTRL MSR (MSR 123H) and to the FB_CLEAR_DIS bit
        /// in that MSR (bit position 3).
        const FB_CLEAR_CTRL = 1 << 18;
        /// A value of 1 indicates processor may have the RRSBA alternate prediction behavior,
        /// if not disabled by RRSBA_DIS_U or RRSBA_DIS_S.
        const RRSBA = 1 << 19;
        /// A value of 1 indicates BHI_NO branch prediction behavior,
        /// regardless of the value of IA32_SPEC_CTRL[BHI_DIS_S] MSR bit.
        const BHI_NO = 1 << 20;
        // Bits 21:22 are reserved
        /// If set, the IA32_OVERCLOCKING STATUS MSR exists.
        const OVERCLOCKING_STATUS = 1 << 23;
        // Bits 24:63 are reserved
    }
}
/// Macro for generating a MsrRange.
#[macro_export]
macro_rules! MSR_RANGE {
    // Two-argument form: an explicit base MSR address and MSR count.
    ($base:expr, $nmsrs:expr) => {
        MsrRange {
            base: $base,
            nmsrs: $nmsrs,
        }
    };
    // Single-argument form: a range covering exactly one MSR.
    ($base:expr) => {
        MSR_RANGE!($base, 1)
    };
}
/// List of MSR ranges that can be serialized (snapshot/restore).
/// List is sorted in ascending order of MSR addresses.
static SERIALIZABLE_MSR_RANGES: &[MsrRange] = &[
    MSR_RANGE!(MSR_IA32_P5_MC_ADDR),
    MSR_RANGE!(MSR_IA32_P5_MC_TYPE),
    MSR_RANGE!(MSR_IA32_TSC),
    MSR_RANGE!(MSR_IA32_PLATFORM_ID),
    MSR_RANGE!(MSR_IA32_APICBASE),
    MSR_RANGE!(MSR_IA32_EBL_CR_POWERON),
    MSR_RANGE!(MSR_EBC_FREQUENCY_ID),
    MSR_RANGE!(MSR_SMI_COUNT),
    MSR_RANGE!(MSR_IA32_FEAT_CTL),
    MSR_RANGE!(MSR_IA32_TSC_ADJUST),
    MSR_RANGE!(MSR_IA32_SPEC_CTRL),
    MSR_RANGE!(MSR_IA32_PRED_CMD),
    MSR_RANGE!(MSR_IA32_UCODE_WRITE),
    MSR_RANGE!(MSR_IA32_UCODE_REV),
    MSR_RANGE!(MSR_IA32_SMBASE),
    MSR_RANGE!(MSR_FSB_FREQ),
    MSR_RANGE!(MSR_PLATFORM_INFO),
    MSR_RANGE!(MSR_PKG_CST_CONFIG_CONTROL),
    MSR_RANGE!(MSR_IA32_MPERF),
    MSR_RANGE!(MSR_IA32_APERF),
    MSR_RANGE!(MSR_MTRRcap),
    MSR_RANGE!(MSR_IA32_BBL_CR_CTL3),
    MSR_RANGE!(MSR_IA32_SYSENTER_CS),
    MSR_RANGE!(MSR_IA32_SYSENTER_ESP),
    MSR_RANGE!(MSR_IA32_SYSENTER_EIP),
    MSR_RANGE!(MSR_IA32_MCG_CAP),
    MSR_RANGE!(MSR_IA32_MCG_STATUS),
    // NOTE: IA32_MCG_CTL is in this list but explicitly denied by
    // `msr_should_serialize` because it is not exported by Linux.
    MSR_RANGE!(MSR_IA32_MCG_CTL),
    MSR_RANGE!(MSR_IA32_PERF_STATUS),
    MSR_RANGE!(MSR_IA32_MISC_ENABLE),
    MSR_RANGE!(MSR_MISC_FEATURE_CONTROL),
    MSR_RANGE!(MSR_MISC_PWR_MGMT),
    MSR_RANGE!(MSR_TURBO_RATIO_LIMIT),
    MSR_RANGE!(MSR_TURBO_RATIO_LIMIT1),
    MSR_RANGE!(MSR_IA32_DEBUGCTLMSR),
    MSR_RANGE!(MSR_IA32_LASTBRANCHFROMIP),
    MSR_RANGE!(MSR_IA32_LASTBRANCHTOIP),
    MSR_RANGE!(MSR_IA32_LASTINTFROMIP),
    MSR_RANGE!(MSR_IA32_LASTINTTOIP),
    MSR_RANGE!(MSR_IA32_POWER_CTL),
    MSR_RANGE!(
        // IA32_MTRR_PHYSBASE0
        0x200, 0x100
    ),
    MSR_RANGE!(
        // MSR_CORE_C3_RESIDENCY
        // MSR_CORE_C6_RESIDENCY
        // MSR_CORE_C7_RESIDENCY
        MSR_CORE_C3_RESIDENCY,
        3
    ),
    MSR_RANGE!(MSR_IA32_MC0_CTL, 0x80),
    MSR_RANGE!(MSR_RAPL_POWER_UNIT),
    MSR_RANGE!(
        // MSR_PKGC3_IRTL
        // MSR_PKGC6_IRTL
        // MSR_PKGC7_IRTL
        MSR_PKGC3_IRTL,
        3
    ),
    MSR_RANGE!(MSR_PKG_POWER_LIMIT),
    MSR_RANGE!(MSR_PKG_ENERGY_STATUS),
    MSR_RANGE!(MSR_PKG_PERF_STATUS),
    MSR_RANGE!(MSR_PKG_POWER_INFO),
    MSR_RANGE!(MSR_DRAM_POWER_LIMIT),
    MSR_RANGE!(MSR_DRAM_ENERGY_STATUS),
    MSR_RANGE!(MSR_DRAM_PERF_STATUS),
    MSR_RANGE!(MSR_DRAM_POWER_INFO),
    MSR_RANGE!(MSR_CONFIG_TDP_NOMINAL),
    MSR_RANGE!(MSR_CONFIG_TDP_LEVEL_1),
    MSR_RANGE!(MSR_CONFIG_TDP_LEVEL_2),
    MSR_RANGE!(MSR_CONFIG_TDP_CONTROL),
    MSR_RANGE!(MSR_TURBO_ACTIVATION_RATIO),
    MSR_RANGE!(MSR_IA32_TSC_DEADLINE),
    MSR_RANGE!(APIC_BASE_MSR, APIC_MSR_INDEXES),
    MSR_RANGE!(MSR_KVM_WALL_CLOCK_NEW),
    MSR_RANGE!(MSR_KVM_SYSTEM_TIME_NEW),
    MSR_RANGE!(MSR_KVM_ASYNC_PF_EN),
    MSR_RANGE!(MSR_KVM_STEAL_TIME),
    MSR_RANGE!(MSR_KVM_PV_EOI_EN),
    MSR_RANGE!(MSR_EFER),
    MSR_RANGE!(MSR_STAR),
    MSR_RANGE!(MSR_LSTAR),
    MSR_RANGE!(MSR_CSTAR),
    MSR_RANGE!(MSR_SYSCALL_MASK),
    MSR_RANGE!(MSR_FS_BASE),
    MSR_RANGE!(MSR_GS_BASE),
    MSR_RANGE!(MSR_KERNEL_GS_BASE),
    MSR_RANGE!(MSR_TSC_AUX),
    MSR_RANGE!(MSR_MISC_FEATURES_ENABLES),
    MSR_RANGE!(MSR_K7_HWCR),
    MSR_RANGE!(MSR_KVM_POLL_CONTROL),
    MSR_RANGE!(MSR_KVM_ASYNC_PF_INT),
    MSR_RANGE!(MSR_IA32_TSX_CTRL),
];
/// Specifies whether a particular MSR should be included in vcpu serialization.
///
/// # Arguments
///
/// * `index` - The index of the MSR that is checked whether it's needed for serialization.
pub fn msr_should_serialize(index: u32) -> bool {
    // IA32_MCG_CTL is denied explicitly: it is not exported by Linux, even
    // though it falls inside one of the serializable ranges.
    index != MSR_IA32_MCG_CTL
        && SERIALIZABLE_MSR_RANGES
            .iter()
            .any(|range| range.contains(index))
}
/// Returns the list of serializable MSR indices.
///
/// # Arguments
///
/// * `kvm_fd` - Ref to `kvm_ioctls::Kvm`.
///
/// # Errors
///
/// When:
/// - [`kvm_ioctls::Kvm::get_msr_index_list()`] errors.
pub fn get_msrs_to_save(kvm_fd: &Kvm) -> Result<MsrList, MsrError> {
    // Start from everything KVM reports, then keep only serializable indices.
    let mut indices = kvm_fd
        .get_msr_index_list()
        .map_err(MsrError::GetMsrIndexList)?;
    indices.retain(|&index| msr_should_serialize(index));
    Ok(indices)
}
// List of MSRs that cannot be dumped (used by `msr_is_dumpable` below).
//
// KVM_GET_MSR_INDEX_LIST returns some MSR indices that KVM_GET_MSRS fails to get depending on
// configuration. For example, Firecracker disables PMU by default in CPUID normalization for CPUID
// leaf 0xA. Due to this, some PMU-related MSRs cannot be retrieved via KVM_GET_MSRS. The dependency
// on CPUID leaf 0xA can be found in the following link.
// https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/arch/x86/kvm/vmx/pmu_intel.c?h=v5.10.176#n325
//
// The list of MSR indices returned by KVM_GET_MSR_INDEX_LIST can be found in the following link
// (`msrs_to_save_all` + `num_emulated_msrs`).
// https://elixir.bootlin.com/linux/v5.10.176/source/arch/x86/kvm/x86.c#L1211
const UNDUMPABLE_MSR_RANGES: [MsrRange; 17] = [
    // - MSR_ARCH_PERFMON_FIXED_CTRn (0x309..=0x30C): CPUID.0Ah:EDX[0:4] > 0
    MSR_RANGE!(MSR_ARCH_PERFMON_FIXED_CTR0, 4),
    // - MSR_CORE_PERF_FIXED_CTR_CTRL (0x38D): CPUID:0Ah:EAX[7:0] > 1
    // - MSR_CORE_PERF_GLOBAL_STATUS (0x38E): CPUID:0Ah:EAX[7:0] > 0 ||
    //   (CPUID.(EAX=07H,ECX=0):EBX[25] = 1 && CPUID.(EAX=014H,ECX=0):ECX[0] = 1)
    // - MSR_CORE_PERF_GLOBAL_CTRL (0x39F): CPUID.0AH: EAX[7:0] > 0
    // - MSR_CORE_PERF_GLOBAL_OVF_CTRL (0x390): CPUID.0AH: EAX[7:0] > 0 && CPUID.0AH: EAX[7:0] <= 3
    MSR_RANGE!(MSR_CORE_PERF_FIXED_CTR_CTRL, 4),
    // - MSR_ARCH_PERFMON_PERFCTRn (0xC1..=0xC8): CPUID.0AH:EAX[15:8] > 0
    MSR_RANGE!(MSR_ARCH_PERFMON_PERFCTR0, 8),
    // - MSR_ARCH_PERFMON_EVENTSELn (0x186..=0x18D): CPUID.0AH:EAX[15:8] > 0
    MSR_RANGE!(MSR_ARCH_PERFMON_EVENTSEL0, 8),
    // On kernel 4.14, IA32_MCG_CTL (0x17B) can be retrieved only if IA32_MCG_CAP.CTL_P[8] = 1 for
    // vCPU. IA32_MCG_CAP can be set up via KVM_X86_SETUP_MCE API, but Firecracker doesn't use it.
    // https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/arch/x86/kvm/x86.c?h=v4.14.311#n2553
    MSR_RANGE!(MSR_IA32_MCG_CTL),
    // Firecracker is not tested with nested virtualization. Some CPU templates intentionally
    // disable nested virtualization. If nested virtualization is disabled, VMX-related MSRs cannot
    // be dumped. It can be seen in the following link that VMX-related MSRs depend on whether
    // nested virtualization is allowed.
    // https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/arch/x86/kvm/vmx/vmx.c?h=v5.10.176#n1950
    // - MSR_IA32_VMX_BASIC (0x480)
    // - MSR_IA32_VMX_PINBASED_CTLS (0x481)
    // - MSR_IA32_VMX_PROCBASED_CTLS (0x482)
    // - MSR_IA32_VMX_EXIT_CTLS (0x483)
    // - MSR_IA32_VMX_ENTRY_CTLS (0x484)
    // - MSR_IA32_VMX_MISC (0x485)
    // - MSR_IA32_VMX_CR0_FIXED0 (0x486)
    // - MSR_IA32_VMX_CR0_FIXED1 (0x487)
    // - MSR_IA32_VMX_CR4_FIXED0 (0x488)
    // - MSR_IA32_VMX_CR4_FIXED1 (0x489)
    // - MSR_IA32_VMX_VMCS_ENUM (0x48A)
    // - MSR_IA32_VMX_PROCBASED_CTLS2 (0x48B)
    // - MSR_IA32_VMX_EPT_VPID_CAP (0x48C)
    // - MSR_IA32_VMX_TRUE_PINBASED_CTLS (0x48D)
    // - MSR_IA32_VMX_TRUE_PROCBASED_CTLS (0x48E)
    // - MSR_IA32_VMX_TRUE_EXIT_CTLS (0x48F)
    // - MSR_IA32_VMX_TRUE_ENTRY_CTLS (0x490)
    // - MSR_IA32_VMX_VMFUNC (0x491)
    MSR_RANGE!(MSR_IA32_VMX_BASIC, 18),
    // Firecracker doesn't work with Hyper-V. KVM_GET_MSRS fails on kernel 4.14 because it doesn't
    // have the following patch.
    // https://github.com/torvalds/linux/commit/44883f01fe6ae436a8604c47d8435276fef369b0
    // - HV_X64_MSR_GUEST_OS_ID (0x40000000)
    // - HV_X64_MSR_HYPERCALL (0x40000001)
    // - HV_X64_MSR_VP_INDEX (0x40000002)
    // - HV_X64_MSR_RESET (0x40000003)
    // - HV_X64_MSR_VP_RUNTIME (0x40000010)
    // - HV_X64_MSR_TIME_REF_COUNT (0x40000020)
    // - HV_X64_MSR_REFERENCE_TSC (0x40000021)
    // - HV_X64_MSR_TSC_FREQUENCY (0x40000022)
    // - HV_X64_MSR_APIC_FREQUENCY (0x40000023)
    // - HV_X64_MSR_VP_ASSIST_PAGE (0x40000073)
    // - HV_X64_MSR_SCONTROL (0x40000080)
    // - HV_X64_MSR_STIMER0_CONFIG (0x400000b0)
    // - HV_X64_MSR_SYNDBG_CONTROL (0x400000f1)
    // - HV_X64_MSR_SYNDBG_STATUS (0x400000f2)
    // - HV_X64_MSR_SYNDBG_SEND_BUFFER (0x400000f3)
    // - HV_X64_MSR_SYNDBG_RECV_BUFFER (0x400000f4)
    // - HV_X64_MSR_SYNDBG_PENDING_BUFFER (0x400000f5)
    // - HV_X64_MSR_SYNDBG_OPTIONS (0x400000ff)
    // - HV_X64_MSR_CRASH_Pn (0x40000100..=0x40000104)
    // - HV_X64_MSR_CRASH_CTL (0x40000105)
    // - HV_X64_MSR_REENLIGHTENMENT_CONTROL (0x40000106)
    // - HV_X64_MSR_TSC_EMULATION_CONTROL (0x40000107)
    // - HV_X64_MSR_TSC_EMULATION_STATUS (0x40000108)
    // - HV_X64_MSR_TSC_INVARIANT_CONTROL (0x40000118)
    MSR_RANGE!(HV_X64_MSR_GUEST_OS_ID, 4),
    MSR_RANGE!(HV_X64_MSR_VP_RUNTIME),
    MSR_RANGE!(HV_X64_MSR_TIME_REF_COUNT, 4),
    MSR_RANGE!(HV_X64_MSR_SCONTROL),
    MSR_RANGE!(HV_X64_MSR_VP_ASSIST_PAGE),
    MSR_RANGE!(HV_X64_MSR_STIMER0_CONFIG),
    MSR_RANGE!(HV_X64_MSR_SYNDBG_CONTROL, 5),
    MSR_RANGE!(HV_X64_MSR_SYNDBG_OPTIONS),
    MSR_RANGE!(HV_X64_MSR_CRASH_P0, 6),
    MSR_RANGE!(HV_X64_MSR_REENLIGHTENMENT_CONTROL, 3),
    MSR_RANGE!(HV_X64_MSR_TSC_INVARIANT_CONTROL),
];
/// Checks whether a particular MSR can be dumped.
///
/// # Arguments
///
/// * `index` - The index of the MSR that is checked whether it's needed for serialization.
pub fn msr_is_dumpable(index: u32) -> bool {
    // An MSR is dumpable iff it falls in none of the undumpable ranges.
    UNDUMPABLE_MSR_RANGES
        .iter()
        .all(|range| !range.contains(index))
}
/// Returns the list of dumpable MSR indices.
///
/// # Arguments
///
/// * `kvm_fd` - Ref to `Kvm`
///
/// # Errors
///
/// When:
/// - [`kvm_ioctls::Kvm::get_msr_index_list()`] errors.
pub fn get_msrs_to_dump(kvm_fd: &Kvm) -> Result<MsrList, MsrError> {
    // Start from everything KVM reports, then drop indices known to be undumpable.
    let mut indices = kvm_fd
        .get_msr_index_list()
        .map_err(MsrError::GetMsrIndexList)?;
    indices.retain(|&index| msr_is_dumpable(index));
    Ok(indices)
}
/// Creates and populates required MSR entries for booting Linux on X86_64.
pub fn create_boot_msr_entries() -> Vec<kvm_msr_entry> {
    // MSRs that only need to be zeroed at boot.
    let zeroed_msrs = [
        MSR_IA32_SYSENTER_CS,
        MSR_IA32_SYSENTER_ESP,
        MSR_IA32_SYSENTER_EIP,
        // x86_64 specific msrs, we only run on x86_64 not x86.
        MSR_STAR,
        MSR_CSTAR,
        MSR_KERNEL_GS_BASE,
        MSR_SYSCALL_MASK,
        MSR_LSTAR,
        // end of x86_64 specific code
        MSR_IA32_TSC,
    ];
    let mut entries: Vec<kvm_msr_entry> = zeroed_msrs
        .iter()
        .map(|&index| kvm_msr_entry {
            index,
            data: 0x0,
            ..Default::default()
        })
        .collect();
    // Enable fast-string operations.
    entries.push(kvm_msr_entry {
        index: MSR_IA32_MISC_ENABLE,
        data: u64::from(MSR_IA32_MISC_ENABLE_FAST_STRING),
        ..Default::default()
    });
    // set default memory type for physical memory outside configured
    // memory ranges to write-back by setting MTRR enable bit (11) and
    // setting memory type to write-back (value 6).
    // https://wiki.osdev.org/MTRR
    entries.push(kvm_msr_entry {
        index: MSR_MTRRdefType,
        data: (1 << 11) | 0x6,
        ..Default::default()
    });
    entries
}
/// Configure Model Specific Registers (MSRs) required to boot Linux for a given x86_64 vCPU.
///
/// # Arguments
///
/// * `vcpu` - Structure for the VCPU that holds the VCPU's fd.
///
/// # Errors
///
/// When:
/// - Failed to create [`vmm_sys_util::fam::FamStructWrapper`] for MSRs.
/// - [`kvm_ioctls::ioctls::vcpu::VcpuFd::set_msrs`] errors.
/// - [`kvm_ioctls::ioctls::vcpu::VcpuFd::set_msrs`] fails to write all given MSRs entries.
pub fn set_msrs(vcpu: &VcpuFd, msr_entries: &[kvm_msr_entry]) -> Result<(), MsrError> {
    let msrs = Msrs::from_entries(msr_entries)?;
    let requested = msrs.as_fam_struct_ref().nmsrs as usize;
    // KVM reports how many entries it actually wrote; a short write means some
    // MSRs were silently rejected, which we surface as an error.
    let written = vcpu.set_msrs(&msrs).map_err(MsrError::SetMsrs)?;
    if written == requested {
        Ok(())
    } else {
        Err(MsrError::SetMsrsIncomplete)
    }
}
#[cfg(test)]
mod tests {
    use kvm_ioctls::Kvm;
    use super::*;
    // Creates a throwaway vCPU on the host's KVM for exercising MSR ioctls.
    fn create_vcpu() -> VcpuFd {
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        vm.create_vcpu(0).unwrap()
    }
    // Every MSR inside the serializable ranges must be serializable, except
    // the explicitly denied IA32_MCG_CTL.
    #[test]
    fn test_msr_list_to_serialize() {
        for range in SERIALIZABLE_MSR_RANGES.iter() {
            for msr in range.base..(range.base + range.nmsrs) {
                let should = !matches!(msr, MSR_IA32_MCG_CTL);
                assert_eq!(msr_should_serialize(msr), should);
            }
        }
    }
    // Every MSR inside the undumpable ranges must be reported as not dumpable.
    #[test]
    fn test_msr_list_to_dump() {
        for range in UNDUMPABLE_MSR_RANGES.iter() {
            for msr in range.base..(range.base + range.nmsrs) {
                assert!(!msr_is_dumpable(msr));
            }
        }
    }
    #[test]
    #[allow(clippy::cast_ptr_alignment)]
    fn test_setup_msrs() {
        let vcpu = create_vcpu();
        let msr_boot_entries = create_boot_msr_entries();
        set_msrs(&vcpu, &msr_boot_entries).unwrap();
        // This test will check against the last MSR entry configured (the tenth one).
        // See create_msr_entries() for details.
        let test_kvm_msrs_entry = [kvm_msr_entry {
            index: MSR_IA32_MISC_ENABLE,
            ..Default::default()
        }];
        let mut kvm_msrs_wrapper = Msrs::from_entries(&test_kvm_msrs_entry).unwrap();
        // Get_msrs() returns the number of msrs that it succeed in reading.
        // We only want to read one in this test case scenario.
        let read_nmsrs = vcpu.get_msrs(&mut kvm_msrs_wrapper).unwrap();
        // Validate it only read one.
        assert_eq!(read_nmsrs, 1);
        // Official entries that were setup when we did setup_msrs. We need to assert that the
        // tenth one (i.e the one with index MSR_IA32_MISC_ENABLE has the data we
        // expect.
        let entry_vec = create_boot_msr_entries();
        assert_eq!(entry_vec[9], kvm_msrs_wrapper.as_slice()[0]);
    }
    #[test]
    fn test_set_valid_msrs() {
        // Test `set_msrs()` with a valid MSR entry. It should succeed, as IA32_TSC MSR is listed
        // in supported MSRs as of now.
        let vcpu = create_vcpu();
        let msr_entries = vec![kvm_msr_entry {
            index: MSR_IA32_TSC,
            data: 0,
            ..Default::default()
        }];
        set_msrs(&vcpu, &msr_entries).unwrap();
    }
    #[test]
    fn test_set_invalid_msrs() {
        // Test `set_msrs()` with an invalid MSR entry. It should fail, as MSR index 2 is not
        // listed in supported MSRs as of now. If hardware vendor adds this MSR index and KVM
        // supports this MSR, we need to change the index as needed.
        let vcpu = create_vcpu();
        let msr_entries = vec![kvm_msr_entry {
            index: 2,
            ..Default::default()
        }];
        assert_eq!(
            set_msrs(&vcpu, &msr_entries).unwrap_err(),
            MsrError::SetMsrsIncomplete
        );
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/x86_64/regs.rs | src/vmm/src/arch/x86_64/regs.rs | // Copyright © 2020, Oracle and/or its affiliates.
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::mem;
use kvm_bindings::{kvm_fpu, kvm_regs, kvm_sregs};
use kvm_ioctls::VcpuFd;
use super::super::{BootProtocol, EntryPoint};
use super::gdt::{gdt_entry, kvm_segment_from_gdt};
use crate::vstate::memory::{Address, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap};
// Initial pagetables.
// Guest-physical addresses at which the boot page-table pages are written
// (one 4 KiB page each; see `setup_page_tables`).
const PML4_START: u64 = 0x9000;
const PDPTE_START: u64 = 0xa000;
const PDE_START: u64 = 0xb000;
/// Errors thrown while setting up x86_64 registers.
// NOTE: the `///` variant docs below double as the user-visible error text via
// `displaydoc::Display` — editing them changes runtime error messages.
#[derive(Debug, thiserror::Error, displaydoc::Display, PartialEq, Eq)]
pub enum RegsError {
    /// Failed to get SREGs for this CPU: {0}
    GetStatusRegisters(kvm_ioctls::Error),
    /// Failed to set base registers for this CPU: {0}
    SetBaseRegisters(kvm_ioctls::Error),
    /// Failed to configure the FPU: {0}
    SetFPURegisters(kvm_ioctls::Error),
    /// Failed to set SREGs for this CPU: {0}
    SetStatusRegisters(kvm_ioctls::Error),
    /// Writing the GDT to RAM failed.
    WriteGDT,
    /// Writing the IDT to RAM failed
    WriteIDT,
    /// WritePDPTEAddress
    WritePDPTEAddress,
    /// WritePDEAddress
    WritePDEAddress,
    /// WritePML4Address
    WritePML4Address,
}
/// Error type for [`setup_fpu`].
// Wraps the errno returned by the failed `set_fpu` call.
#[derive(Debug, derive_more::From, PartialEq, Eq, thiserror::Error)]
#[error("Failed to setup FPU: {0}")]
pub struct SetupFpuError(vmm_sys_util::errno::Error);
/// Configure Floating-Point Unit (FPU) registers for a given CPU.
///
/// # Arguments
///
/// * `vcpu` - Structure for the VCPU that holds the VCPU's fd.
///
/// # Errors
///
/// When [`kvm_ioctls::ioctls::vcpu::VcpuFd::set_fpu`] errors.
pub fn setup_fpu(vcpu: &VcpuFd) -> Result<(), SetupFpuError> {
let fpu: kvm_fpu = kvm_fpu {
fcw: 0x37f,
mxcsr: 0x1f80,
..Default::default()
};
vcpu.set_fpu(&fpu).map_err(SetupFpuError)
}
/// Error type of [`setup_regs`].
// Wraps the errno returned by the failed `set_regs` call.
#[derive(Debug, derive_more::From, PartialEq, Eq, thiserror::Error)]
#[error("Failed to setup registers: {0}")]
pub struct SetupRegistersError(vmm_sys_util::errno::Error);
/// Configure base registers for a given CPU.
///
/// # Arguments
///
/// * `vcpu` - Structure for the VCPU that holds the VCPU's fd.
/// * `boot_ip` - Starting instruction pointer.
///
/// # Errors
///
/// When [`kvm_ioctls::ioctls::vcpu::VcpuFd::set_regs`] errors.
pub fn setup_regs(vcpu: &VcpuFd, entry_point: EntryPoint) -> Result<(), SetupRegistersError> {
let regs: kvm_regs = match entry_point.protocol {
BootProtocol::PvhBoot => kvm_regs {
// Configure regs as required by PVH boot protocol.
rflags: 0x0000_0000_0000_0002u64,
rbx: super::layout::PVH_INFO_START,
rip: entry_point.entry_addr.raw_value(),
..Default::default()
},
BootProtocol::LinuxBoot => kvm_regs {
// Configure regs as required by Linux 64-bit boot protocol.
rflags: 0x0000_0000_0000_0002u64,
rip: entry_point.entry_addr.raw_value(),
// Frame pointer. It gets a snapshot of the stack pointer (rsp) so that when adjustments
// are made to rsp (i.e. reserving space for local variables or pushing
// values on to the stack), local variables and function parameters are
// still accessible from a constant offset from rbp.
rsp: super::layout::BOOT_STACK_POINTER,
// Starting stack pointer.
rbp: super::layout::BOOT_STACK_POINTER,
// Must point to zero page address per Linux ABI. This is x86_64 specific.
rsi: super::layout::ZERO_PAGE_START,
..Default::default()
},
};
vcpu.set_regs(®s).map_err(SetupRegistersError)
}
/// Error type for [`setup_sregs`].
// NOTE: the `///` variant docs double as error text via `displaydoc::Display`.
#[derive(Debug, thiserror::Error, displaydoc::Display, PartialEq, Eq)]
pub enum SetupSpecialRegistersError {
    /// Failed to get special registers: {0}
    GetSpecialRegisters(vmm_sys_util::errno::Error),
    /// Failed to configure segments and special registers: {0}
    ConfigureSegmentsAndSpecialRegisters(RegsError),
    /// Failed to setup page tables: {0}
    SetupPageTables(RegsError),
    /// Failed to set special registers: {0}
    SetSpecialRegisters(vmm_sys_util::errno::Error),
}
/// Configures the special registers and system page tables for a given CPU.
///
/// # Arguments
///
/// * `mem` - The memory that will be passed to the guest.
/// * `vcpu` - Structure for the VCPU that holds the VCPU's fd.
/// * `boot_prot` - The boot protocol being used.
///
/// # Errors
///
/// When:
/// - [`kvm_ioctls::ioctls::vcpu::VcpuFd::get_sregs`] errors.
/// - [`configure_segments_and_sregs`] errors.
/// - [`setup_page_tables`] errors
/// - [`kvm_ioctls::ioctls::vcpu::VcpuFd::set_sregs`] errors.
pub fn setup_sregs(
    mem: &GuestMemoryMmap,
    vcpu: &VcpuFd,
    boot_prot: BootProtocol,
) -> Result<(), SetupSpecialRegistersError> {
    // Start from the vCPU's current special registers and patch them up.
    let mut sregs: kvm_sregs = vcpu
        .get_sregs()
        .map_err(SetupSpecialRegistersError::GetSpecialRegisters)?;
    configure_segments_and_sregs(mem, &mut sregs, boot_prot)
        .map_err(SetupSpecialRegistersError::ConfigureSegmentsAndSpecialRegisters)?;
    // Boot page tables are only required by the Linux boot protocol.
    // TODO(dgreid) - Can this be done once per system instead?
    if matches!(boot_prot, BootProtocol::LinuxBoot) {
        setup_page_tables(mem, &mut sregs).map_err(SetupSpecialRegistersError::SetupPageTables)?;
    }
    vcpu.set_sregs(&sregs)
        .map_err(SetupSpecialRegistersError::SetSpecialRegisters)
}
// Guest-physical addresses of the boot GDT and IDT, and the GDT entry count.
const BOOT_GDT_OFFSET: u64 = 0x500;
const BOOT_IDT_OFFSET: u64 = 0x520;
const BOOT_GDT_MAX: usize = 4;
// EFER bits: Long Mode Active (LMA) and Long Mode Enable (LME).
const EFER_LMA: u64 = 0x400;
const EFER_LME: u64 = 0x100;
// CR0 bits: Protection Enable, Extension Type, Paging.
const X86_CR0_PE: u64 = 0x1;
const X86_CR0_ET: u64 = 0x10;
const X86_CR0_PG: u64 = 0x8000_0000;
// CR4 bit: Physical Address Extension.
const X86_CR4_PAE: u64 = 0x20;
/// Writes the GDT entries contiguously into guest memory at `BOOT_GDT_OFFSET`.
///
/// Returns `RegsError::WriteGDT` if any entry falls outside guest memory or the
/// write fails.
fn write_gdt_table(table: &[u64], guest_mem: &GuestMemoryMmap) -> Result<(), RegsError> {
    let base = GuestAddress(BOOT_GDT_OFFSET);
    table.iter().enumerate().try_for_each(|(index, &entry)| {
        let addr = guest_mem
            .checked_offset(base, index * mem::size_of::<u64>())
            .ok_or(RegsError::WriteGDT)?;
        guest_mem
            .write_obj(entry, addr)
            .map_err(|_| RegsError::WriteGDT)
    })
}
/// Writes a single `u64` IDT value into guest memory at `BOOT_IDT_OFFSET`.
fn write_idt_value(val: u64, guest_mem: &GuestMemoryMmap) -> Result<(), RegsError> {
    guest_mem
        .write_obj(val, GuestAddress(BOOT_IDT_OFFSET))
        .map_err(|_| RegsError::WriteIDT)
}
// Builds the boot GDT/IDT in guest memory and fills in segment and control
// registers in `sregs` according to the chosen boot protocol.
fn configure_segments_and_sregs(
    mem: &GuestMemoryMmap,
    sregs: &mut kvm_sregs,
    boot_prot: BootProtocol,
) -> Result<(), RegsError> {
    // 4-entry boot GDT; access/flag bytes differ between the two protocols.
    let gdt_table: [u64; BOOT_GDT_MAX] = match boot_prot {
        BootProtocol::PvhBoot => {
            // Configure GDT entries as specified by PVH boot protocol
            [
                gdt_entry(0, 0, 0),                // NULL
                gdt_entry(0xc09b, 0, 0xffff_ffff), // CODE
                gdt_entry(0xc093, 0, 0xffff_ffff), // DATA
                gdt_entry(0x008b, 0, 0x67),        // TSS
            ]
        }
        BootProtocol::LinuxBoot => {
            // Configure GDT entries as specified by Linux 64bit boot protocol
            [
                gdt_entry(0, 0, 0),            // NULL
                gdt_entry(0xa09b, 0, 0xfffff), // CODE
                gdt_entry(0xc093, 0, 0xfffff), // DATA
                gdt_entry(0x808b, 0, 0xfffff), // TSS
            ]
        }
    };
    // Segment registers are built from GDT entries 1..=3.
    let code_seg = kvm_segment_from_gdt(gdt_table[1], 1);
    let data_seg = kvm_segment_from_gdt(gdt_table[2], 2);
    let tss_seg = kvm_segment_from_gdt(gdt_table[3], 3);
    // Write segments
    write_gdt_table(&gdt_table[..], mem)?;
    sregs.gdt.base = BOOT_GDT_OFFSET;
    sregs.gdt.limit = u16::try_from(mem::size_of_val(&gdt_table)).unwrap() - 1;
    // A single zeroed entry serves as the (empty) boot IDT.
    write_idt_value(0, mem)?;
    sregs.idt.base = BOOT_IDT_OFFSET;
    sregs.idt.limit = u16::try_from(mem::size_of::<u64>()).unwrap() - 1;
    sregs.cs = code_seg;
    sregs.ds = data_seg;
    sregs.es = data_seg;
    sregs.fs = data_seg;
    sregs.gs = data_seg;
    sregs.ss = data_seg;
    sregs.tr = tss_seg;
    match boot_prot {
        BootProtocol::PvhBoot => {
            // PVH: protected mode, no paging (CR4 cleared).
            sregs.cr0 = X86_CR0_PE | X86_CR0_ET;
            sregs.cr4 = 0;
        }
        BootProtocol::LinuxBoot => {
            // 64-bit protected mode
            sregs.cr0 |= X86_CR0_PE;
            sregs.efer |= EFER_LME | EFER_LMA;
        }
    }
    Ok(())
}
/// Writes minimal identity-mapping boot page tables (covering VA [0..1GB) with
/// 2MB pages) into guest memory and points `cr3`/`cr4`/`cr0` at them.
fn setup_page_tables(mem: &GuestMemoryMmap, sregs: &mut kvm_sregs) -> Result<(), RegsError> {
    // Puts PML4 right after zero page but aligned to 4k.
    let pml4_addr = GuestAddress(PML4_START);
    let pdpte_addr = GuestAddress(PDPTE_START);
    let pde_addr = GuestAddress(PDE_START);
    // Entry covering VA [0..512GB); 0x03 = present | writable.
    mem.write_obj(pdpte_addr.raw_value() | 0x03, pml4_addr)
        .map_err(|_| RegsError::WritePML4Address)?;
    // Entry covering VA [0..1GB)
    mem.write_obj(pde_addr.raw_value() | 0x03, pdpte_addr)
        .map_err(|_| RegsError::WritePDPTEAddress)?;
    // 512 2MB entries together covering VA [0..1GB). Note we are assuming
    // CPU supports 2MB pages (/proc/cpuinfo has 'pse'). All modern CPUs do.
    for i in 0..512 {
        let entry = (i << 21) + 0x83u64; // 0x83 = present | writable | page size
        mem.write_obj(entry, pde_addr.unchecked_add(i * 8))
            .map_err(|_| RegsError::WritePDEAddress)?;
    }
    sregs.cr3 = pml4_addr.raw_value();
    sregs.cr4 |= X86_CR4_PAE;
    sregs.cr0 |= X86_CR0_PG;
    Ok(())
}
#[cfg(test)]
mod tests {
    #![allow(clippy::cast_possible_truncation)]
    use kvm_ioctls::Kvm;
    use super::*;
    use crate::test_utils::single_region_mem;
    use crate::vstate::memory::{Bytes, GuestAddress, GuestMemoryMmap};
    // Reads a u64 back out of guest memory at `offset`.
    fn read_u64(gm: &GuestMemoryMmap, offset: u64) -> u64 {
        let read_addr = GuestAddress(offset);
        gm.read_obj(read_addr).unwrap()
    }
    // Checks GDT/IDT contents and segment/control registers against the
    // values expected for the given boot protocol.
    fn validate_segments_and_sregs(
        gm: &GuestMemoryMmap,
        sregs: &kvm_sregs,
        boot_prot: BootProtocol,
    ) {
        if let BootProtocol::LinuxBoot = boot_prot {
            assert_eq!(0xaf_9b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 8));
            assert_eq!(0xcf_9300_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 16));
            assert_eq!(0x8f_8b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 24));
            assert_eq!(0xffff_ffff, sregs.tr.limit);
            assert!(sregs.cr0 & X86_CR0_PE != 0);
            assert!(sregs.efer & EFER_LME != 0 && sregs.efer & EFER_LMA != 0);
        } else {
            // Validate values that are specific to PVH boot protocol
            assert_eq!(0xcf_9b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 8));
            assert_eq!(0xcf_9300_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 16));
            assert_eq!(0x00_8b00_0000_0067, read_u64(gm, BOOT_GDT_OFFSET + 24));
            assert_eq!(0x67, sregs.tr.limit);
            assert_eq!(0, sregs.tr.g);
            assert!(sregs.cr0 & X86_CR0_PE != 0 && sregs.cr0 & X86_CR0_ET != 0);
            assert_eq!(0, sregs.cr4);
        }
        // Common settings for both PVH and Linux boot protocol
        assert_eq!(0x0, read_u64(gm, BOOT_GDT_OFFSET));
        assert_eq!(0x0, read_u64(gm, BOOT_IDT_OFFSET));
        assert_eq!(0, sregs.cs.base);
        assert_eq!(0xffff_ffff, sregs.ds.limit);
        assert_eq!(0x10, sregs.es.selector);
        assert_eq!(1, sregs.fs.present);
        assert_eq!(1, sregs.gs.g);
        assert_eq!(0, sregs.ss.avl);
        assert_eq!(0, sregs.tr.base);
        assert_eq!(0, sregs.tr.avl);
    }
    // Checks the identity-mapped boot page tables written by setup_page_tables.
    fn validate_page_tables(gm: &GuestMemoryMmap, sregs: &kvm_sregs) {
        assert_eq!(0xa003, read_u64(gm, PML4_START));
        assert_eq!(0xb003, read_u64(gm, PDPTE_START));
        for i in 0..512 {
            assert_eq!((i << 21) + 0x83u64, read_u64(gm, PDE_START + (i * 8)));
        }
        assert_eq!(PML4_START, sregs.cr3);
        assert!(sregs.cr4 & X86_CR4_PAE != 0);
        assert!(sregs.cr0 & X86_CR0_PG != 0);
    }
    #[test]
    fn test_setup_fpu() {
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        let vcpu = vm.create_vcpu(0).unwrap();
        setup_fpu(&vcpu).unwrap();
        let expected_fpu: kvm_fpu = kvm_fpu {
            fcw: 0x37f,
            mxcsr: 0x1f80,
            ..Default::default()
        };
        let actual_fpu: kvm_fpu = vcpu.get_fpu().unwrap();
        // TODO: auto-generate kvm related structures with PartialEq on.
        assert_eq!(expected_fpu.fcw, actual_fpu.fcw);
        // Setting the mxcsr register from kvm_fpu inside setup_fpu does not influence anything.
        // See 'kvm_arch_vcpu_ioctl_set_fpu' from arch/x86/kvm/x86.c.
        // The mxcsr will stay 0 and the assert below fails. Decide whether or not we should
        // remove it at all.
        // assert!(expected_fpu.mxcsr == actual_fpu.mxcsr);
    }
    #[test]
    fn test_setup_regs() {
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        let vcpu = vm.create_vcpu(0).unwrap();
        let expected_regs: kvm_regs = kvm_regs {
            rflags: 0x0000_0000_0000_0002u64,
            rip: 1,
            rsp: super::super::layout::BOOT_STACK_POINTER,
            rbp: super::super::layout::BOOT_STACK_POINTER,
            rsi: super::super::layout::ZERO_PAGE_START,
            ..Default::default()
        };
        let entry_point: EntryPoint = EntryPoint {
            entry_addr: GuestAddress(expected_regs.rip),
            protocol: BootProtocol::LinuxBoot,
        };
        setup_regs(&vcpu, entry_point).unwrap();
        let actual_regs: kvm_regs = vcpu.get_regs().unwrap();
        assert_eq!(actual_regs, expected_regs);
    }
    #[test]
    fn test_setup_sregs() {
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        let vcpu = vm.create_vcpu(0).unwrap();
        let gm = single_region_mem(0x10000);
        [BootProtocol::LinuxBoot, BootProtocol::PvhBoot]
            .iter()
            .for_each(|boot_prot| {
                vcpu.set_sregs(&Default::default()).unwrap();
                setup_sregs(&gm, &vcpu, *boot_prot).unwrap();
                let mut sregs: kvm_sregs = vcpu.get_sregs().unwrap();
                // for AMD KVM_GET_SREGS returns g = 0 for each kvm_segment.
                // We set it to 1, otherwise the test will fail.
                sregs.gs.g = 1;
                validate_segments_and_sregs(&gm, &sregs, *boot_prot);
                if let BootProtocol::LinuxBoot = *boot_prot {
                    validate_page_tables(&gm, &sregs);
                }
            });
    }
    #[test]
    fn test_write_gdt_table() {
        // Not enough memory for the gdt table to be written.
        let gm = single_region_mem(BOOT_GDT_OFFSET as usize);
        let gdt_table: [u64; BOOT_GDT_MAX] = [
            gdt_entry(0, 0, 0),            // NULL
            gdt_entry(0xa09b, 0, 0xfffff), // CODE
            gdt_entry(0xc093, 0, 0xfffff), // DATA
            gdt_entry(0x808b, 0, 0xfffff), // TSS
        ];
        write_gdt_table(&gdt_table, &gm).unwrap_err();
        // We allocate exactly the amount needed to write four u64 to `BOOT_GDT_OFFSET`.
        let gm =
            single_region_mem(BOOT_GDT_OFFSET as usize + (mem::size_of::<u64>() * BOOT_GDT_MAX));
        let gdt_table: [u64; BOOT_GDT_MAX] = [
            gdt_entry(0, 0, 0),            // NULL
            gdt_entry(0xa09b, 0, 0xfffff), // CODE
            gdt_entry(0xc093, 0, 0xfffff), // DATA
            gdt_entry(0x808b, 0, 0xfffff), // TSS
        ];
        write_gdt_table(&gdt_table, &gm).unwrap();
    }
    #[test]
    fn test_write_idt_table() {
        // Not enough memory for a u64 value to fit.
        let gm = single_region_mem(BOOT_IDT_OFFSET as usize);
        let val = 0x100;
        write_idt_value(val, &gm).unwrap_err();
        let gm = single_region_mem(BOOT_IDT_OFFSET as usize + mem::size_of::<u64>());
        // We have allocated exactly the amount needed to write an u64 to `BOOT_IDT_OFFSET`.
        write_idt_value(val, &gm).unwrap();
    }
    #[test]
    fn test_configure_segments_and_sregs() {
        let mut sregs: kvm_sregs = Default::default();
        let gm = single_region_mem(0x10000);
        configure_segments_and_sregs(&gm, &mut sregs, BootProtocol::LinuxBoot).unwrap();
        validate_segments_and_sregs(&gm, &sregs, BootProtocol::LinuxBoot);
        configure_segments_and_sregs(&gm, &mut sregs, BootProtocol::PvhBoot).unwrap();
        validate_segments_and_sregs(&gm, &sregs, BootProtocol::PvhBoot);
    }
    #[test]
    fn test_setup_page_tables() {
        let mut sregs: kvm_sregs = Default::default();
        let gm = single_region_mem(PML4_START as usize);
        setup_page_tables(&gm, &mut sregs).unwrap_err();
        let gm = single_region_mem(PDPTE_START as usize);
        setup_page_tables(&gm, &mut sregs).unwrap_err();
        let gm = single_region_mem(PDE_START as usize);
        setup_page_tables(&gm, &mut sregs).unwrap_err();
        let gm = single_region_mem(0x10000);
        setup_page_tables(&gm, &mut sregs).unwrap();
        validate_page_tables(&gm, &sregs);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/x86_64/generated/mpspec.rs | src/vmm/src/arch/x86_64/generated/mpspec.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// automatically generated by tools/bindgen.sh
#![allow(
non_camel_case_types,
non_upper_case_globals,
dead_code,
non_snake_case,
clippy::ptr_as_ptr,
clippy::undocumented_unsafe_blocks,
missing_debug_implementations,
clippy::tests_outside_test_module,
unsafe_op_in_unsafe_fn,
clippy::redundant_static_lifetimes
)]
// NOTE: generated bindings (see file header); prefer changing the bindgen
// input over hand-editing.
// MP configuration table signature and entry type codes.
pub const MPC_SIGNATURE: &[u8; 5] = b"PCMP\0";
pub const MP_PROCESSOR: u32 = 0;
pub const MP_BUS: u32 = 1;
pub const MP_IOAPIC: u32 = 2;
pub const MP_INTSRC: u32 = 3;
pub const MP_LINTSRC: u32 = 4;
pub const MP_TRANSLATION: u32 = 192;
// Processor entry flag bits and masks.
pub const CPU_ENABLED: u32 = 1;
pub const CPU_BOOTPROCESSOR: u32 = 2;
pub const CPU_STEPPING_MASK: u32 = 15;
pub const CPU_MODEL_MASK: u32 = 240;
pub const CPU_FAMILY_MASK: u32 = 3840;
// Bus type identifier strings.
pub const BUSTYPE_EISA: &[u8; 5] = b"EISA\0";
pub const BUSTYPE_ISA: &[u8; 4] = b"ISA\0";
pub const BUSTYPE_INTERN: &[u8; 7] = b"INTERN\0";
pub const BUSTYPE_MCA: &[u8; 4] = b"MCA\0";
pub const BUSTYPE_VL: &[u8; 3] = b"VL\0";
pub const BUSTYPE_PCI: &[u8; 4] = b"PCI\0";
pub const BUSTYPE_PCMCIA: &[u8; 7] = b"PCMCIA\0";
pub const BUSTYPE_CBUS: &[u8; 5] = b"CBUS\0";
pub const BUSTYPE_CBUSII: &[u8; 7] = b"CBUSII\0";
pub const BUSTYPE_FUTURE: &[u8; 7] = b"FUTURE\0";
pub const BUSTYPE_MBI: &[u8; 4] = b"MBI\0";
pub const BUSTYPE_MBII: &[u8; 5] = b"MBII\0";
pub const BUSTYPE_MPI: &[u8; 4] = b"MPI\0";
pub const BUSTYPE_MPSA: &[u8; 5] = b"MPSA\0";
pub const BUSTYPE_NUBUS: &[u8; 6] = b"NUBUS\0";
pub const BUSTYPE_TC: &[u8; 3] = b"TC\0";
pub const BUSTYPE_VME: &[u8; 4] = b"VME\0";
pub const BUSTYPE_XPRESS: &[u8; 7] = b"XPRESS\0";
pub const MPC_APIC_USABLE: u32 = 1;
// Interrupt polarity and trigger-mode flag values.
pub const MP_IRQPOL_DEFAULT: u32 = 0;
pub const MP_IRQPOL_ACTIVE_HIGH: u32 = 1;
pub const MP_IRQPOL_RESERVED: u32 = 2;
pub const MP_IRQPOL_ACTIVE_LOW: u32 = 3;
pub const MP_IRQPOL_MASK: u32 = 3;
pub const MP_IRQTRIG_DEFAULT: u32 = 0;
pub const MP_IRQTRIG_EDGE: u32 = 4;
pub const MP_IRQTRIG_RESERVED: u32 = 8;
pub const MP_IRQTRIG_LEVEL: u32 = 12;
pub const MP_IRQTRIG_MASK: u32 = 12;
pub const MP_APIC_ALL: u32 = 255;
pub const MPC_OEM_SIGNATURE: &[u8; 5] = b"_OEM\0";
// Generated binding for Linux's `struct mpf_intel` (MP floating pointer
// structure, per the names; field semantics follow the MP specification —
// see Linux arch/x86/include/asm/mpspec_def.h).
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct mpf_intel {
    pub signature: [::std::os::raw::c_char; 4usize],
    // physptr: presumably the physical address of the MPC table — per MP spec.
    pub physptr: ::std::os::raw::c_uint,
    pub length: ::std::os::raw::c_uchar,
    pub specification: ::std::os::raw::c_uchar,
    pub checksum: ::std::os::raw::c_uchar,
    pub feature1: ::std::os::raw::c_uchar,
    pub feature2: ::std::os::raw::c_uchar,
    pub feature3: ::std::os::raw::c_uchar,
    pub feature4: ::std::os::raw::c_uchar,
    pub feature5: ::std::os::raw::c_uchar,
}
// Compile-time layout assertions generated by bindgen: each line indexes a
// one-element array with (actual - expected); any mismatch in size, alignment
// or field offset makes the index non-zero and fails the build.
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of mpf_intel"][::std::mem::size_of::<mpf_intel>() - 16usize];
    ["Alignment of mpf_intel"][::std::mem::align_of::<mpf_intel>() - 4usize];
    ["Offset of field: mpf_intel::signature"]
        [::std::mem::offset_of!(mpf_intel, signature) - 0usize];
    ["Offset of field: mpf_intel::physptr"][::std::mem::offset_of!(mpf_intel, physptr) - 4usize];
    ["Offset of field: mpf_intel::length"][::std::mem::offset_of!(mpf_intel, length) - 8usize];
    ["Offset of field: mpf_intel::specification"]
        [::std::mem::offset_of!(mpf_intel, specification) - 9usize];
    ["Offset of field: mpf_intel::checksum"][::std::mem::offset_of!(mpf_intel, checksum) - 10usize];
    ["Offset of field: mpf_intel::feature1"][::std::mem::offset_of!(mpf_intel, feature1) - 11usize];
    ["Offset of field: mpf_intel::feature2"][::std::mem::offset_of!(mpf_intel, feature2) - 12usize];
    ["Offset of field: mpf_intel::feature3"][::std::mem::offset_of!(mpf_intel, feature3) - 13usize];
    ["Offset of field: mpf_intel::feature4"][::std::mem::offset_of!(mpf_intel, feature4) - 14usize];
    ["Offset of field: mpf_intel::feature5"][::std::mem::offset_of!(mpf_intel, feature5) - 15usize];
};
/// MP Configuration Table header (C `struct mpc_table`), generated binding.
///
/// `#[repr(C)]`, 44 bytes (checked by the layout assertions that follow).
/// NOTE(review): field meanings inferred from the C names — `length`
/// presumably covers the base table including the entries that follow it,
/// and `lapic` is the local-APIC MMIO address; confirm against the MP spec.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct mpc_table {
    pub signature: [::std::os::raw::c_char; 4usize],
    pub length: ::std::os::raw::c_ushort,
    pub spec: ::std::os::raw::c_char,
    pub checksum: ::std::os::raw::c_char,
    pub oem: [::std::os::raw::c_char; 8usize],
    pub productid: [::std::os::raw::c_char; 12usize],
    pub oemptr: ::std::os::raw::c_uint,
    pub oemsize: ::std::os::raw::c_ushort,
    pub oemcount: ::std::os::raw::c_ushort,
    pub lapic: ::std::os::raw::c_uint,
    pub reserved: ::std::os::raw::c_uint,
}
// Bindgen-emitted compile-time layout checks for `mpc_table` (size 44,
// align 4, field offsets). A nonzero `actual - expected` difference indexes
// a one-element array out of bounds and fails compilation.
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of mpc_table"][::std::mem::size_of::<mpc_table>() - 44usize];
    ["Alignment of mpc_table"][::std::mem::align_of::<mpc_table>() - 4usize];
    ["Offset of field: mpc_table::signature"]
        [::std::mem::offset_of!(mpc_table, signature) - 0usize];
    ["Offset of field: mpc_table::length"][::std::mem::offset_of!(mpc_table, length) - 4usize];
    ["Offset of field: mpc_table::spec"][::std::mem::offset_of!(mpc_table, spec) - 6usize];
    ["Offset of field: mpc_table::checksum"][::std::mem::offset_of!(mpc_table, checksum) - 7usize];
    ["Offset of field: mpc_table::oem"][::std::mem::offset_of!(mpc_table, oem) - 8usize];
    ["Offset of field: mpc_table::productid"]
        [::std::mem::offset_of!(mpc_table, productid) - 16usize];
    ["Offset of field: mpc_table::oemptr"][::std::mem::offset_of!(mpc_table, oemptr) - 28usize];
    ["Offset of field: mpc_table::oemsize"][::std::mem::offset_of!(mpc_table, oemsize) - 32usize];
    ["Offset of field: mpc_table::oemcount"][::std::mem::offset_of!(mpc_table, oemcount) - 34usize];
    ["Offset of field: mpc_table::lapic"][::std::mem::offset_of!(mpc_table, lapic) - 36usize];
    ["Offset of field: mpc_table::reserved"][::std::mem::offset_of!(mpc_table, reserved) - 40usize];
};
/// Processor entry of the MP configuration table (C `struct mpc_cpu`),
/// generated binding; `#[repr(C)]`, 20 bytes (checked below).
///
/// NOTE(review): field meanings inferred from the C names — `cpuflag`
/// presumably carries CPU_BOOTPROCESSOR/usable bits and `cpufeature` the
/// CPUID signature masked by CPU_*_MASK above; confirm against the MP spec.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct mpc_cpu {
    pub type_: ::std::os::raw::c_uchar,
    pub apicid: ::std::os::raw::c_uchar,
    pub apicver: ::std::os::raw::c_uchar,
    pub cpuflag: ::std::os::raw::c_uchar,
    pub cpufeature: ::std::os::raw::c_uint,
    pub featureflag: ::std::os::raw::c_uint,
    pub reserved: [::std::os::raw::c_uint; 2usize],
}
// Bindgen-emitted compile-time layout checks for `mpc_cpu` (size 20,
// align 4, field offsets); out-of-bounds constant indexing fails the build
// on any mismatch.
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of mpc_cpu"][::std::mem::size_of::<mpc_cpu>() - 20usize];
    ["Alignment of mpc_cpu"][::std::mem::align_of::<mpc_cpu>() - 4usize];
    ["Offset of field: mpc_cpu::type_"][::std::mem::offset_of!(mpc_cpu, type_) - 0usize];
    ["Offset of field: mpc_cpu::apicid"][::std::mem::offset_of!(mpc_cpu, apicid) - 1usize];
    ["Offset of field: mpc_cpu::apicver"][::std::mem::offset_of!(mpc_cpu, apicver) - 2usize];
    ["Offset of field: mpc_cpu::cpuflag"][::std::mem::offset_of!(mpc_cpu, cpuflag) - 3usize];
    ["Offset of field: mpc_cpu::cpufeature"][::std::mem::offset_of!(mpc_cpu, cpufeature) - 4usize];
    ["Offset of field: mpc_cpu::featureflag"]
        [::std::mem::offset_of!(mpc_cpu, featureflag) - 8usize];
    ["Offset of field: mpc_cpu::reserved"][::std::mem::offset_of!(mpc_cpu, reserved) - 12usize];
};
/// Bus entry of the MP configuration table (C `struct mpc_bus`), generated
/// binding; `#[repr(C)]`, 8 bytes, byte-aligned (checked below).
///
/// `bustype` is a 6-byte name field — the BUSTYPE_* string constants defined
/// earlier in this file fit this width.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct mpc_bus {
    pub type_: ::std::os::raw::c_uchar,
    pub busid: ::std::os::raw::c_uchar,
    pub bustype: [::std::os::raw::c_uchar; 6usize],
}
// Bindgen-emitted compile-time layout checks for `mpc_bus` (size 8,
// align 1, field offsets); any mismatch is a compile error via
// out-of-bounds constant indexing.
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of mpc_bus"][::std::mem::size_of::<mpc_bus>() - 8usize];
    ["Alignment of mpc_bus"][::std::mem::align_of::<mpc_bus>() - 1usize];
    ["Offset of field: mpc_bus::type_"][::std::mem::offset_of!(mpc_bus, type_) - 0usize];
    ["Offset of field: mpc_bus::busid"][::std::mem::offset_of!(mpc_bus, busid) - 1usize];
    ["Offset of field: mpc_bus::bustype"][::std::mem::offset_of!(mpc_bus, bustype) - 2usize];
};
/// I/O APIC entry of the MP configuration table (C `struct mpc_ioapic`),
/// generated binding; `#[repr(C)]`, 8 bytes (checked below).
///
/// NOTE(review): `flags` presumably carries MPC_APIC_USABLE and `apicaddr`
/// the I/O APIC MMIO base — inferred from names; confirm against the MP spec.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct mpc_ioapic {
    pub type_: ::std::os::raw::c_uchar,
    pub apicid: ::std::os::raw::c_uchar,
    pub apicver: ::std::os::raw::c_uchar,
    pub flags: ::std::os::raw::c_uchar,
    pub apicaddr: ::std::os::raw::c_uint,
}
// Bindgen-emitted compile-time layout checks for `mpc_ioapic` (size 8,
// align 4, field offsets); any mismatch fails compilation via out-of-bounds
// constant indexing.
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of mpc_ioapic"][::std::mem::size_of::<mpc_ioapic>() - 8usize];
    ["Alignment of mpc_ioapic"][::std::mem::align_of::<mpc_ioapic>() - 4usize];
    ["Offset of field: mpc_ioapic::type_"][::std::mem::offset_of!(mpc_ioapic, type_) - 0usize];
    ["Offset of field: mpc_ioapic::apicid"][::std::mem::offset_of!(mpc_ioapic, apicid) - 1usize];
    ["Offset of field: mpc_ioapic::apicver"][::std::mem::offset_of!(mpc_ioapic, apicver) - 2usize];
    ["Offset of field: mpc_ioapic::flags"][::std::mem::offset_of!(mpc_ioapic, flags) - 3usize];
    ["Offset of field: mpc_ioapic::apicaddr"]
        [::std::mem::offset_of!(mpc_ioapic, apicaddr) - 4usize];
};
/// I/O interrupt assignment entry of the MP configuration table
/// (C `struct mpc_intsrc`), generated binding; `#[repr(C)]`, 8 bytes.
///
/// NOTE(review): `irqtype` presumably takes `mp_irq_source_types` values and
/// `irqflag` the MP_IRQPOL_*/MP_IRQTRIG_* encodings defined earlier —
/// inferred from names; confirm against the MP spec.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct mpc_intsrc {
    pub type_: ::std::os::raw::c_uchar,
    pub irqtype: ::std::os::raw::c_uchar,
    pub irqflag: ::std::os::raw::c_ushort,
    pub srcbus: ::std::os::raw::c_uchar,
    pub srcbusirq: ::std::os::raw::c_uchar,
    pub dstapic: ::std::os::raw::c_uchar,
    pub dstirq: ::std::os::raw::c_uchar,
}
// Bindgen-emitted compile-time layout checks for `mpc_intsrc` (size 8,
// align 2, field offsets); any mismatch fails compilation via out-of-bounds
// constant indexing.
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of mpc_intsrc"][::std::mem::size_of::<mpc_intsrc>() - 8usize];
    ["Alignment of mpc_intsrc"][::std::mem::align_of::<mpc_intsrc>() - 2usize];
    ["Offset of field: mpc_intsrc::type_"][::std::mem::offset_of!(mpc_intsrc, type_) - 0usize];
    ["Offset of field: mpc_intsrc::irqtype"][::std::mem::offset_of!(mpc_intsrc, irqtype) - 1usize];
    ["Offset of field: mpc_intsrc::irqflag"][::std::mem::offset_of!(mpc_intsrc, irqflag) - 2usize];
    ["Offset of field: mpc_intsrc::srcbus"][::std::mem::offset_of!(mpc_intsrc, srcbus) - 4usize];
    ["Offset of field: mpc_intsrc::srcbusirq"]
        [::std::mem::offset_of!(mpc_intsrc, srcbusirq) - 5usize];
    ["Offset of field: mpc_intsrc::dstapic"][::std::mem::offset_of!(mpc_intsrc, dstapic) - 6usize];
    ["Offset of field: mpc_intsrc::dstirq"][::std::mem::offset_of!(mpc_intsrc, dstirq) - 7usize];
};
/// Interrupt source types for MP-table interrupt entries — a C enum lowered
/// by bindgen to a module of constants over an alias `Type`.
/// NOTE(review): variant semantics (INT/NMI/SMI/ExtINT delivery modes) are
/// inferred from the names; confirm against the MP spec.
pub mod mp_irq_source_types {
    pub type Type = ::std::os::raw::c_uint;
    pub const mp_INT: Type = 0;
    pub const mp_NMI: Type = 1;
    pub const mp_SMI: Type = 2;
    pub const mp_ExtINT: Type = 3;
}
/// Local interrupt assignment entry of the MP configuration table
/// (C `struct mpc_lintsrc`), generated binding; `#[repr(C)]`, 8 bytes.
///
/// Same field vocabulary as `mpc_intsrc`, but the destination is a local
/// APIC LINT pin (`destapiclint`) — NOTE(review): inferred from names;
/// confirm against the MP spec.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct mpc_lintsrc {
    pub type_: ::std::os::raw::c_uchar,
    pub irqtype: ::std::os::raw::c_uchar,
    pub irqflag: ::std::os::raw::c_ushort,
    pub srcbusid: ::std::os::raw::c_uchar,
    pub srcbusirq: ::std::os::raw::c_uchar,
    pub destapic: ::std::os::raw::c_uchar,
    pub destapiclint: ::std::os::raw::c_uchar,
}
// Bindgen-emitted compile-time layout checks for `mpc_lintsrc` (size 8,
// align 2, field offsets); any mismatch fails compilation via out-of-bounds
// constant indexing.
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of mpc_lintsrc"][::std::mem::size_of::<mpc_lintsrc>() - 8usize];
    ["Alignment of mpc_lintsrc"][::std::mem::align_of::<mpc_lintsrc>() - 2usize];
    ["Offset of field: mpc_lintsrc::type_"][::std::mem::offset_of!(mpc_lintsrc, type_) - 0usize];
    ["Offset of field: mpc_lintsrc::irqtype"]
        [::std::mem::offset_of!(mpc_lintsrc, irqtype) - 1usize];
    ["Offset of field: mpc_lintsrc::irqflag"]
        [::std::mem::offset_of!(mpc_lintsrc, irqflag) - 2usize];
    ["Offset of field: mpc_lintsrc::srcbusid"]
        [::std::mem::offset_of!(mpc_lintsrc, srcbusid) - 4usize];
    ["Offset of field: mpc_lintsrc::srcbusirq"]
        [::std::mem::offset_of!(mpc_lintsrc, srcbusirq) - 5usize];
    ["Offset of field: mpc_lintsrc::destapic"]
        [::std::mem::offset_of!(mpc_lintsrc, destapic) - 6usize];
    ["Offset of field: mpc_lintsrc::destapiclint"]
        [::std::mem::offset_of!(mpc_lintsrc, destapiclint) - 7usize];
};
/// OEM table header (C `struct mpc_oemtable`), generated binding;
/// `#[repr(C)]`, 16 bytes (checked below).
///
/// NOTE(review): `signature` presumably holds MPC_OEM_SIGNATURE (`"_OEM"`)
/// defined earlier in this file — inferred from names; confirm against the
/// MP spec.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct mpc_oemtable {
    pub signature: [::std::os::raw::c_char; 4usize],
    pub length: ::std::os::raw::c_ushort,
    pub rev: ::std::os::raw::c_char,
    pub checksum: ::std::os::raw::c_char,
    pub mpc: [::std::os::raw::c_char; 8usize],
}
// Bindgen-emitted compile-time layout checks for `mpc_oemtable` (size 16,
// align 2, field offsets); any mismatch fails compilation via out-of-bounds
// constant indexing.
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of mpc_oemtable"][::std::mem::size_of::<mpc_oemtable>() - 16usize];
    ["Alignment of mpc_oemtable"][::std::mem::align_of::<mpc_oemtable>() - 2usize];
    ["Offset of field: mpc_oemtable::signature"]
        [::std::mem::offset_of!(mpc_oemtable, signature) - 0usize];
    ["Offset of field: mpc_oemtable::length"]
        [::std::mem::offset_of!(mpc_oemtable, length) - 4usize];
    ["Offset of field: mpc_oemtable::rev"][::std::mem::offset_of!(mpc_oemtable, rev) - 6usize];
    ["Offset of field: mpc_oemtable::checksum"]
        [::std::mem::offset_of!(mpc_oemtable, checksum) - 7usize];
    ["Offset of field: mpc_oemtable::mpc"][::std::mem::offset_of!(mpc_oemtable, mpc) - 8usize];
};
/// Bus-type identifiers — a C enum lowered by bindgen to a module of
/// constants over an alias `Type`. Distinct from the BUSTYPE_* name strings
/// above: these are numeric tags (ISA/EISA/PCI), not the 6-byte names.
pub mod mp_bustype {
    pub type Type = ::std::os::raw::c_uint;
    pub const MP_BUS_ISA: Type = 1;
    pub const MP_BUS_EISA: Type = 2;
    pub const MP_BUS_PCI: Type = 3;
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/x86_64/generated/msr_index.rs | src/vmm/src/arch/x86_64/generated/msr_index.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// automatically generated by tools/bindgen.sh
#![allow(
non_camel_case_types,
non_upper_case_globals,
dead_code,
non_snake_case,
clippy::ptr_as_ptr,
clippy::undocumented_unsafe_blocks,
missing_debug_implementations,
clippy::tests_outside_test_module,
unsafe_op_in_unsafe_fn,
clippy::redundant_static_lifetimes
)]
pub const MSR_EFER: u32 = 0xc0000080;
pub const MSR_STAR: u32 = 0xc0000081;
pub const MSR_LSTAR: u32 = 0xc0000082;
pub const MSR_CSTAR: u32 = 0xc0000083;
pub const MSR_SYSCALL_MASK: u32 = 0xc0000084;
pub const MSR_FS_BASE: u32 = 0xc0000100;
pub const MSR_GS_BASE: u32 = 0xc0000101;
pub const MSR_KERNEL_GS_BASE: u32 = 0xc0000102;
pub const MSR_TSC_AUX: u32 = 0xc0000103;
pub const MSR_IA32_FRED_RSP0: u32 = 0x1cc;
pub const MSR_IA32_FRED_RSP1: u32 = 0x1cd;
pub const MSR_IA32_FRED_RSP2: u32 = 0x1ce;
pub const MSR_IA32_FRED_RSP3: u32 = 0x1cf;
pub const MSR_IA32_FRED_STKLVLS: u32 = 0x1d0;
pub const MSR_IA32_FRED_SSP1: u32 = 0x1d1;
pub const MSR_IA32_FRED_SSP2: u32 = 0x1d2;
pub const MSR_IA32_FRED_SSP3: u32 = 0x1d3;
pub const MSR_IA32_FRED_CONFIG: u32 = 0x1d4;
pub const MSR_TEST_CTRL: u32 = 0x33;
pub const MSR_TEST_CTRL_SPLIT_LOCK_DETECT_BIT: u32 = 0x1d;
pub const MSR_IA32_SPEC_CTRL: u32 = 0x48;
pub const MSR_IA32_PRED_CMD: u32 = 0x49;
pub const MSR_PPIN_CTL: u32 = 0x4e;
pub const MSR_PPIN: u32 = 0x4f;
pub const MSR_IA32_PERFCTR0: u32 = 0xc1;
pub const MSR_IA32_PERFCTR1: u32 = 0xc2;
pub const MSR_FSB_FREQ: u32 = 0xcd;
pub const MSR_PLATFORM_INFO: u32 = 0xce;
pub const MSR_PLATFORM_INFO_CPUID_FAULT_BIT: u32 = 0x1f;
pub const MSR_IA32_UMWAIT_CONTROL: u32 = 0xe1;
pub const MSR_IA32_UMWAIT_CONTROL_TIME_MASK: i32 = -4;
pub const MSR_IA32_CORE_CAPS: u32 = 0xcf;
pub const MSR_IA32_CORE_CAPS_INTEGRITY_CAPS_BIT: u32 = 0x2;
pub const MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT_BIT: u32 = 0x5;
pub const MSR_PKG_CST_CONFIG_CONTROL: u32 = 0xe2;
pub const MSR_MTRRcap: u32 = 0xfe;
pub const MSR_IA32_ARCH_CAPABILITIES: u32 = 0x10a;
pub const MSR_IA32_FLUSH_CMD: u32 = 0x10b;
pub const MSR_IA32_BBL_CR_CTL: u32 = 0x119;
pub const MSR_IA32_BBL_CR_CTL3: u32 = 0x11e;
pub const MSR_IA32_TSX_CTRL: u32 = 0x122;
pub const MSR_IA32_MCU_OPT_CTRL: u32 = 0x123;
pub const MSR_IA32_SYSENTER_CS: u32 = 0x174;
pub const MSR_IA32_SYSENTER_ESP: u32 = 0x175;
pub const MSR_IA32_SYSENTER_EIP: u32 = 0x176;
pub const MSR_IA32_MCG_CAP: u32 = 0x179;
pub const MSR_IA32_MCG_STATUS: u32 = 0x17a;
pub const MSR_IA32_MCG_CTL: u32 = 0x17b;
pub const MSR_ERROR_CONTROL: u32 = 0x17f;
pub const MSR_IA32_MCG_EXT_CTL: u32 = 0x4d0;
pub const MSR_OFFCORE_RSP_0: u32 = 0x1a6;
pub const MSR_OFFCORE_RSP_1: u32 = 0x1a7;
pub const MSR_TURBO_RATIO_LIMIT: u32 = 0x1ad;
pub const MSR_TURBO_RATIO_LIMIT1: u32 = 0x1ae;
pub const MSR_TURBO_RATIO_LIMIT2: u32 = 0x1af;
pub const MSR_SNOOP_RSP_0: u32 = 0x1328;
pub const MSR_SNOOP_RSP_1: u32 = 0x1329;
pub const MSR_LBR_SELECT: u32 = 0x1c8;
pub const MSR_LBR_TOS: u32 = 0x1c9;
pub const MSR_IA32_POWER_CTL: u32 = 0x1fc;
pub const MSR_IA32_POWER_CTL_BIT_EE: u32 = 0x13;
pub const MSR_INTEGRITY_CAPS: u32 = 0x2d9;
pub const MSR_INTEGRITY_CAPS_ARRAY_BIST_BIT: u32 = 0x2;
pub const MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT: u32 = 0x4;
pub const MSR_INTEGRITY_CAPS_SBAF_BIT: u32 = 0x8;
pub const MSR_LBR_NHM_FROM: u32 = 0x680;
pub const MSR_LBR_NHM_TO: u32 = 0x6c0;
pub const MSR_LBR_CORE_FROM: u32 = 0x40;
pub const MSR_LBR_CORE_TO: u32 = 0x60;
pub const MSR_LBR_INFO_0: u32 = 0xdc0;
pub const MSR_ARCH_LBR_CTL: u32 = 0x14ce;
pub const MSR_ARCH_LBR_DEPTH: u32 = 0x14cf;
pub const MSR_ARCH_LBR_FROM_0: u32 = 0x1500;
pub const MSR_ARCH_LBR_TO_0: u32 = 0x1600;
pub const MSR_ARCH_LBR_INFO_0: u32 = 0x1200;
pub const MSR_IA32_PEBS_ENABLE: u32 = 0x3f1;
pub const MSR_PEBS_DATA_CFG: u32 = 0x3f2;
pub const MSR_IA32_DS_AREA: u32 = 0x600;
pub const MSR_IA32_PERF_CAPABILITIES: u32 = 0x345;
pub const MSR_PEBS_LD_LAT_THRESHOLD: u32 = 0x3f6;
pub const MSR_IA32_RTIT_CTL: u32 = 0x570;
pub const MSR_IA32_RTIT_STATUS: u32 = 0x571;
pub const MSR_IA32_RTIT_ADDR0_A: u32 = 0x580;
pub const MSR_IA32_RTIT_ADDR0_B: u32 = 0x581;
pub const MSR_IA32_RTIT_ADDR1_A: u32 = 0x582;
pub const MSR_IA32_RTIT_ADDR1_B: u32 = 0x583;
pub const MSR_IA32_RTIT_ADDR2_A: u32 = 0x584;
pub const MSR_IA32_RTIT_ADDR2_B: u32 = 0x585;
pub const MSR_IA32_RTIT_ADDR3_A: u32 = 0x586;
pub const MSR_IA32_RTIT_ADDR3_B: u32 = 0x587;
pub const MSR_IA32_RTIT_CR3_MATCH: u32 = 0x572;
pub const MSR_IA32_RTIT_OUTPUT_BASE: u32 = 0x560;
pub const MSR_IA32_RTIT_OUTPUT_MASK: u32 = 0x561;
pub const MSR_MTRRfix64K_00000: u32 = 0x250;
pub const MSR_MTRRfix16K_80000: u32 = 0x258;
pub const MSR_MTRRfix16K_A0000: u32 = 0x259;
pub const MSR_MTRRfix4K_C0000: u32 = 0x268;
pub const MSR_MTRRfix4K_C8000: u32 = 0x269;
pub const MSR_MTRRfix4K_D0000: u32 = 0x26a;
pub const MSR_MTRRfix4K_D8000: u32 = 0x26b;
pub const MSR_MTRRfix4K_E0000: u32 = 0x26c;
pub const MSR_MTRRfix4K_E8000: u32 = 0x26d;
pub const MSR_MTRRfix4K_F0000: u32 = 0x26e;
pub const MSR_MTRRfix4K_F8000: u32 = 0x26f;
pub const MSR_MTRRdefType: u32 = 0x2ff;
pub const MSR_IA32_CR_PAT: u32 = 0x277;
pub const MSR_IA32_DEBUGCTLMSR: u32 = 0x1d9;
pub const MSR_IA32_LASTBRANCHFROMIP: u32 = 0x1db;
pub const MSR_IA32_LASTBRANCHTOIP: u32 = 0x1dc;
pub const MSR_IA32_LASTINTFROMIP: u32 = 0x1dd;
pub const MSR_IA32_LASTINTTOIP: u32 = 0x1de;
pub const MSR_IA32_PASID: u32 = 0xd93;
pub const MSR_PEBS_FRONTEND: u32 = 0x3f7;
pub const MSR_IA32_MC0_CTL: u32 = 0x400;
pub const MSR_IA32_MC0_STATUS: u32 = 0x401;
pub const MSR_IA32_MC0_ADDR: u32 = 0x402;
pub const MSR_IA32_MC0_MISC: u32 = 0x403;
pub const MSR_PKG_C3_RESIDENCY: u32 = 0x3f8;
pub const MSR_PKG_C6_RESIDENCY: u32 = 0x3f9;
pub const MSR_ATOM_PKG_C6_RESIDENCY: u32 = 0x3fa;
pub const MSR_PKG_C7_RESIDENCY: u32 = 0x3fa;
pub const MSR_CORE_C3_RESIDENCY: u32 = 0x3fc;
pub const MSR_CORE_C6_RESIDENCY: u32 = 0x3fd;
pub const MSR_CORE_C7_RESIDENCY: u32 = 0x3fe;
pub const MSR_KNL_CORE_C6_RESIDENCY: u32 = 0x3ff;
pub const MSR_PKG_C2_RESIDENCY: u32 = 0x60d;
pub const MSR_PKG_C8_RESIDENCY: u32 = 0x630;
pub const MSR_PKG_C9_RESIDENCY: u32 = 0x631;
pub const MSR_PKG_C10_RESIDENCY: u32 = 0x632;
pub const MSR_PKGC3_IRTL: u32 = 0x60a;
pub const MSR_PKGC6_IRTL: u32 = 0x60b;
pub const MSR_PKGC7_IRTL: u32 = 0x60c;
pub const MSR_PKGC8_IRTL: u32 = 0x633;
pub const MSR_PKGC9_IRTL: u32 = 0x634;
pub const MSR_PKGC10_IRTL: u32 = 0x635;
pub const MSR_VR_CURRENT_CONFIG: u32 = 0x601;
pub const MSR_RAPL_POWER_UNIT: u32 = 0x606;
pub const MSR_PKG_POWER_LIMIT: u32 = 0x610;
pub const MSR_PKG_ENERGY_STATUS: u32 = 0x611;
pub const MSR_PKG_PERF_STATUS: u32 = 0x613;
pub const MSR_PKG_POWER_INFO: u32 = 0x614;
pub const MSR_DRAM_POWER_LIMIT: u32 = 0x618;
pub const MSR_DRAM_ENERGY_STATUS: u32 = 0x619;
pub const MSR_DRAM_PERF_STATUS: u32 = 0x61b;
pub const MSR_DRAM_POWER_INFO: u32 = 0x61c;
pub const MSR_PP0_POWER_LIMIT: u32 = 0x638;
pub const MSR_PP0_ENERGY_STATUS: u32 = 0x639;
pub const MSR_PP0_POLICY: u32 = 0x63a;
pub const MSR_PP0_PERF_STATUS: u32 = 0x63b;
pub const MSR_PP1_POWER_LIMIT: u32 = 0x640;
pub const MSR_PP1_ENERGY_STATUS: u32 = 0x641;
pub const MSR_PP1_POLICY: u32 = 0x642;
pub const MSR_AMD_RAPL_POWER_UNIT: u32 = 0xc0010299;
pub const MSR_AMD_CORE_ENERGY_STATUS: u32 = 0xc001029a;
pub const MSR_AMD_PKG_ENERGY_STATUS: u32 = 0xc001029b;
pub const MSR_CONFIG_TDP_NOMINAL: u32 = 0x648;
pub const MSR_CONFIG_TDP_LEVEL_1: u32 = 0x649;
pub const MSR_CONFIG_TDP_LEVEL_2: u32 = 0x64a;
pub const MSR_CONFIG_TDP_CONTROL: u32 = 0x64b;
pub const MSR_TURBO_ACTIVATION_RATIO: u32 = 0x64c;
pub const MSR_PLATFORM_ENERGY_STATUS: u32 = 0x64d;
pub const MSR_SECONDARY_TURBO_RATIO_LIMIT: u32 = 0x650;
pub const MSR_PKG_WEIGHTED_CORE_C0_RES: u32 = 0x658;
pub const MSR_PKG_ANY_CORE_C0_RES: u32 = 0x659;
pub const MSR_PKG_ANY_GFXE_C0_RES: u32 = 0x65a;
pub const MSR_PKG_BOTH_CORE_GFXE_C0_RES: u32 = 0x65b;
pub const MSR_CORE_C1_RES: u32 = 0x660;
pub const MSR_MODULE_C6_RES_MS: u32 = 0x664;
pub const MSR_CC6_DEMOTION_POLICY_CONFIG: u32 = 0x668;
pub const MSR_MC6_DEMOTION_POLICY_CONFIG: u32 = 0x669;
pub const MSR_ATOM_CORE_RATIOS: u32 = 0x66a;
pub const MSR_ATOM_CORE_VIDS: u32 = 0x66b;
pub const MSR_ATOM_CORE_TURBO_RATIOS: u32 = 0x66c;
pub const MSR_ATOM_CORE_TURBO_VIDS: u32 = 0x66d;
pub const MSR_CORE_PERF_LIMIT_REASONS: u32 = 0x690;
pub const MSR_GFX_PERF_LIMIT_REASONS: u32 = 0x6b0;
pub const MSR_RING_PERF_LIMIT_REASONS: u32 = 0x6b1;
pub const MSR_IA32_U_CET: u32 = 0x6a0;
pub const MSR_IA32_S_CET: u32 = 0x6a2;
pub const MSR_IA32_PL0_SSP: u32 = 0x6a4;
pub const MSR_IA32_PL1_SSP: u32 = 0x6a5;
pub const MSR_IA32_PL2_SSP: u32 = 0x6a6;
pub const MSR_IA32_PL3_SSP: u32 = 0x6a7;
pub const MSR_IA32_INT_SSP_TAB: u32 = 0x6a8;
pub const MSR_PPERF: u32 = 0x64e;
pub const MSR_PERF_LIMIT_REASONS: u32 = 0x64f;
pub const MSR_PM_ENABLE: u32 = 0x770;
pub const MSR_HWP_CAPABILITIES: u32 = 0x771;
pub const MSR_HWP_REQUEST_PKG: u32 = 0x772;
pub const MSR_HWP_INTERRUPT: u32 = 0x773;
pub const MSR_HWP_REQUEST: u32 = 0x774;
pub const MSR_HWP_STATUS: u32 = 0x777;
pub const MSR_AMD64_MC0_MASK: u32 = 0xc0010044;
pub const MSR_IA32_MC0_CTL2: u32 = 0x280;
pub const MSR_P6_PERFCTR0: u32 = 0xc1;
pub const MSR_P6_PERFCTR1: u32 = 0xc2;
pub const MSR_P6_EVNTSEL0: u32 = 0x186;
pub const MSR_P6_EVNTSEL1: u32 = 0x187;
pub const MSR_KNC_PERFCTR0: u32 = 0x20;
pub const MSR_KNC_PERFCTR1: u32 = 0x21;
pub const MSR_KNC_EVNTSEL0: u32 = 0x28;
pub const MSR_KNC_EVNTSEL1: u32 = 0x29;
pub const MSR_IA32_PMC0: u32 = 0x4c1;
pub const MSR_RELOAD_PMC0: u32 = 0x14c1;
pub const MSR_RELOAD_FIXED_CTR0: u32 = 0x1309;
pub const MSR_IA32_PMC_V6_GP0_CTR: u32 = 0x1900;
pub const MSR_IA32_PMC_V6_GP0_CFG_A: u32 = 0x1901;
pub const MSR_IA32_PMC_V6_FX0_CTR: u32 = 0x1980;
pub const MSR_IA32_PMC_V6_STEP: u32 = 0x4;
pub const MSR_IA32_MKTME_KEYID_PARTITIONING: u32 = 0x87;
pub const MSR_AMD64_PATCH_LEVEL: u32 = 0x8b;
pub const MSR_AMD64_TSC_RATIO: u32 = 0xc0000104;
pub const MSR_AMD64_NB_CFG: u32 = 0xc001001f;
pub const MSR_AMD64_PATCH_LOADER: u32 = 0xc0010020;
pub const MSR_AMD_PERF_CTL: u32 = 0xc0010062;
pub const MSR_AMD_PERF_STATUS: u32 = 0xc0010063;
pub const MSR_AMD_PSTATE_DEF_BASE: u32 = 0xc0010064;
pub const MSR_AMD64_OSVW_ID_LENGTH: u32 = 0xc0010140;
pub const MSR_AMD64_OSVW_STATUS: u32 = 0xc0010141;
pub const MSR_AMD_PPIN_CTL: u32 = 0xc00102f0;
pub const MSR_AMD_PPIN: u32 = 0xc00102f1;
pub const MSR_AMD64_CPUID_FN_1: u32 = 0xc0011004;
pub const MSR_AMD64_LS_CFG: u32 = 0xc0011020;
pub const MSR_AMD64_DC_CFG: u32 = 0xc0011022;
pub const MSR_AMD64_TW_CFG: u32 = 0xc0011023;
pub const MSR_AMD64_DE_CFG: u32 = 0xc0011029;
pub const MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT: u32 = 0x1;
pub const MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT: u32 = 0x9;
pub const MSR_AMD64_BU_CFG2: u32 = 0xc001102a;
pub const MSR_AMD64_IBSFETCHCTL: u32 = 0xc0011030;
pub const MSR_AMD64_IBSFETCHLINAD: u32 = 0xc0011031;
pub const MSR_AMD64_IBSFETCHPHYSAD: u32 = 0xc0011032;
pub const MSR_AMD64_IBSFETCH_REG_COUNT: u32 = 0x3;
pub const MSR_AMD64_IBSFETCH_REG_MASK: u32 = 0x7;
pub const MSR_AMD64_IBSOPCTL: u32 = 0xc0011033;
pub const MSR_AMD64_IBSOPRIP: u32 = 0xc0011034;
pub const MSR_AMD64_IBSOPDATA: u32 = 0xc0011035;
pub const MSR_AMD64_IBSOPDATA2: u32 = 0xc0011036;
pub const MSR_AMD64_IBSOPDATA3: u32 = 0xc0011037;
pub const MSR_AMD64_IBSDCLINAD: u32 = 0xc0011038;
pub const MSR_AMD64_IBSDCPHYSAD: u32 = 0xc0011039;
pub const MSR_AMD64_IBSOP_REG_COUNT: u32 = 0x7;
pub const MSR_AMD64_IBSOP_REG_MASK: u32 = 0x7f;
pub const MSR_AMD64_IBSCTL: u32 = 0xc001103a;
pub const MSR_AMD64_IBSBRTARGET: u32 = 0xc001103b;
pub const MSR_AMD64_ICIBSEXTDCTL: u32 = 0xc001103c;
pub const MSR_AMD64_IBSOPDATA4: u32 = 0xc001103d;
pub const MSR_AMD64_IBS_REG_COUNT_MAX: u32 = 0x8;
pub const MSR_AMD64_SVM_AVIC_DOORBELL: u32 = 0xc001011b;
pub const MSR_AMD64_VM_PAGE_FLUSH: u32 = 0xc001011e;
pub const MSR_AMD64_SEV_ES_GHCB: u32 = 0xc0010130;
pub const MSR_AMD64_SEV: u32 = 0xc0010131;
pub const MSR_AMD64_SEV_ENABLED_BIT: u32 = 0x0;
pub const MSR_AMD64_SEV_ES_ENABLED_BIT: u32 = 0x1;
pub const MSR_AMD64_SEV_SNP_ENABLED_BIT: u32 = 0x2;
pub const MSR_AMD64_SNP_VTOM_BIT: u32 = 0x3;
pub const MSR_AMD64_SNP_REFLECT_VC_BIT: u32 = 0x4;
pub const MSR_AMD64_SNP_RESTRICTED_INJ_BIT: u32 = 0x5;
pub const MSR_AMD64_SNP_ALT_INJ_BIT: u32 = 0x6;
pub const MSR_AMD64_SNP_DEBUG_SWAP_BIT: u32 = 0x7;
pub const MSR_AMD64_SNP_PREVENT_HOST_IBS_BIT: u32 = 0x8;
pub const MSR_AMD64_SNP_BTB_ISOLATION_BIT: u32 = 0x9;
pub const MSR_AMD64_SNP_VMPL_SSS_BIT: u32 = 0xa;
pub const MSR_AMD64_SNP_SECURE_TSC_BIT: u32 = 0xb;
pub const MSR_AMD64_SNP_VMGEXIT_PARAM_BIT: u32 = 0xc;
pub const MSR_AMD64_SNP_IBS_VIRT_BIT: u32 = 0xe;
pub const MSR_AMD64_SNP_VMSA_REG_PROT_BIT: u32 = 0x10;
pub const MSR_AMD64_SNP_SMT_PROT_BIT: u32 = 0x11;
pub const MSR_AMD64_SNP_RESV_BIT: u32 = 0x12;
pub const MSR_AMD64_VIRT_SPEC_CTRL: u32 = 0xc001011f;
pub const MSR_AMD64_RMP_BASE: u32 = 0xc0010132;
pub const MSR_AMD64_RMP_END: u32 = 0xc0010133;
pub const MSR_SVSM_CAA: u32 = 0xc001f000;
pub const MSR_AMD_CPPC_CAP1: u32 = 0xc00102b0;
pub const MSR_AMD_CPPC_ENABLE: u32 = 0xc00102b1;
pub const MSR_AMD_CPPC_CAP2: u32 = 0xc00102b2;
pub const MSR_AMD_CPPC_REQ: u32 = 0xc00102b3;
pub const MSR_AMD_CPPC_STATUS: u32 = 0xc00102b4;
pub const MSR_AMD64_PERF_CNTR_GLOBAL_STATUS: u32 = 0xc0000300;
pub const MSR_AMD64_PERF_CNTR_GLOBAL_CTL: u32 = 0xc0000301;
pub const MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR: u32 = 0xc0000302;
pub const MSR_AMD64_LBR_SELECT: u32 = 0xc000010e;
pub const MSR_ZEN4_BP_CFG: u32 = 0xc001102e;
pub const MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT: u32 = 0x5;
pub const MSR_F19H_UMC_PERF_CTL: u32 = 0xc0010800;
pub const MSR_F19H_UMC_PERF_CTR: u32 = 0xc0010801;
pub const MSR_ZEN2_SPECTRAL_CHICKEN: u32 = 0xc00110e3;
pub const MSR_F17H_IRPERF: u32 = 0xc00000e9;
pub const MSR_F16H_L2I_PERF_CTL: u32 = 0xc0010230;
pub const MSR_F16H_L2I_PERF_CTR: u32 = 0xc0010231;
pub const MSR_F16H_DR1_ADDR_MASK: u32 = 0xc0011019;
pub const MSR_F16H_DR2_ADDR_MASK: u32 = 0xc001101a;
pub const MSR_F16H_DR3_ADDR_MASK: u32 = 0xc001101b;
pub const MSR_F16H_DR0_ADDR_MASK: u32 = 0xc0011027;
pub const MSR_F15H_CU_PWR_ACCUMULATOR: u32 = 0xc001007a;
pub const MSR_F15H_CU_MAX_PWR_ACCUMULATOR: u32 = 0xc001007b;
pub const MSR_F15H_PERF_CTL: u32 = 0xc0010200;
pub const MSR_F15H_PERF_CTL0: u32 = 0xc0010200;
pub const MSR_F15H_PERF_CTL1: u32 = 0xc0010202;
pub const MSR_F15H_PERF_CTL2: u32 = 0xc0010204;
pub const MSR_F15H_PERF_CTL3: u32 = 0xc0010206;
pub const MSR_F15H_PERF_CTL4: u32 = 0xc0010208;
pub const MSR_F15H_PERF_CTL5: u32 = 0xc001020a;
pub const MSR_F15H_PERF_CTR: u32 = 0xc0010201;
pub const MSR_F15H_PERF_CTR0: u32 = 0xc0010201;
pub const MSR_F15H_PERF_CTR1: u32 = 0xc0010203;
pub const MSR_F15H_PERF_CTR2: u32 = 0xc0010205;
pub const MSR_F15H_PERF_CTR3: u32 = 0xc0010207;
pub const MSR_F15H_PERF_CTR4: u32 = 0xc0010209;
pub const MSR_F15H_PERF_CTR5: u32 = 0xc001020b;
pub const MSR_F15H_NB_PERF_CTL: u32 = 0xc0010240;
pub const MSR_F15H_NB_PERF_CTR: u32 = 0xc0010241;
pub const MSR_F15H_PTSC: u32 = 0xc0010280;
pub const MSR_F15H_IC_CFG: u32 = 0xc0011021;
pub const MSR_F15H_EX_CFG: u32 = 0xc001102c;
pub const MSR_FAM10H_MMIO_CONF_BASE: u32 = 0xc0010058;
pub const MSR_FAM10H_NODE_ID: u32 = 0xc001100c;
pub const MSR_K8_TOP_MEM1: u32 = 0xc001001a;
pub const MSR_K8_TOP_MEM2: u32 = 0xc001001d;
pub const MSR_AMD64_SYSCFG: u32 = 0xc0010010;
pub const MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT: u32 = 0x17;
pub const MSR_AMD64_SYSCFG_SNP_EN_BIT: u32 = 0x18;
pub const MSR_AMD64_SYSCFG_SNP_VMPL_EN_BIT: u32 = 0x19;
pub const MSR_AMD64_SYSCFG_MFDM_BIT: u32 = 0x13;
pub const MSR_K8_INT_PENDING_MSG: u32 = 0xc0010055;
pub const MSR_K8_TSEG_ADDR: u32 = 0xc0010112;
pub const MSR_K8_TSEG_MASK: u32 = 0xc0010113;
pub const MSR_K7_EVNTSEL0: u32 = 0xc0010000;
pub const MSR_K7_PERFCTR0: u32 = 0xc0010004;
pub const MSR_K7_EVNTSEL1: u32 = 0xc0010001;
pub const MSR_K7_PERFCTR1: u32 = 0xc0010005;
pub const MSR_K7_EVNTSEL2: u32 = 0xc0010002;
pub const MSR_K7_PERFCTR2: u32 = 0xc0010006;
pub const MSR_K7_EVNTSEL3: u32 = 0xc0010003;
pub const MSR_K7_PERFCTR3: u32 = 0xc0010007;
pub const MSR_K7_CLK_CTL: u32 = 0xc001001b;
pub const MSR_K7_HWCR: u32 = 0xc0010015;
pub const MSR_K7_HWCR_SMMLOCK_BIT: u32 = 0x0;
pub const MSR_K7_HWCR_IRPERF_EN_BIT: u32 = 0x1e;
pub const MSR_K7_FID_VID_CTL: u32 = 0xc0010041;
pub const MSR_K7_FID_VID_STATUS: u32 = 0xc0010042;
pub const MSR_K7_HWCR_CPB_DIS_BIT: u32 = 0x19;
pub const MSR_K6_WHCR: u32 = 0xc0000082;
pub const MSR_K6_UWCCR: u32 = 0xc0000085;
pub const MSR_K6_EPMR: u32 = 0xc0000086;
pub const MSR_K6_PSOR: u32 = 0xc0000087;
pub const MSR_K6_PFIR: u32 = 0xc0000088;
pub const MSR_IDT_FCR1: u32 = 0x107;
pub const MSR_IDT_FCR2: u32 = 0x108;
pub const MSR_IDT_FCR3: u32 = 0x109;
pub const MSR_IDT_FCR4: u32 = 0x10a;
pub const MSR_IDT_MCR0: u32 = 0x110;
pub const MSR_IDT_MCR1: u32 = 0x111;
pub const MSR_IDT_MCR2: u32 = 0x112;
pub const MSR_IDT_MCR3: u32 = 0x113;
pub const MSR_IDT_MCR4: u32 = 0x114;
pub const MSR_IDT_MCR5: u32 = 0x115;
pub const MSR_IDT_MCR6: u32 = 0x116;
pub const MSR_IDT_MCR7: u32 = 0x117;
pub const MSR_IDT_MCR_CTRL: u32 = 0x120;
pub const MSR_VIA_FCR: u32 = 0x1107;
pub const MSR_VIA_LONGHAUL: u32 = 0x110a;
pub const MSR_VIA_RNG: u32 = 0x110b;
pub const MSR_VIA_BCR2: u32 = 0x1147;
pub const MSR_TMTA_LONGRUN_CTRL: u32 = 0x80868010;
pub const MSR_TMTA_LONGRUN_FLAGS: u32 = 0x80868011;
pub const MSR_TMTA_LRTI_READOUT: u32 = 0x80868018;
pub const MSR_TMTA_LRTI_VOLT_MHZ: u32 = 0x8086801a;
pub const MSR_IA32_P5_MC_ADDR: u32 = 0x0;
pub const MSR_IA32_P5_MC_TYPE: u32 = 0x1;
pub const MSR_IA32_TSC: u32 = 0x10;
pub const MSR_IA32_PLATFORM_ID: u32 = 0x17;
pub const MSR_IA32_EBL_CR_POWERON: u32 = 0x2a;
pub const MSR_EBC_FREQUENCY_ID: u32 = 0x2c;
pub const MSR_SMI_COUNT: u32 = 0x34;
pub const MSR_IA32_FEAT_CTL: u32 = 0x3a;
pub const MSR_IA32_TSC_ADJUST: u32 = 0x3b;
pub const MSR_IA32_BNDCFGS: u32 = 0xd90;
pub const MSR_IA32_BNDCFGS_RSVD: u32 = 0xffc;
pub const MSR_IA32_XFD: u32 = 0x1c4;
pub const MSR_IA32_XFD_ERR: u32 = 0x1c5;
pub const MSR_IA32_XSS: u32 = 0xda0;
pub const MSR_IA32_APICBASE: u32 = 0x1b;
pub const MSR_IA32_APICBASE_BSP: u32 = 0x100;
pub const MSR_IA32_APICBASE_ENABLE: u32 = 0x800;
pub const MSR_IA32_APICBASE_BASE: u32 = 0xfffff000;
pub const MSR_IA32_UCODE_WRITE: u32 = 0x79;
pub const MSR_IA32_UCODE_REV: u32 = 0x8b;
pub const MSR_IA32_SGXLEPUBKEYHASH0: u32 = 0x8c;
pub const MSR_IA32_SGXLEPUBKEYHASH1: u32 = 0x8d;
pub const MSR_IA32_SGXLEPUBKEYHASH2: u32 = 0x8e;
pub const MSR_IA32_SGXLEPUBKEYHASH3: u32 = 0x8f;
pub const MSR_IA32_SMM_MONITOR_CTL: u32 = 0x9b;
pub const MSR_IA32_SMBASE: u32 = 0x9e;
pub const MSR_IA32_PERF_STATUS: u32 = 0x198;
pub const MSR_IA32_PERF_CTL: u32 = 0x199;
pub const MSR_AMD_DBG_EXTN_CFG: u32 = 0xc000010f;
pub const MSR_AMD_SAMP_BR_FROM: u32 = 0xc0010300;
pub const MSR_IA32_MPERF: u32 = 0xe7;
pub const MSR_IA32_APERF: u32 = 0xe8;
pub const MSR_IA32_THERM_CONTROL: u32 = 0x19a;
pub const MSR_IA32_THERM_INTERRUPT: u32 = 0x19b;
pub const MSR_IA32_THERM_STATUS: u32 = 0x19c;
pub const MSR_THERM2_CTL: u32 = 0x19d;
pub const MSR_THERM2_CTL_TM_SELECT: u32 = 0x10000;
pub const MSR_IA32_MISC_ENABLE: u32 = 0x1a0;
pub const MSR_IA32_TEMPERATURE_TARGET: u32 = 0x1a2;
pub const MSR_MISC_FEATURE_CONTROL: u32 = 0x1a4;
pub const MSR_MISC_PWR_MGMT: u32 = 0x1aa;
pub const MSR_IA32_ENERGY_PERF_BIAS: u32 = 0x1b0;
pub const MSR_IA32_PACKAGE_THERM_STATUS: u32 = 0x1b1;
pub const MSR_IA32_PACKAGE_THERM_INTERRUPT: u32 = 0x1b2;
pub const MSR_IA32_MISC_ENABLE_FAST_STRING_BIT: u32 = 0x0;
pub const MSR_IA32_MISC_ENABLE_FAST_STRING: u32 = 0x1;
pub const MSR_IA32_MISC_ENABLE_TCC_BIT: u32 = 0x1;
pub const MSR_IA32_MISC_ENABLE_TCC: u32 = 0x2;
pub const MSR_IA32_MISC_ENABLE_EMON_BIT: u32 = 0x7;
pub const MSR_IA32_MISC_ENABLE_EMON: u32 = 0x80;
pub const MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT: u32 = 0xb;
pub const MSR_IA32_MISC_ENABLE_BTS_UNAVAIL: u32 = 0x800;
pub const MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT: u32 = 0xc;
pub const MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL: u32 = 0x1000;
pub const MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT: u32 = 0x10;
pub const MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP: u32 = 0x10000;
pub const MSR_IA32_MISC_ENABLE_MWAIT_BIT: u32 = 0x12;
pub const MSR_IA32_MISC_ENABLE_MWAIT: u32 = 0x40000;
pub const MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT: u32 = 0x16;
pub const MSR_IA32_MISC_ENABLE_LIMIT_CPUID: u32 = 0x400000;
pub const MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT: u32 = 0x17;
pub const MSR_IA32_MISC_ENABLE_XTPR_DISABLE: u32 = 0x800000;
pub const MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT: u32 = 0x22;
pub const MSR_IA32_MISC_ENABLE_XD_DISABLE: u64 = 0x400000000;
pub const MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT: u32 = 0x2;
pub const MSR_IA32_MISC_ENABLE_X87_COMPAT: u32 = 0x4;
pub const MSR_IA32_MISC_ENABLE_TM1_BIT: u32 = 0x3;
pub const MSR_IA32_MISC_ENABLE_TM1: u32 = 0x8;
pub const MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT: u32 = 0x4;
pub const MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE: u32 = 0x10;
pub const MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT: u32 = 0x6;
pub const MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE: u32 = 0x40;
pub const MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT: u32 = 0x8;
pub const MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK: u32 = 0x100;
pub const MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT: u32 = 0x9;
pub const MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE: u32 = 0x200;
pub const MSR_IA32_MISC_ENABLE_FERR_BIT: u32 = 0xa;
pub const MSR_IA32_MISC_ENABLE_FERR: u32 = 0x400;
pub const MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT: u32 = 0xa;
pub const MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX: u32 = 0x400;
pub const MSR_IA32_MISC_ENABLE_TM2_BIT: u32 = 0xd;
pub const MSR_IA32_MISC_ENABLE_TM2: u32 = 0x2000;
pub const MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT: u32 = 0x13;
pub const MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE: u32 = 0x80000;
pub const MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT: u32 = 0x14;
pub const MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK: u32 = 0x100000;
pub const MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT: u32 = 0x18;
pub const MSR_IA32_MISC_ENABLE_L1D_CONTEXT: u32 = 0x1000000;
pub const MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT: u32 = 0x25;
pub const MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE: u64 = 0x2000000000;
pub const MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT: u32 = 0x26;
pub const MSR_IA32_MISC_ENABLE_TURBO_DISABLE: u64 = 0x4000000000;
pub const MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT: u32 = 0x27;
pub const MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE: u64 = 0x8000000000;
pub const MSR_MISC_FEATURES_ENABLES: u32 = 0x140;
pub const MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT: u32 = 0x0;
pub const MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT: u32 = 0x1;
pub const MSR_IA32_TSC_DEADLINE: u32 = 0x6e0;
pub const MSR_TSX_FORCE_ABORT: u32 = 0x10f;
pub const MSR_TFA_RTM_FORCE_ABORT_BIT: u32 = 0x0;
pub const MSR_TFA_TSX_CPUID_CLEAR_BIT: u32 = 0x1;
pub const MSR_TFA_SDV_ENABLE_RTM_BIT: u32 = 0x2;
pub const MSR_IA32_MCG_EAX: u32 = 0x180;
pub const MSR_IA32_MCG_EBX: u32 = 0x181;
pub const MSR_IA32_MCG_ECX: u32 = 0x182;
pub const MSR_IA32_MCG_EDX: u32 = 0x183;
pub const MSR_IA32_MCG_ESI: u32 = 0x184;
pub const MSR_IA32_MCG_EDI: u32 = 0x185;
pub const MSR_IA32_MCG_EBP: u32 = 0x186;
pub const MSR_IA32_MCG_ESP: u32 = 0x187;
pub const MSR_IA32_MCG_EFLAGS: u32 = 0x188;
pub const MSR_IA32_MCG_EIP: u32 = 0x189;
pub const MSR_IA32_MCG_RESERVED: u32 = 0x18a;
pub const MSR_P4_BPU_PERFCTR0: u32 = 0x300;
pub const MSR_P4_BPU_PERFCTR1: u32 = 0x301;
pub const MSR_P4_BPU_PERFCTR2: u32 = 0x302;
pub const MSR_P4_BPU_PERFCTR3: u32 = 0x303;
pub const MSR_P4_MS_PERFCTR0: u32 = 0x304;
pub const MSR_P4_MS_PERFCTR1: u32 = 0x305;
pub const MSR_P4_MS_PERFCTR2: u32 = 0x306;
pub const MSR_P4_MS_PERFCTR3: u32 = 0x307;
pub const MSR_P4_FLAME_PERFCTR0: u32 = 0x308;
pub const MSR_P4_FLAME_PERFCTR1: u32 = 0x309;
pub const MSR_P4_FLAME_PERFCTR2: u32 = 0x30a;
pub const MSR_P4_FLAME_PERFCTR3: u32 = 0x30b;
pub const MSR_P4_IQ_PERFCTR0: u32 = 0x30c;
pub const MSR_P4_IQ_PERFCTR1: u32 = 0x30d;
pub const MSR_P4_IQ_PERFCTR2: u32 = 0x30e;
pub const MSR_P4_IQ_PERFCTR3: u32 = 0x30f;
pub const MSR_P4_IQ_PERFCTR4: u32 = 0x310;
pub const MSR_P4_IQ_PERFCTR5: u32 = 0x311;
pub const MSR_P4_BPU_CCCR0: u32 = 0x360;
pub const MSR_P4_BPU_CCCR1: u32 = 0x361;
pub const MSR_P4_BPU_CCCR2: u32 = 0x362;
pub const MSR_P4_BPU_CCCR3: u32 = 0x363;
pub const MSR_P4_MS_CCCR0: u32 = 0x364;
pub const MSR_P4_MS_CCCR1: u32 = 0x365;
pub const MSR_P4_MS_CCCR2: u32 = 0x366;
pub const MSR_P4_MS_CCCR3: u32 = 0x367;
pub const MSR_P4_FLAME_CCCR0: u32 = 0x368;
pub const MSR_P4_FLAME_CCCR1: u32 = 0x369;
pub const MSR_P4_FLAME_CCCR2: u32 = 0x36a;
pub const MSR_P4_FLAME_CCCR3: u32 = 0x36b;
pub const MSR_P4_IQ_CCCR0: u32 = 0x36c;
pub const MSR_P4_IQ_CCCR1: u32 = 0x36d;
pub const MSR_P4_IQ_CCCR2: u32 = 0x36e;
pub const MSR_P4_IQ_CCCR3: u32 = 0x36f;
pub const MSR_P4_IQ_CCCR4: u32 = 0x370;
pub const MSR_P4_IQ_CCCR5: u32 = 0x371;
pub const MSR_P4_ALF_ESCR0: u32 = 0x3ca;
pub const MSR_P4_ALF_ESCR1: u32 = 0x3cb;
pub const MSR_P4_BPU_ESCR0: u32 = 0x3b2;
pub const MSR_P4_BPU_ESCR1: u32 = 0x3b3;
pub const MSR_P4_BSU_ESCR0: u32 = 0x3a0;
pub const MSR_P4_BSU_ESCR1: u32 = 0x3a1;
pub const MSR_P4_CRU_ESCR0: u32 = 0x3b8;
pub const MSR_P4_CRU_ESCR1: u32 = 0x3b9;
pub const MSR_P4_CRU_ESCR2: u32 = 0x3cc;
pub const MSR_P4_CRU_ESCR3: u32 = 0x3cd;
pub const MSR_P4_CRU_ESCR4: u32 = 0x3e0;
pub const MSR_P4_CRU_ESCR5: u32 = 0x3e1;
pub const MSR_P4_DAC_ESCR0: u32 = 0x3a8;
pub const MSR_P4_DAC_ESCR1: u32 = 0x3a9;
pub const MSR_P4_FIRM_ESCR0: u32 = 0x3a4;
pub const MSR_P4_FIRM_ESCR1: u32 = 0x3a5;
pub const MSR_P4_FLAME_ESCR0: u32 = 0x3a6;
pub const MSR_P4_FLAME_ESCR1: u32 = 0x3a7;
pub const MSR_P4_FSB_ESCR0: u32 = 0x3a2;
pub const MSR_P4_FSB_ESCR1: u32 = 0x3a3;
pub const MSR_P4_IQ_ESCR0: u32 = 0x3ba;
pub const MSR_P4_IQ_ESCR1: u32 = 0x3bb;
pub const MSR_P4_IS_ESCR0: u32 = 0x3b4;
pub const MSR_P4_IS_ESCR1: u32 = 0x3b5;
pub const MSR_P4_ITLB_ESCR0: u32 = 0x3b6;
pub const MSR_P4_ITLB_ESCR1: u32 = 0x3b7;
pub const MSR_P4_IX_ESCR0: u32 = 0x3c8;
pub const MSR_P4_IX_ESCR1: u32 = 0x3c9;
pub const MSR_P4_MOB_ESCR0: u32 = 0x3aa;
pub const MSR_P4_MOB_ESCR1: u32 = 0x3ab;
pub const MSR_P4_MS_ESCR0: u32 = 0x3c0;
pub const MSR_P4_MS_ESCR1: u32 = 0x3c1;
pub const MSR_P4_PMH_ESCR0: u32 = 0x3ac;
pub const MSR_P4_PMH_ESCR1: u32 = 0x3ad;
pub const MSR_P4_RAT_ESCR0: u32 = 0x3bc;
pub const MSR_P4_RAT_ESCR1: u32 = 0x3bd;
pub const MSR_P4_SAAT_ESCR0: u32 = 0x3ae;
pub const MSR_P4_SAAT_ESCR1: u32 = 0x3af;
pub const MSR_P4_SSU_ESCR0: u32 = 0x3be;
pub const MSR_P4_SSU_ESCR1: u32 = 0x3bf;
pub const MSR_P4_TBPU_ESCR0: u32 = 0x3c2;
pub const MSR_P4_TBPU_ESCR1: u32 = 0x3c3;
pub const MSR_P4_TC_ESCR0: u32 = 0x3c4;
pub const MSR_P4_TC_ESCR1: u32 = 0x3c5;
pub const MSR_P4_U2L_ESCR0: u32 = 0x3b0;
pub const MSR_P4_U2L_ESCR1: u32 = 0x3b1;
pub const MSR_P4_PEBS_MATRIX_VERT: u32 = 0x3f2;
pub const MSR_CORE_PERF_FIXED_CTR0: u32 = 0x309;
pub const MSR_CORE_PERF_FIXED_CTR1: u32 = 0x30a;
pub const MSR_CORE_PERF_FIXED_CTR2: u32 = 0x30b;
pub const MSR_CORE_PERF_FIXED_CTR3: u32 = 0x30c;
pub const MSR_CORE_PERF_FIXED_CTR_CTRL: u32 = 0x38d;
pub const MSR_CORE_PERF_GLOBAL_STATUS: u32 = 0x38e;
pub const MSR_CORE_PERF_GLOBAL_CTRL: u32 = 0x38f;
pub const MSR_CORE_PERF_GLOBAL_OVF_CTRL: u32 = 0x390;
pub const MSR_PERF_METRICS: u32 = 0x329;
pub const MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT: u32 = 0x37;
pub const MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI: u64 = 0x80000000000000;
pub const MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF_BIT: u32 = 0x3e;
pub const MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF: u64 = 0x4000000000000000;
pub const MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD_BIT: u32 = 0x3f;
pub const MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD: i64 = -9223372036854775808;
pub const MSR_GEODE_BUSCONT_CONF0: u32 = 0x1900;
pub const MSR_IA32_VMX_BASIC: u32 = 0x480;
pub const MSR_IA32_VMX_PINBASED_CTLS: u32 = 0x481;
pub const MSR_IA32_VMX_PROCBASED_CTLS: u32 = 0x482;
pub const MSR_IA32_VMX_EXIT_CTLS: u32 = 0x483;
pub const MSR_IA32_VMX_ENTRY_CTLS: u32 = 0x484;
pub const MSR_IA32_VMX_MISC: u32 = 0x485;
pub const MSR_IA32_VMX_CR0_FIXED0: u32 = 0x486;
pub const MSR_IA32_VMX_CR0_FIXED1: u32 = 0x487;
pub const MSR_IA32_VMX_CR4_FIXED0: u32 = 0x488;
pub const MSR_IA32_VMX_CR4_FIXED1: u32 = 0x489;
pub const MSR_IA32_VMX_VMCS_ENUM: u32 = 0x48a;
pub const MSR_IA32_VMX_PROCBASED_CTLS2: u32 = 0x48b;
pub const MSR_IA32_VMX_EPT_VPID_CAP: u32 = 0x48c;
pub const MSR_IA32_VMX_TRUE_PINBASED_CTLS: u32 = 0x48d;
pub const MSR_IA32_VMX_TRUE_PROCBASED_CTLS: u32 = 0x48e;
pub const MSR_IA32_VMX_TRUE_EXIT_CTLS: u32 = 0x48f;
pub const MSR_IA32_VMX_TRUE_ENTRY_CTLS: u32 = 0x490;
pub const MSR_IA32_VMX_VMFUNC: u32 = 0x491;
pub const MSR_IA32_VMX_PROCBASED_CTLS3: u32 = 0x492;
pub const MSR_IA32_L3_QOS_CFG: u32 = 0xc81;
pub const MSR_IA32_L2_QOS_CFG: u32 = 0xc82;
pub const MSR_IA32_QM_EVTSEL: u32 = 0xc8d;
pub const MSR_IA32_QM_CTR: u32 = 0xc8e;
pub const MSR_IA32_PQR_ASSOC: u32 = 0xc8f;
pub const MSR_IA32_L3_CBM_BASE: u32 = 0xc90;
pub const MSR_RMID_SNC_CONFIG: u32 = 0xca0;
pub const MSR_IA32_L2_CBM_BASE: u32 = 0xd10;
pub const MSR_IA32_MBA_THRTL_BASE: u32 = 0xd50;
pub const MSR_IA32_MBA_BW_BASE: u32 = 0xc0000200;
pub const MSR_IA32_SMBA_BW_BASE: u32 = 0xc0000280;
pub const MSR_IA32_EVT_CFG_BASE: u32 = 0xc0000400;
pub const MSR_VM_CR: u32 = 0xc0010114;
pub const MSR_VM_IGNNE: u32 = 0xc0010115;
pub const MSR_VM_HSAVE_PA: u32 = 0xc0010117;
pub const MSR_IA32_HW_FEEDBACK_PTR: u32 = 0x17d0;
pub const MSR_IA32_HW_FEEDBACK_CONFIG: u32 = 0x17d1;
pub const MSR_IA32_XAPIC_DISABLE_STATUS: u32 = 0xbd;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/x86_64/generated/arch_prctl.rs | src/vmm/src/arch/x86_64/generated/arch_prctl.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// automatically generated by tools/bindgen.sh
#![allow(
non_camel_case_types,
non_upper_case_globals,
dead_code,
non_snake_case,
clippy::ptr_as_ptr,
clippy::undocumented_unsafe_blocks,
missing_debug_implementations,
clippy::tests_outside_test_module,
unsafe_op_in_unsafe_fn,
clippy::redundant_static_lifetimes
)]
pub const ARCH_SET_GS: u32 = 4097;
pub const ARCH_SET_FS: u32 = 4098;
pub const ARCH_GET_FS: u32 = 4099;
pub const ARCH_GET_GS: u32 = 4100;
pub const ARCH_GET_CPUID: u32 = 4113;
pub const ARCH_SET_CPUID: u32 = 4114;
pub const ARCH_GET_XCOMP_SUPP: u32 = 4129;
pub const ARCH_GET_XCOMP_PERM: u32 = 4130;
pub const ARCH_REQ_XCOMP_PERM: u32 = 4131;
pub const ARCH_GET_XCOMP_GUEST_PERM: u32 = 4132;
pub const ARCH_REQ_XCOMP_GUEST_PERM: u32 = 4133;
pub const ARCH_XCOMP_TILECFG: u32 = 17;
pub const ARCH_XCOMP_TILEDATA: u32 = 18;
pub const ARCH_MAP_VDSO_X32: u32 = 8193;
pub const ARCH_MAP_VDSO_32: u32 = 8194;
pub const ARCH_MAP_VDSO_64: u32 = 8195;
pub const ARCH_GET_UNTAG_MASK: u32 = 16385;
pub const ARCH_ENABLE_TAGGED_ADDR: u32 = 16386;
pub const ARCH_GET_MAX_TAG_BITS: u32 = 16387;
pub const ARCH_FORCE_TAGGED_SVA: u32 = 16388;
pub const ARCH_SHSTK_ENABLE: u32 = 20481;
pub const ARCH_SHSTK_DISABLE: u32 = 20482;
pub const ARCH_SHSTK_LOCK: u32 = 20483;
pub const ARCH_SHSTK_UNLOCK: u32 = 20484;
pub const ARCH_SHSTK_STATUS: u32 = 20485;
pub const ARCH_SHSTK_SHSTK: u32 = 1;
pub const ARCH_SHSTK_WRSS: u32 = 2;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/x86_64/generated/hyperv_tlfs.rs | src/vmm/src/arch/x86_64/generated/hyperv_tlfs.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// automatically generated by tools/bindgen.sh
#![allow(
non_camel_case_types,
non_upper_case_globals,
dead_code,
non_snake_case,
clippy::ptr_as_ptr,
clippy::undocumented_unsafe_blocks,
missing_debug_implementations,
clippy::tests_outside_test_module,
unsafe_op_in_unsafe_fn,
clippy::redundant_static_lifetimes
)]
pub const HV_X64_MSR_GUEST_OS_ID: u32 = 0x40000000;
pub const HV_X64_MSR_HYPERCALL: u32 = 0x40000001;
pub const HV_X64_MSR_VP_INDEX: u32 = 0x40000002;
pub const HV_X64_MSR_RESET: u32 = 0x40000003;
pub const HV_X64_MSR_VP_RUNTIME: u32 = 0x40000010;
pub const HV_X64_MSR_TIME_REF_COUNT: u32 = 0x40000020;
pub const HV_X64_MSR_REFERENCE_TSC: u32 = 0x40000021;
pub const HV_X64_MSR_TSC_FREQUENCY: u32 = 0x40000022;
pub const HV_X64_MSR_APIC_FREQUENCY: u32 = 0x40000023;
pub const HV_X64_MSR_EOI: u32 = 0x40000070;
pub const HV_X64_MSR_ICR: u32 = 0x40000071;
pub const HV_X64_MSR_TPR: u32 = 0x40000072;
pub const HV_X64_MSR_VP_ASSIST_PAGE: u32 = 0x40000073;
pub const HV_X64_MSR_SCONTROL: u32 = 0x40000080;
pub const HV_X64_MSR_SVERSION: u32 = 0x40000081;
pub const HV_X64_MSR_SIEFP: u32 = 0x40000082;
pub const HV_X64_MSR_SIMP: u32 = 0x40000083;
pub const HV_X64_MSR_EOM: u32 = 0x40000084;
pub const HV_X64_MSR_SINT0: u32 = 0x40000090;
pub const HV_X64_MSR_SINT1: u32 = 0x40000091;
pub const HV_X64_MSR_SINT2: u32 = 0x40000092;
pub const HV_X64_MSR_SINT3: u32 = 0x40000093;
pub const HV_X64_MSR_SINT4: u32 = 0x40000094;
pub const HV_X64_MSR_SINT5: u32 = 0x40000095;
pub const HV_X64_MSR_SINT6: u32 = 0x40000096;
pub const HV_X64_MSR_SINT7: u32 = 0x40000097;
pub const HV_X64_MSR_SINT8: u32 = 0x40000098;
pub const HV_X64_MSR_SINT9: u32 = 0x40000099;
pub const HV_X64_MSR_SINT10: u32 = 0x4000009a;
pub const HV_X64_MSR_SINT11: u32 = 0x4000009b;
pub const HV_X64_MSR_SINT12: u32 = 0x4000009c;
pub const HV_X64_MSR_SINT13: u32 = 0x4000009d;
pub const HV_X64_MSR_SINT14: u32 = 0x4000009e;
pub const HV_X64_MSR_SINT15: u32 = 0x4000009f;
pub const HV_X64_MSR_NESTED_SCONTROL: u32 = 0x40001080;
pub const HV_X64_MSR_NESTED_SVERSION: u32 = 0x40001081;
pub const HV_X64_MSR_NESTED_SIEFP: u32 = 0x40001082;
pub const HV_X64_MSR_NESTED_SIMP: u32 = 0x40001083;
pub const HV_X64_MSR_NESTED_EOM: u32 = 0x40001084;
pub const HV_X64_MSR_NESTED_SINT0: u32 = 0x40001090;
pub const HV_X64_MSR_STIMER0_CONFIG: u32 = 0x400000b0;
pub const HV_X64_MSR_STIMER0_COUNT: u32 = 0x400000b1;
pub const HV_X64_MSR_STIMER1_CONFIG: u32 = 0x400000b2;
pub const HV_X64_MSR_STIMER1_COUNT: u32 = 0x400000b3;
pub const HV_X64_MSR_STIMER2_CONFIG: u32 = 0x400000b4;
pub const HV_X64_MSR_STIMER2_COUNT: u32 = 0x400000b5;
pub const HV_X64_MSR_STIMER3_CONFIG: u32 = 0x400000b6;
pub const HV_X64_MSR_STIMER3_COUNT: u32 = 0x400000b7;
pub const HV_X64_MSR_GUEST_IDLE: u32 = 0x400000f0;
pub const HV_X64_MSR_CRASH_P0: u32 = 0x40000100;
pub const HV_X64_MSR_CRASH_P1: u32 = 0x40000101;
pub const HV_X64_MSR_CRASH_P2: u32 = 0x40000102;
pub const HV_X64_MSR_CRASH_P3: u32 = 0x40000103;
pub const HV_X64_MSR_CRASH_P4: u32 = 0x40000104;
pub const HV_X64_MSR_CRASH_CTL: u32 = 0x40000105;
pub const HV_X64_MSR_REENLIGHTENMENT_CONTROL: u32 = 0x40000106;
pub const HV_X64_MSR_TSC_EMULATION_CONTROL: u32 = 0x40000107;
pub const HV_X64_MSR_TSC_EMULATION_STATUS: u32 = 0x40000108;
pub const HV_X64_MSR_TSC_INVARIANT_CONTROL: u32 = 0x40000118;
pub const HV_X64_MSR_HYPERCALL_ENABLE: u32 = 0x1;
pub const HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT: u32 = 0xc;
pub const HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK: i32 = -4096;
pub const HV_X64_MSR_CRASH_PARAMS: u32 = 0x5;
pub const HV_X64_MSR_VP_ASSIST_PAGE_ENABLE: u32 = 0x1;
pub const HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT: u32 = 0xc;
pub const HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK: i32 = -4096;
pub const HV_X64_MSR_TSC_REFERENCE_ENABLE: u32 = 0x1;
pub const HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT: u32 = 0xc;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/x86_64/generated/mod.rs | src/vmm/src/arch/x86_64/generated/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
pub mod arch_prctl;
pub mod hyperv;
pub mod hyperv_tlfs;
pub mod mpspec;
pub mod msr_index;
pub mod perf_event;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/x86_64/generated/perf_event.rs | src/vmm/src/arch/x86_64/generated/perf_event.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// automatically generated by tools/bindgen.sh
#![allow(
non_camel_case_types,
non_upper_case_globals,
dead_code,
non_snake_case,
clippy::ptr_as_ptr,
clippy::undocumented_unsafe_blocks,
missing_debug_implementations,
clippy::tests_outside_test_module,
unsafe_op_in_unsafe_fn,
clippy::redundant_static_lifetimes
)]
pub const MSR_ARCH_PERFMON_PERFCTR0: u32 = 0xc1;
pub const MSR_ARCH_PERFMON_PERFCTR1: u32 = 0xc2;
pub const MSR_ARCH_PERFMON_EVENTSEL0: u32 = 0x186;
pub const MSR_ARCH_PERFMON_EVENTSEL1: u32 = 0x187;
pub const MSR_ARCH_PERFMON_FIXED_CTR_CTRL: u32 = 0x38d;
pub const MSR_ARCH_PERFMON_FIXED_CTR0: u32 = 0x309;
pub const MSR_ARCH_PERFMON_FIXED_CTR1: u32 = 0x30a;
pub const MSR_ARCH_PERFMON_FIXED_CTR2: u32 = 0x30b;
pub const MSR_ARCH_PERFMON_FIXED_CTR3: u32 = 0x30c;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/x86_64/generated/hyperv.rs | src/vmm/src/arch/x86_64/generated/hyperv.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// automatically generated by tools/bindgen.sh
#![allow(
non_camel_case_types,
non_upper_case_globals,
dead_code,
non_snake_case,
clippy::ptr_as_ptr,
clippy::undocumented_unsafe_blocks,
missing_debug_implementations,
clippy::tests_outside_test_module,
unsafe_op_in_unsafe_fn,
clippy::redundant_static_lifetimes
)]
pub const HV_X64_MSR_SYNDBG_CONTROL: u32 = 0x400000f1;
pub const HV_X64_MSR_SYNDBG_STATUS: u32 = 0x400000f2;
pub const HV_X64_MSR_SYNDBG_SEND_BUFFER: u32 = 0x400000f3;
pub const HV_X64_MSR_SYNDBG_RECV_BUFFER: u32 = 0x400000f4;
pub const HV_X64_MSR_SYNDBG_PENDING_BUFFER: u32 = 0x400000f5;
pub const HV_X64_MSR_SYNDBG_OPTIONS: u32 = 0x400000ff;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/vm.rs | src/vmm/src/arch/aarch64/vm.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::sync::Mutex;
use serde::{Deserialize, Serialize};
use crate::Kvm;
use crate::arch::aarch64::gic::GicState;
use crate::vstate::memory::{GuestMemoryExtension, GuestMemoryState};
use crate::vstate::resources::ResourceAllocator;
use crate::vstate::vm::{VmCommon, VmError};
/// Structure representing the current architecture's understand of what a "virtual machine" is.
#[derive(Debug)]
pub struct ArchVm {
/// Architecture independent parts of a vm.
pub common: VmCommon,
// On aarch64 we need to keep around the fd obtained by creating the VGIC device.
irqchip_handle: Option<crate::arch::aarch64::gic::GICDevice>,
}
/// Error type for [`Vm::restore_state`]
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum ArchVmError {
/// Error creating the global interrupt controller: {0}
VmCreateGIC(crate::arch::aarch64::gic::GicError),
/// Failed to save the VM's GIC state: {0}
SaveGic(crate::arch::aarch64::gic::GicError),
/// Failed to restore the VM's GIC state: {0}
RestoreGic(crate::arch::aarch64::gic::GicError),
}
impl ArchVm {
/// Create a new `Vm` struct.
pub fn new(kvm: &Kvm) -> Result<ArchVm, VmError> {
let common = Self::create_common(kvm)?;
Ok(ArchVm {
common,
irqchip_handle: None,
})
}
/// Pre-vCPU creation setup.
pub fn arch_pre_create_vcpus(&mut self, _: u8) -> Result<(), ArchVmError> {
Ok(())
}
/// Post-vCPU creation setup.
pub fn arch_post_create_vcpus(&mut self, nr_vcpus: u8) -> Result<(), ArchVmError> {
// On aarch64, the vCPUs need to be created (i.e call KVM_CREATE_VCPU) before setting up the
// IRQ chip because the `KVM_CREATE_VCPU` ioctl will return error if the IRQCHIP
// was already initialized.
// Search for `kvm_arch_vcpu_create` in arch/arm/kvm/arm.c.
self.setup_irqchip(nr_vcpus)
}
/// Creates the GIC (Global Interrupt Controller).
pub fn setup_irqchip(&mut self, vcpu_count: u8) -> Result<(), ArchVmError> {
self.irqchip_handle = Some(
crate::arch::aarch64::gic::create_gic(self.fd(), vcpu_count.into(), None)
.map_err(ArchVmError::VmCreateGIC)?,
);
Ok(())
}
/// Gets a reference to the irqchip of the VM.
pub fn get_irqchip(&self) -> &crate::arch::aarch64::gic::GICDevice {
self.irqchip_handle.as_ref().expect("IRQ chip not set")
}
/// Saves and returns the Kvm Vm state.
pub fn save_state(&self, mpidrs: &[u64]) -> Result<VmState, ArchVmError> {
Ok(VmState {
memory: self.common.guest_memory.describe(),
gic: self
.get_irqchip()
.save_device(mpidrs)
.map_err(ArchVmError::SaveGic)?,
resource_allocator: self.resource_allocator().clone(),
})
}
/// Restore the KVM VM state
///
/// # Errors
///
/// When [`crate::arch::aarch64::gic::GICDevice::restore_device`] errors.
pub fn restore_state(&mut self, mpidrs: &[u64], state: &VmState) -> Result<(), ArchVmError> {
self.get_irqchip()
.restore_device(mpidrs, &state.gic)
.map_err(ArchVmError::RestoreGic)?;
self.common.resource_allocator = Mutex::new(state.resource_allocator.clone());
Ok(())
}
}
/// Structure holding an general specific VM state.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct VmState {
/// Guest memory state
pub memory: GuestMemoryState,
/// GIC state.
pub gic: GicState,
/// resource allocator
pub resource_allocator: ResourceAllocator,
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/kvm.rs | src/vmm/src/arch/aarch64/kvm.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::convert::Infallible;
use kvm_ioctls::Kvm as KvmFd;
use crate::cpu_config::templates::KvmCapability;
/// ['Kvm'] initialization can't fail for Aarch64
pub type KvmArchError = Infallible;
/// Optional capabilities.
#[derive(Debug, Default)]
pub struct OptionalCapabilities {
/// KVM_CAP_COUNTER_OFFSET
pub counter_offset: bool,
}
/// Struct with kvm fd and kvm associated parameters.
#[derive(Debug)]
pub struct Kvm {
/// KVM fd.
pub fd: KvmFd,
/// Additional capabilities that were specified in cpu template.
pub kvm_cap_modifiers: Vec<KvmCapability>,
}
impl Kvm {
pub(crate) const DEFAULT_CAPABILITIES: [u32; 7] = [
kvm_bindings::KVM_CAP_IOEVENTFD,
kvm_bindings::KVM_CAP_IRQFD,
kvm_bindings::KVM_CAP_USER_MEMORY,
kvm_bindings::KVM_CAP_ARM_PSCI_0_2,
kvm_bindings::KVM_CAP_DEVICE_CTRL,
kvm_bindings::KVM_CAP_MP_STATE,
kvm_bindings::KVM_CAP_ONE_REG,
];
/// Initialize [`Kvm`] type for Aarch64 architecture
pub fn init_arch(
fd: KvmFd,
kvm_cap_modifiers: Vec<KvmCapability>,
) -> Result<Self, KvmArchError> {
Ok(Self {
fd,
kvm_cap_modifiers,
})
}
/// Returns struct with optional capabilities statuses.
pub fn optional_capabilities(&self) -> OptionalCapabilities {
OptionalCapabilities {
counter_offset: self
.fd
.check_extension_raw(kvm_bindings::KVM_CAP_COUNTER_OFFSET.into())
!= 0,
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/vcpu.rs | src/vmm/src/arch/aarch64/vcpu.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::fmt::{Debug, Write};
use std::mem::offset_of;
use std::sync::Arc;
use kvm_bindings::*;
use kvm_ioctls::{VcpuExit, VcpuFd, VmFd};
use serde::{Deserialize, Serialize};
use vm_memory::GuestAddress;
use super::get_fdt_addr;
use super::regs::*;
use crate::arch::EntryPoint;
use crate::arch::aarch64::kvm::OptionalCapabilities;
use crate::arch::aarch64::regs::{Aarch64RegisterVec, KVM_REG_ARM64_SVE_VLS};
use crate::cpu_config::aarch64::custom_cpu_template::VcpuFeatures;
use crate::cpu_config::templates::CpuConfiguration;
use crate::logger::{IncMetric, METRICS, error};
use crate::vcpu::{VcpuConfig, VcpuError};
use crate::vstate::bus::Bus;
use crate::vstate::memory::{Address, GuestMemoryMmap};
use crate::vstate::vcpu::VcpuEmulation;
use crate::vstate::vm::Vm;
/// Errors thrown while setting aarch64 registers.
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum VcpuArchError {
/// Failed to get register {0}: {1}
GetOneReg(u64, kvm_ioctls::Error),
/// Failed to set register {0:#x} to value {1}: {2}
SetOneReg(u64, String, kvm_ioctls::Error),
/// Failed to retrieve list of registers: {0}
GetRegList(kvm_ioctls::Error),
/// Failed to get multiprocessor state: {0}
GetMp(kvm_ioctls::Error),
/// Failed to set multiprocessor state: {0}
SetMp(kvm_ioctls::Error),
/// Failed FamStructWrapper operation: {0}
Fam(vmm_sys_util::fam::Error),
/// Failed to set/get device attributes for vCPU: {0}
DeviceAttribute(kvm_ioctls::Error),
}
/// Extract the Manufacturer ID from the host.
/// The ID is found between bits 24-31 of MIDR_EL1 register.
pub fn get_manufacturer_id_from_host() -> Option<u32> {
let midr_el1_path = "/sys/devices/system/cpu/cpu0/regs/identification/midr_el1";
let midr_el1 = std::fs::read_to_string(midr_el1_path).ok()?;
let midr_el1_trimmed = midr_el1.trim_end().trim_start_matches("0x");
let manufacturer_id = u32::from_str_radix(midr_el1_trimmed, 16).ok()?;
Some(manufacturer_id >> 24)
}
/// Saves states of registers into `state`.
///
/// # Arguments
///
/// * `ids` - Slice of registers ids to save.
/// * `regs` - Input/Output vector of registers.
pub fn get_registers(
vcpu_fd: &VcpuFd,
ids: &[u64],
regs: &mut Aarch64RegisterVec,
) -> Result<(), VcpuArchError> {
let mut big_reg = [0_u8; 256];
for id in ids.iter() {
let reg_size = vcpu_fd
.get_one_reg(*id, &mut big_reg)
.map_err(|e| VcpuArchError::GetOneReg(*id, e))?;
let reg_ref = Aarch64RegisterRef::new(*id, &big_reg[0..reg_size]);
regs.push(reg_ref);
}
Ok(())
}
/// Errors associated with the wrappers over KVM ioctls.
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum KvmVcpuError {
/// Error configuring the vcpu registers: {0}
ConfigureRegisters(VcpuArchError),
/// Error creating vcpu: {0}
CreateVcpu(kvm_ioctls::Error),
/// Failed to dump CPU configuration: {0}
DumpCpuConfig(VcpuArchError),
/// Error getting the vcpu preferred target: {0}
GetPreferredTarget(kvm_ioctls::Error),
/// Error initializing the vcpu: {0}
Init(kvm_ioctls::Error),
/// Error applying template: {0}
ApplyCpuTemplate(VcpuArchError),
/// Failed to restore the state of the vcpu: {0}
RestoreState(VcpuArchError),
/// Failed to save the state of the vcpu: {0}
SaveState(VcpuArchError),
}
/// Error type for [`KvmVcpu::configure`].
pub type KvmVcpuConfigureError = KvmVcpuError;
/// A wrapper around creating and using a kvm aarch64 vcpu.
#[derive(Debug)]
pub struct KvmVcpu {
/// Index of vcpu.
pub index: u8,
/// KVM vcpu fd.
pub fd: VcpuFd,
/// Vcpu peripherals, such as buses
pub peripherals: Peripherals,
kvi: kvm_vcpu_init,
/// IPA of steal_time region
pub pvtime_ipa: Option<GuestAddress>,
}
/// Vcpu peripherals
#[derive(Default, Debug)]
pub struct Peripherals {
/// mmio bus.
pub mmio_bus: Option<Arc<Bus>>,
}
impl KvmVcpu {
/// Constructs a new kvm vcpu with arch specific functionality.
///
/// # Arguments
///
/// * `index` - Represents the 0-based CPU index between [0, max vcpus).
/// * `vm` - The vm to which this vcpu will get attached.
pub fn new(index: u8, vm: &Vm) -> Result<Self, KvmVcpuError> {
let kvm_vcpu = vm
.fd()
.create_vcpu(index.into())
.map_err(KvmVcpuError::CreateVcpu)?;
let mut kvi = Self::default_kvi(vm.fd())?;
// Secondary vcpus must be powered off for boot process.
if 0 < index {
kvi.features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
}
Ok(KvmVcpu {
index,
fd: kvm_vcpu,
peripherals: Default::default(),
kvi,
pvtime_ipa: None,
})
}
/// Read the MPIDR - Multiprocessor Affinity Register.
pub fn get_mpidr(&self) -> Result<u64, VcpuArchError> {
// MPIDR register is 64 bit wide on aarch64
let mut mpidr = [0_u8; 8];
match self.fd.get_one_reg(MPIDR_EL1, &mut mpidr) {
Err(err) => Err(VcpuArchError::GetOneReg(MPIDR_EL1, err)),
Ok(_) => Ok(u64::from_le_bytes(mpidr)),
}
}
/// Configures an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `guest_mem` - The guest memory used by this microvm.
/// * `kernel_entry_point` - Specifies the boot protocol and offset from `guest_mem` at which
/// the kernel starts.
/// * `vcpu_config` - The vCPU configuration.
pub fn configure(
&mut self,
guest_mem: &GuestMemoryMmap,
kernel_entry_point: EntryPoint,
vcpu_config: &VcpuConfig,
optional_capabilities: &OptionalCapabilities,
) -> Result<(), KvmVcpuError> {
for reg in vcpu_config.cpu_config.regs.iter() {
self.fd.set_one_reg(reg.id, reg.as_slice()).map_err(|err| {
KvmVcpuError::ApplyCpuTemplate(VcpuArchError::SetOneReg(
reg.id,
reg.value_str(),
err,
))
})?;
}
self.setup_boot_regs(
kernel_entry_point.entry_addr.raw_value(),
guest_mem,
optional_capabilities,
)
.map_err(KvmVcpuError::ConfigureRegisters)?;
Ok(())
}
/// Initializes an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `vm_fd` - The kvm `VmFd` for this microvm.
pub fn init(&mut self, vcpu_features: &[VcpuFeatures]) -> Result<(), KvmVcpuError> {
for feature in vcpu_features.iter() {
let index = feature.index as usize;
self.kvi.features[index] = feature.bitmap.apply(self.kvi.features[index]);
}
self.init_vcpu()?;
self.finalize_vcpu()?;
Ok(())
}
/// Creates default kvi struct based on vcpu index.
pub fn default_kvi(vm_fd: &VmFd) -> Result<kvm_vcpu_init, KvmVcpuError> {
let mut kvi = kvm_vcpu_init::default();
// This reads back the kernel's preferred target type.
vm_fd
.get_preferred_target(&mut kvi)
.map_err(KvmVcpuError::GetPreferredTarget)?;
// We already checked that the capability is supported.
kvi.features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
Ok(kvi)
}
/// Save the KVM internal state.
pub fn save_state(&self) -> Result<VcpuState, KvmVcpuError> {
let mut state = VcpuState {
mp_state: self.get_mpstate().map_err(KvmVcpuError::SaveState)?,
..Default::default()
};
self.get_all_registers(&mut state.regs)
.map_err(KvmVcpuError::SaveState)?;
state.mpidr = self.get_mpidr().map_err(KvmVcpuError::SaveState)?;
state.kvi = self.kvi;
// We don't save power off state in a snapshot, because
// it was only needed during uVM boot process.
// When uVM is restored, the kernel has already passed
// the boot state and turned secondary vcpus on.
state.kvi.features[0] &= !(1 << KVM_ARM_VCPU_POWER_OFF);
state.pvtime_ipa = self.pvtime_ipa.map(|guest_addr| guest_addr.0);
Ok(state)
}
/// Use provided state to populate KVM internal state.
pub fn restore_state(&mut self, state: &VcpuState) -> Result<(), KvmVcpuError> {
self.kvi = state.kvi;
self.init_vcpu()?;
// If KVM_REG_ARM64_SVE_VLS is present it needs to
// be set before vcpu is finalized.
if let Some(sve_vls_reg) = state
.regs
.iter()
.find(|reg| reg.id == KVM_REG_ARM64_SVE_VLS)
{
self.set_register(sve_vls_reg)
.map_err(KvmVcpuError::RestoreState)?;
}
self.finalize_vcpu()?;
// KVM_REG_ARM64_SVE_VLS needs to be skipped after vcpu is finalized.
// If it is present it is handled in the code above.
for reg in state
.regs
.iter()
.filter(|reg| reg.id != KVM_REG_ARM64_SVE_VLS)
{
self.set_register(reg).map_err(KvmVcpuError::RestoreState)?;
}
self.set_mpstate(state.mp_state)
.map_err(KvmVcpuError::RestoreState)?;
// Assumes that steal time memory region was set up already
if let Some(pvtime_ipa) = state.pvtime_ipa {
self.enable_pvtime(GuestAddress(pvtime_ipa))
.map_err(KvmVcpuError::RestoreState)?;
}
Ok(())
}
/// Dumps CPU configuration.
pub fn dump_cpu_config(&self) -> Result<CpuConfiguration, KvmVcpuError> {
let mut regs = Aarch64RegisterVec::default();
self.get_all_registers(&mut regs)
.map_err(KvmVcpuError::DumpCpuConfig)?;
Ok(CpuConfiguration { regs })
}
/// Initializes internal vcpufd.
fn init_vcpu(&self) -> Result<(), KvmVcpuError> {
self.fd.vcpu_init(&self.kvi).map_err(KvmVcpuError::Init)?;
Ok(())
}
/// Checks for SVE feature and calls `vcpu_finalize` if
/// it is enabled.
fn finalize_vcpu(&self) -> Result<(), KvmVcpuError> {
if (self.kvi.features[0] & (1 << KVM_ARM_VCPU_SVE)) != 0 {
// KVM_ARM_VCPU_SVE has value 4 so casting to i32 is safe.
#[allow(clippy::cast_possible_wrap)]
let feature = KVM_ARM_VCPU_SVE as i32;
self.fd.vcpu_finalize(&feature).unwrap();
}
Ok(())
}
/// Configure relevant boot registers for a given vCPU.
///
/// # Arguments
///
/// * `boot_ip` - Starting instruction pointer.
/// * `mem` - Reserved DRAM for current VM.
/// + `optional_capabilities` - which optional capabilities are enabled that might influence
/// vcpu configuration
pub fn setup_boot_regs(
&self,
boot_ip: u64,
mem: &GuestMemoryMmap,
optional_capabilities: &OptionalCapabilities,
) -> Result<(), VcpuArchError> {
let kreg_off = offset_of!(kvm_regs, regs);
// Get the register index of the PSTATE (Processor State) register.
let pstate = offset_of!(user_pt_regs, pstate) + kreg_off;
let id = arm64_core_reg_id!(KVM_REG_SIZE_U64, pstate);
self.fd
.set_one_reg(id, &PSTATE_FAULT_BITS_64.to_le_bytes())
.map_err(|err| {
VcpuArchError::SetOneReg(id, format!("{PSTATE_FAULT_BITS_64:#x}"), err)
})?;
// Other vCPUs are powered off initially awaiting PSCI wakeup.
if self.index == 0 {
// Setting the PC (Processor Counter) to the current program address (kernel address).
let pc = offset_of!(user_pt_regs, pc) + kreg_off;
let id = arm64_core_reg_id!(KVM_REG_SIZE_U64, pc);
self.fd
.set_one_reg(id, &boot_ip.to_le_bytes())
.map_err(|err| VcpuArchError::SetOneReg(id, format!("{boot_ip:#x}"), err))?;
// Last mandatory thing to set -> the address pointing to the FDT (also called DTB).
// "The device tree blob (dtb) must be placed on an 8-byte boundary and must
// not exceed 2 megabytes in size." -> https://www.kernel.org/doc/Documentation/arm64/booting.txt.
// We are choosing to place it the end of DRAM. See `get_fdt_addr`.
let regs0 = offset_of!(user_pt_regs, regs) + kreg_off;
let id = arm64_core_reg_id!(KVM_REG_SIZE_U64, regs0);
let fdt_addr = get_fdt_addr(mem);
self.fd
.set_one_reg(id, &fdt_addr.to_le_bytes())
.map_err(|err| VcpuArchError::SetOneReg(id, format!("{fdt_addr:#x}"), err))?;
// Reset the physical counter for the guest. This way we avoid guest reading
// host physical counter.
// Resetting KVM_REG_ARM_PTIMER_CNT for single vcpu is enough because there is only
// one timer struct with offsets per VM.
// Because the access to KVM_REG_ARM_PTIMER_CNT is only present starting 6.4 kernel,
// we only do the reset if KVM_CAP_COUNTER_OFFSET is present as it was added
// in the same patch series as the ability to set the KVM_REG_ARM_PTIMER_CNT register.
// Path series which introduced the needed changes:
// https://lore.kernel.org/all/20230330174800.2677007-1-maz@kernel.org/
// Note: the value observed by the guest will still be above 0, because there is a delta
// time between this resetting and first call to KVM_RUN.
if optional_capabilities.counter_offset {
self.fd
.set_one_reg(KVM_REG_ARM_PTIMER_CNT, &[0; 8])
.map_err(|err| {
VcpuArchError::SetOneReg(id, format!("{KVM_REG_ARM_PTIMER_CNT:#x}"), err)
})?;
}
}
Ok(())
}
/// Saves the states of the system registers into `state`.
///
/// # Arguments
///
/// * `regs` - Input/Output vector of registers.
pub fn get_all_registers(&self, state: &mut Aarch64RegisterVec) -> Result<(), VcpuArchError> {
get_registers(&self.fd, &self.get_all_registers_ids()?, state)
}
/// Returns all registers ids, including core and system
pub fn get_all_registers_ids(&self) -> Result<Vec<u64>, VcpuArchError> {
// Call KVM_GET_REG_LIST to get all registers available to the guest. For ArmV8 there are
// less than 500 registers expected, resize to the reported size when necessary.
let mut reg_list = RegList::new(500).map_err(VcpuArchError::Fam)?;
match self.fd.get_reg_list(&mut reg_list) {
Ok(_) => Ok(reg_list.as_slice().to_vec()),
Err(e) => match e.errno() {
libc::E2BIG => {
// resize and retry.
let size: usize = reg_list
.as_fam_struct_ref()
.n
.try_into()
// Safe to unwrap as Firecracker only targets 64-bit machines.
.unwrap();
reg_list = RegList::new(size).map_err(VcpuArchError::Fam)?;
self.fd
.get_reg_list(&mut reg_list)
.map_err(VcpuArchError::GetRegList)?;
Ok(reg_list.as_slice().to_vec())
}
_ => Err(VcpuArchError::GetRegList(e)),
},
}
}
/// Set the state of one system register.
///
/// # Arguments
///
/// * `reg` - Register to be set.
pub fn set_register(&self, reg: Aarch64RegisterRef) -> Result<(), VcpuArchError> {
self.fd
.set_one_reg(reg.id, reg.as_slice())
.map_err(|e| VcpuArchError::SetOneReg(reg.id, reg.value_str(), e))?;
Ok(())
}
    /// Returns the vcpu's current multiprocessing state
    /// (`KVM_GET_MP_STATE` in the KVM API).
    pub fn get_mpstate(&self) -> Result<kvm_mp_state, VcpuArchError> {
        self.fd.get_mp_state().map_err(VcpuArchError::GetMp)
    }
    /// Sets the vcpu's multiprocessing state
    /// (`KVM_SET_MP_STATE` in the KVM API).
    ///
    /// # Arguments
    ///
    /// * `state` - The multiprocessing state to apply to this vcpu.
    pub fn set_mpstate(&self, state: kvm_mp_state) -> Result<(), VcpuArchError> {
        self.fd.set_mp_state(state).map_err(VcpuArchError::SetMp)
    }
/// Check if pvtime (steal time on ARM) is supported for vcpu
pub fn supports_pvtime(&self) -> bool {
let pvtime_device_attr = kvm_bindings::kvm_device_attr {
group: kvm_bindings::KVM_ARM_VCPU_PVTIME_CTRL,
attr: kvm_bindings::KVM_ARM_VCPU_PVTIME_IPA as u64,
addr: 0,
flags: 0,
};
// Use kvm_has_device_attr to check if PVTime is supported
self.fd.has_device_attr(&pvtime_device_attr).is_ok()
}
/// Enables pvtime for vcpu
pub fn enable_pvtime(&mut self, ipa: GuestAddress) -> Result<(), VcpuArchError> {
self.pvtime_ipa = Some(ipa);
// Use KVM syscall (kvm_set_device_attr) to register the vCPU with the steal_time region
let vcpu_device_attr = kvm_bindings::kvm_device_attr {
group: KVM_ARM_VCPU_PVTIME_CTRL,
attr: KVM_ARM_VCPU_PVTIME_IPA as u64,
addr: &ipa.0 as *const u64 as u64, // userspace address of attr data
flags: 0,
};
self.fd
.set_device_attr(&vcpu_device_attr)
.map_err(VcpuArchError::DeviceAttribute)?;
Ok(())
}
}
impl Peripherals {
    /// Runs the vCPU in KVM context and handles the kvm exit reason.
    ///
    /// Returns error or enum specifying whether emulation was handled or interrupted.
    ///
    /// On aarch64 no exit reason is emulated in userspace, so any exit that
    /// reaches this function is unexpected: it is counted as a vcpu failure
    /// and surfaced as `UnhandledKvmExit`.
    pub fn run_arch_emulation(&self, exit: VcpuExit) -> Result<VcpuEmulation, VcpuError> {
        METRICS.vcpu.failures.inc();
        // TODO: Are we sure we want to finish running a vcpu upon
        // receiving a vm exit that is not necessarily an error?
        error!("Unexpected exit reason on vcpu run: {:?}", exit);
        Err(VcpuError::UnhandledKvmExit(format!("{:?}", exit)))
    }
}
/// Structure holding VCPU kvm state.
///
/// Serialized into the microVM snapshot; field order and types therefore
/// matter for snapshot compatibility.
#[derive(Default, Clone, Serialize, Deserialize)]
pub struct VcpuState {
    /// Multiprocessing state.
    pub mp_state: kvm_mp_state,
    /// Vcpu registers.
    pub regs: Aarch64RegisterVec,
    /// We will be using the mpidr for passing it to the VmState.
    /// The VmState will give this away for saving restoring the icc and redistributor
    /// registers.
    pub mpidr: u64,
    /// kvi states for vcpu initialization.
    pub kvi: kvm_vcpu_init,
    /// ipa for steal_time region
    pub pvtime_ipa: Option<u64>,
}
impl Debug for VcpuState {
    /// Human-readable dump: mp_state, mpidr, then one line per register with
    /// its id and value rendered as hex, most-significant byte first.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        writeln!(f, "kvm_mp_state: {:#x}", self.mp_state.mp_state)?;
        writeln!(f, "mpidr: {:#x}", self.mpidr)?;
        for reg in self.regs.iter() {
            // Build the hex blob for this register's bytes in reverse order
            // (little-endian storage, printed big-endian).
            let mut hex = String::new();
            for byte in reg.as_slice().iter().rev() {
                let _ = write!(hex, "{byte:x}");
            }
            writeln!(f, "{:#x} 0x{}", reg.id, hex)?;
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    #![allow(clippy::undocumented_unsafe_blocks)]
    use std::os::unix::io::AsRawFd;

    use kvm_bindings::{KVM_ARM_VCPU_PSCI_0_2, KVM_REG_SIZE_U64};
    use vm_memory::GuestAddress;

    use super::*;
    use crate::arch::BootProtocol;
    use crate::arch::aarch64::layout;
    use crate::arch::aarch64::regs::Aarch64RegisterRef;
    use crate::cpu_config::aarch64::CpuConfiguration;
    use crate::cpu_config::templates::RegisterValueFilter;
    use crate::test_utils::arch_mem;
    use crate::vcpu::VcpuConfig;
    use crate::vstate::kvm::Kvm;
    use crate::vstate::vm::Vm;
    use crate::vstate::vm::tests::setup_vm_with_memory;

    // Builds a vm + vcpu pair with the vcpu initialized and the irqchip set up.
    fn setup_vcpu(mem_size: usize) -> (Kvm, Vm, KvmVcpu) {
        let (kvm, mut vm, mut vcpu) = setup_vcpu_no_init(mem_size);
        vcpu.init(&[]).unwrap();
        vm.setup_irqchip(1).unwrap();
        (kvm, vm, vcpu)
    }

    // Builds a vm + vcpu pair without calling KVM_ARM_VCPU_INIT, for tests
    // that exercise the uninitialized-vcpu error paths.
    fn setup_vcpu_no_init(mem_size: usize) -> (Kvm, Vm, KvmVcpu) {
        let (kvm, vm) = setup_vm_with_memory(mem_size);
        let vcpu = KvmVcpu::new(0, &vm).unwrap();
        (kvm, vm, vcpu)
    }

    #[test]
    fn test_create_vcpu() {
        let (_, vm) = setup_vm_with_memory(0x1000);
        unsafe { libc::close(vm.fd().as_raw_fd()) };
        let err = KvmVcpu::new(0, &vm);
        // dropping vm would double close the gic fd, so leak it
        // do the drop before assertion. Otherwise if assert fails,
        // we get IO runtime error instead of assert error.
        std::mem::forget(vm);
        assert_eq!(
            err.err().unwrap().to_string(),
            "Error creating vcpu: Bad file descriptor (os error 9)".to_string()
        );
    }

    #[test]
    fn test_configure_vcpu() {
        let (kvm, vm, mut vcpu) = setup_vcpu(0x10000);
        let optional_capabilities = kvm.optional_capabilities();
        let vcpu_config = VcpuConfig {
            vcpu_count: 1,
            smt: false,
            cpu_config: CpuConfiguration::default(),
        };
        vcpu.configure(
            vm.guest_memory(),
            EntryPoint {
                entry_addr: GuestAddress(crate::arch::get_kernel_start()),
                protocol: BootProtocol::LinuxBoot,
            },
            &vcpu_config,
            &optional_capabilities,
        )
        .unwrap();
        // Closing the fd makes every subsequent vcpu ioctl fail with EBADF.
        unsafe { libc::close(vcpu.fd.as_raw_fd()) };
        let err = vcpu.configure(
            vm.guest_memory(),
            EntryPoint {
                entry_addr: GuestAddress(crate::arch::get_kernel_start()),
                protocol: BootProtocol::LinuxBoot,
            },
            &vcpu_config,
            &optional_capabilities,
        );
        // dropping vcpu would double close the gic fd, so leak it
        // do the drop before assertion. Otherwise if assert fails,
        // we get IO runtime error instead of assert error.
        std::mem::forget(vcpu);
        assert_eq!(
            err.unwrap_err(),
            KvmVcpuError::ConfigureRegisters(VcpuArchError::SetOneReg(
                0x6030000000100042,
                "0x3c5".to_string(),
                kvm_ioctls::Error::new(9)
            ))
        );
    }

    #[test]
    fn test_init_vcpu() {
        let (_, mut vm) = setup_vm_with_memory(0x1000);
        let mut vcpu = KvmVcpu::new(0, &vm).unwrap();
        vm.setup_irqchip(1).unwrap();
        // KVM_ARM_VCPU_PSCI_0_2 is set by default.
        // we check if we can remove it.
        let vcpu_features = vec![VcpuFeatures {
            index: 0,
            bitmap: RegisterValueFilter {
                filter: 1 << KVM_ARM_VCPU_PSCI_0_2,
                value: 0,
            },
        }];
        vcpu.init(&vcpu_features).unwrap();
        assert!((vcpu.kvi.features[0] & (1 << KVM_ARM_VCPU_PSCI_0_2)) == 0)
    }

    #[test]
    fn test_vcpu_save_restore_state() {
        let (_, mut vm) = setup_vm_with_memory(0x1000);
        let mut vcpu = KvmVcpu::new(0, &vm).unwrap();
        vm.setup_irqchip(1).unwrap();
        // Calling KVM_GET_REGLIST before KVM_VCPU_INIT will result in error.
        let res = vcpu.save_state();
        assert!(matches!(
            res.unwrap_err(),
            KvmVcpuError::SaveState(VcpuArchError::GetRegList(_))
        ));
        // Try to restore the register using a faulty state.
        let mut faulty_vcpu_state = VcpuState::default();
        // Try faulty kvi state
        let res = vcpu.restore_state(&faulty_vcpu_state);
        assert!(matches!(res.unwrap_err(), KvmVcpuError::Init(_)));
        // Try faulty vcpu regs
        faulty_vcpu_state.kvi = KvmVcpu::default_kvi(vm.fd()).unwrap();
        let mut regs = Aarch64RegisterVec::default();
        // Register id 0 is invalid, so restore must fail on SetOneReg.
        let mut reg = Aarch64RegisterRef::new(KVM_REG_SIZE_U64, &[0; 8]);
        reg.id = 0;
        regs.push(reg);
        faulty_vcpu_state.regs = regs;
        let res = vcpu.restore_state(&faulty_vcpu_state);
        assert!(matches!(
            res.unwrap_err(),
            KvmVcpuError::RestoreState(VcpuArchError::SetOneReg(0, _, _))
        ));
        // Round-trip: a properly initialized vcpu saves and restores cleanly.
        vcpu.init(&[]).unwrap();
        let state = vcpu.save_state().expect("Cannot save state of vcpu");
        assert!(!state.regs.is_empty());
        vcpu.restore_state(&state)
            .expect("Cannot restore state of vcpu");
    }

    #[test]
    fn test_dump_cpu_config_before_init() {
        // Test `dump_cpu_config()` before `KVM_VCPU_INIT`.
        //
        // This should fail with ENOEXEC.
        // https://elixir.bootlin.com/linux/v5.10.176/source/arch/arm64/kvm/arm.c#L1165
        let (_, mut vm) = setup_vm_with_memory(0x1000);
        let vcpu = KvmVcpu::new(0, &vm).unwrap();
        vm.setup_irqchip(1).unwrap();
        vcpu.dump_cpu_config().unwrap_err();
    }

    #[test]
    fn test_dump_cpu_config_after_init() {
        // Test `dump_cpu_config()` after `KVM_VCPU_INIT`.
        let (_, mut vm) = setup_vm_with_memory(0x1000);
        let mut vcpu = KvmVcpu::new(0, &vm).unwrap();
        vm.setup_irqchip(1).unwrap();
        vcpu.init(&[]).unwrap();
        vcpu.dump_cpu_config().unwrap();
    }

    #[test]
    fn test_setup_non_boot_vcpu() {
        let (_, vm) = setup_vm_with_memory(0x1000);
        let mut vcpu1 = KvmVcpu::new(0, &vm).unwrap();
        vcpu1.init(&[]).unwrap();
        let mut vcpu2 = KvmVcpu::new(1, &vm).unwrap();
        vcpu2.init(&[]).unwrap();
    }

    #[test]
    fn test_get_valid_regs() {
        // Test `get_regs()` with valid register IDs.
        // - X0: 0x6030 0000 0010 0000
        // - X1: 0x6030 0000 0010 0002
        let (_, _, vcpu) = setup_vcpu(0x10000);
        let reg_list = Vec::<u64>::from([0x6030000000100000, 0x6030000000100002]);
        get_registers(&vcpu.fd, &reg_list, &mut Aarch64RegisterVec::default()).unwrap();
    }

    #[test]
    fn test_get_invalid_regs() {
        // Test `get_regs()` with invalid register IDs.
        let (_, _, vcpu) = setup_vcpu(0x10000);
        let reg_list = Vec::<u64>::from([0x6030000000100001, 0x6030000000100003]);
        get_registers(&vcpu.fd, &reg_list, &mut Aarch64RegisterVec::default()).unwrap_err();
    }

    #[test]
    fn test_setup_regs() {
        let (kvm, _, vcpu) = setup_vcpu_no_init(0x10000);
        let mem = arch_mem(layout::FDT_MAX_SIZE + 0x1000);
        let optional_capabilities = kvm.optional_capabilities();
        // Before KVM_VCPU_INIT, setting a core register must fail.
        let res = vcpu.setup_boot_regs(0x0, &mem, &optional_capabilities);
        assert!(matches!(
            res.unwrap_err(),
            VcpuArchError::SetOneReg(0x6030000000100042, _, _)
        ));
        vcpu.init_vcpu().unwrap();
        vcpu.setup_boot_regs(0x0, &mem, &optional_capabilities)
            .unwrap();
        // Check that the register is reset on compatible kernels.
        // Because there is a delta in time between we reset the register and time we
        // read it, we cannot compare with 0. Instead we compare it with meaningfully
        // small value.
        if optional_capabilities.counter_offset {
            let mut reg_bytes = [0_u8; 8];
            vcpu.fd.get_one_reg(SYS_CNTPCT_EL0, &mut reg_bytes).unwrap();
            let counter_value = u64::from_le_bytes(reg_bytes);
            // We are reading the SYS_CNTPCT_EL0 right after resetting it.
            // If reset did happen successfully, the value should be quite small when we read it.
            // If the reset did not happen, the value will be same as on the host and it surely
            // will be more that `max_value`. Measurements show that usually value is close
            // to 1000. Use bigger `max_value` just in case.
            let max_value = 10_000;
            assert!(counter_value < max_value);
        }
    }

    #[test]
    fn test_read_mpidr() {
        let (_, _, vcpu) = setup_vcpu_no_init(0x10000);
        // Must fail when vcpu is not initialized yet.
        let res = vcpu.get_mpidr();
        assert!(matches!(
            res.unwrap_err(),
            VcpuArchError::GetOneReg(MPIDR_EL1, _)
        ));
        vcpu.init_vcpu().unwrap();
        assert_eq!(vcpu.get_mpidr().unwrap(), 0x8000_0000);
    }

    #[test]
    fn test_get_set_regs() {
        let (_, _, vcpu) = setup_vcpu_no_init(0x10000);
        // Must fail when vcpu is not initialized yet.
        let mut regs = Aarch64RegisterVec::default();
        let res = vcpu.get_all_registers(&mut regs);
        assert!(matches!(res.unwrap_err(), VcpuArchError::GetRegList(_)));
        vcpu.init_vcpu().unwrap();
        vcpu.get_all_registers(&mut regs).unwrap();
        for reg in regs.iter() {
            vcpu.set_register(reg).unwrap();
        }
    }

    #[test]
    fn test_mpstate() {
        use std::os::unix::io::AsRawFd;
        let (_, _, vcpu) = setup_vcpu(0x10000);
        let res = vcpu.get_mpstate();
        vcpu.set_mpstate(res.unwrap()).unwrap();
        unsafe { libc::close(vcpu.fd.as_raw_fd()) };
        let res = vcpu.get_mpstate();
        assert!(matches!(res, Err(VcpuArchError::GetMp(_))), "{:?}", res);
        let res = vcpu.set_mpstate(kvm_mp_state::default());
        // dropping vcpu would double close the fd, so leak it
        // do the drop before assertion. Otherwise if assert fails,
        // we get IO runtime error instead of assert error.
        std::mem::forget(vcpu);
        assert!(matches!(res, Err(VcpuArchError::SetMp(_))), "{:?}", res);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/cache_info.rs | src/vmm/src/arch/aarch64/cache_info.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::path::{Path, PathBuf};
use std::{fs, io};
use crate::logger::warn;
// Based on https://elixir.free-electrons.com/linux/v4.9.62/source/arch/arm64/kernel/cacheinfo.c#L29.
// Upper bound on the sysfs `index<N>` directories probed per cpu.
const MAX_CACHE_LEVEL: u8 = 7;
// NOTE: the `///` variant docs below double as `Display` format strings via
// `displaydoc`; changing them changes user-visible error messages.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub(crate) enum CacheInfoError {
    /// Failed to read cache information: {0}
    FailedToReadCacheInfo(#[from] io::Error),
    /// Invalid cache configuration found for {0}: {1}
    InvalidCacheAttr(String, String),
    /// Cannot read cache level.
    MissingCacheLevel,
    /// Cannot read cache type.
    MissingCacheType,
    // Carries the list of missing optional attribute names plus the
    // partially-populated entry, so callers can still use the entry.
    /// {0}
    MissingOptionalAttr(String, CacheEntry),
}
// Indirection over the cache-attribute source so tests can substitute a mock
// store for the host sysfs tree (see the `Default` impls below).
struct CacheEngine {
    store: Box<dyn CacheStore>,
}
// Abstract access to per-index cache attribute files; `file_name` is a sysfs
// attribute name such as "level", "type", "size", "shared_cpu_map".
trait CacheStore: std::fmt::Debug {
    fn get_by_key(&self, index: u8, file_name: &str) -> Result<String, CacheInfoError>;
}
#[derive(Debug)]
pub(crate) struct CacheEntry {
    // Cache Level: 1, 2, 3..
    pub level: u8,
    // Type of cache: Unified, Data, Instruction.
    pub type_: CacheType,
    // Total size in bytes, if the sysfs attribute was readable.
    pub size_: Option<u32>,
    // Number of sets, if the sysfs attribute was readable.
    pub number_of_sets: Option<u32>,
    // Coherency line size in bytes, if the sysfs attribute was readable.
    pub line_size: Option<u16>,
    // How many CPUS share this cache.
    pub cpus_per_unit: u16,
}
// Production store rooted at the host sysfs cache directory for cpu0.
#[derive(Debug)]
#[cfg_attr(test, allow(dead_code))]
struct HostCacheStore {
    cache_dir: PathBuf,
}
#[cfg(not(test))]
impl Default for CacheEngine {
    // Production engine, backed by the host's cpu0 sysfs cache directory.
    // (Tests provide their own `Default` impl backed by a mock store.)
    fn default() -> Self {
        let host_store = HostCacheStore {
            cache_dir: PathBuf::from("/sys/devices/system/cpu/cpu0/cache"),
        };
        CacheEngine {
            store: Box::new(host_store),
        }
    }
}
impl CacheStore for HostCacheStore {
fn get_by_key(&self, index: u8, file_name: &str) -> Result<String, CacheInfoError> {
readln_special(&PathBuf::from(format!(
"{}/index{}/{}",
self.cache_dir.as_path().display(),
index,
file_name
)))
}
}
impl CacheEntry {
    /// Builds one cache description by reading the sysfs-style attribute
    /// files for cache index `index` from `store`.
    ///
    /// A missing `level` or `type` attribute aborts the lookup. The remaining
    /// attributes are optional: unreadable ones are collected by name and
    /// reported together through `CacheInfoError::MissingOptionalAttr`, which
    /// still carries the partially-populated entry.
    fn from_index(index: u8, store: &dyn CacheStore) -> Result<CacheEntry, CacheInfoError> {
        let mut cache = CacheEntry::default();
        // Names of optional attributes that could not be read, in probe order;
        // joined into one comma-separated message at the end.
        let mut missing: Vec<&str> = Vec::new();

        // If the cache level or the type cannot be retrieved we stop the process
        // of populating the cache levels.
        let level_str = store
            .get_by_key(index, "level")
            .map_err(|_| CacheInfoError::MissingCacheLevel)?;
        cache.level = level_str.parse::<u8>().map_err(|err| {
            CacheInfoError::InvalidCacheAttr("level".to_string(), err.to_string())
        })?;

        let cache_type_str = store
            .get_by_key(index, "type")
            .map_err(|_| CacheInfoError::MissingCacheType)?;
        cache.type_ = CacheType::try_from(&cache_type_str)?;

        match store.get_by_key(index, "shared_cpu_map") {
            Ok(shared_cpu_map) => {
                cache.cpus_per_unit = mask_str2bit_count(shared_cpu_map.trim_end())?;
            }
            Err(_) => missing.push("shared cpu map"),
        }

        match store.get_by_key(index, "coherency_line_size") {
            Ok(coherency_line_size) => {
                cache.line_size = Some(coherency_line_size.parse::<u16>().map_err(|err| {
                    CacheInfoError::InvalidCacheAttr(
                        "coherency_line_size".to_string(),
                        err.to_string(),
                    )
                })?);
            }
            Err(_) => missing.push("coherency line size"),
        }

        match store.get_by_key(index, "size") {
            Ok(mut size) => cache.size_ = Some(to_bytes(&mut size)?),
            Err(_) => missing.push("size"),
        }

        match store.get_by_key(index, "number_of_sets") {
            Ok(number_of_sets) => {
                cache.number_of_sets = Some(number_of_sets.parse::<u32>().map_err(|err| {
                    CacheInfoError::InvalidCacheAttr("number_of_sets".to_string(), err.to_string())
                })?);
            }
            Err(_) => missing.push("number of sets"),
        }

        if missing.is_empty() {
            Ok(cache)
        } else {
            Err(CacheInfoError::MissingOptionalAttr(missing.join(", "), cache))
        }
    }
}
impl Default for CacheEntry {
    // Level 0 / Unified with all optional attributes unknown; `from_index`
    // overwrites every field it can read from the store.
    fn default() -> Self {
        CacheEntry {
            level: 0,
            type_: CacheType::Unified,
            size_: None,
            number_of_sets: None,
            line_size: None,
            cpus_per_unit: 1,
        }
    }
}
#[derive(Debug)]
// Based on https://elixir.free-electrons.com/linux/v4.9.62/source/include/linux/cacheinfo.h#L11.
pub(crate) enum CacheType {
    Instruction,
    Data,
    Unified,
}
impl CacheType {
    // Parses the sysfs `type` attribute; accepted values are exactly
    // "Instruction", "Data" and "Unified" (after trimming whitespace).
    fn try_from(string: &str) -> Result<Self, CacheInfoError> {
        match string.trim() {
            "Instruction" => Ok(Self::Instruction),
            "Data" => Ok(Self::Data),
            "Unified" => Ok(Self::Unified),
            cache_type => Err(CacheInfoError::InvalidCacheAttr(
                "type".to_string(),
                cache_type.to_string(),
            )),
        }
    }
    // The below are auxiliary functions used for constructing the FDT.
    // FDT property name for this cache's total size.
    pub fn of_cache_size(&self) -> &str {
        match self {
            Self::Instruction => "i-cache-size",
            Self::Data => "d-cache-size",
            Self::Unified => "cache-size",
        }
    }
    // FDT property name for this cache's line size.
    pub fn of_cache_line_size(&self) -> &str {
        match self {
            Self::Instruction => "i-cache-line-size",
            Self::Data => "d-cache-line-size",
            Self::Unified => "cache-line-size",
        }
    }
    // Optional FDT marker property; only unified caches carry one.
    pub fn of_cache_type(&self) -> Option<&'static str> {
        match self {
            Self::Instruction => None,
            Self::Data => None,
            Self::Unified => Some("cache-unified"),
        }
    }
    // FDT property name for this cache's number of sets.
    pub fn of_cache_sets(&self) -> &str {
        match self {
            Self::Instruction => "i-cache-sets",
            Self::Data => "d-cache-sets",
            Self::Unified => "cache-sets",
        }
    }
}
// Reads a whole (small) special file and returns its contents with trailing
// whitespace removed.
#[cfg_attr(test, allow(unused))]
fn readln_special<T: AsRef<Path>>(file_path: &T) -> Result<String, CacheInfoError> {
    let contents = fs::read_to_string(file_path)?;
    Ok(contents.trim_end().to_owned())
}
// Converts a sysfs cache `size` string (a number suffixed with `K` or `M`)
// into a byte count. Any other suffix, or an empty string, is an error.
fn to_bytes(cache_size_pretty: &mut String) -> Result<u32, CacheInfoError> {
    let bad_size = |s: &str| CacheInfoError::InvalidCacheAttr("size".to_string(), s.to_string());
    match cache_size_pretty.pop() {
        Some(unit @ ('K' | 'M')) => {
            let multiplier: u32 = if unit == 'K' { 1024 } else { 1024 * 1024 };
            let number = cache_size_pretty
                .parse::<u32>()
                .map_err(|err| bad_size(&err.to_string()))?;
            Ok(number * multiplier)
        }
        Some(other) => {
            // Put the consumed character back so the error shows the full input.
            cache_size_pretty.push(other);
            Err(bad_size(cache_size_pretty))
        }
        None => Err(bad_size("Empty string was provided")),
    }
}
// Helper function to count the number of set bits from a bitmap
// formatted string (see %*pb in the printk formats).
// Expected input is a list of 32-bit comma separated hex values,
// without the 0x prefix. An all-zero mask is rejected.
fn mask_str2bit_count(mask_str: &str) -> Result<u16, CacheInfoError> {
    let mut bit_count: u16 = 0;
    for group in mask_str.split(',') {
        // `from_str_radix` rejects empty strings, so a group that is all
        // zeros is normalized down to a single "0".
        let mut digits = group.trim_start_matches('0');
        if digits.is_empty() {
            digits = "0";
        }
        let ones = u32::from_str_radix(digits, 16)
            .map_err(|err| {
                CacheInfoError::InvalidCacheAttr("shared_cpu_map".to_string(), err.to_string())
            })?
            .count_ones();
        // A u32 has at most 32 set bits, so this conversion cannot fail.
        bit_count += u16::try_from(ones).unwrap();
    }
    if bit_count == 0 {
        return Err(CacheInfoError::InvalidCacheAttr(
            "shared_cpu_map".to_string(),
            mask_str.to_string(),
        ));
    }
    Ok(bit_count)
}
// Routes `cache` into the L1 bucket or the non-L1 bucket based on its level.
fn append_cache_level(
    cache_l1: &mut Vec<CacheEntry>,
    cache_non_l1: &mut Vec<CacheEntry>,
    cache: CacheEntry,
) {
    let bucket = if cache.level == 1 {
        cache_l1
    } else {
        cache_non_l1
    };
    bucket.push(cache);
}
// Populates `cache_l1` and `cache_non_l1` with the cache topology read from
// the engine's store (host sysfs in production, a mock in tests). Probing
// stops at the first index whose level/type attributes are absent.
pub(crate) fn read_cache_config(
    cache_l1: &mut Vec<CacheEntry>,
    cache_non_l1: &mut Vec<CacheEntry>,
) -> Result<(), CacheInfoError> {
    // It is used to make sure we log warnings for missing files only for one level because
    // if an attribute is missing for a level for sure it will be missing for other levels too.
    // Also without this mechanism we would be logging the warnings for each level which pollutes
    // a lot the logs.
    let mut logged_missing_attr = false;
    let engine = CacheEngine::default();
    for index in 0..=MAX_CACHE_LEVEL {
        match CacheEntry::from_index(index, engine.store.as_ref()) {
            Ok(cache) => {
                append_cache_level(cache_l1, cache_non_l1, cache);
            }
            // Missing cache level or type means not further search is necessary.
            Err(CacheInfoError::MissingCacheLevel) | Err(CacheInfoError::MissingCacheType) => break,
            // Missing cache files is not necessary an error so we
            // do not propagate it upwards. We were prudent enough to log it.
            Err(CacheInfoError::MissingOptionalAttr(msg, cache)) => {
                let level = cache.level;
                // The partially-populated entry is still usable for the FDT.
                append_cache_level(cache_l1, cache_non_l1, cache);
                if !msg.is_empty() && !logged_missing_attr {
                    warn!("Could not read the {msg} for cache level {level}.");
                    logged_missing_attr = true;
                }
            }
            Err(err) => return Err(err),
        }
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use std::collections::HashMap;

    use super::*;
    use crate::arch::aarch64::cache_info::{
        CacheEngine, CacheEntry, CacheStore, read_cache_config,
    };

    // In-memory stand-in for the sysfs tree: maps "index<N>/<attr>" to content.
    #[derive(Debug)]
    struct MockCacheStore {
        dummy_fs: HashMap<String, String>,
    }

    impl Default for CacheEngine {
        fn default() -> Self {
            CacheEngine {
                store: Box::new(MockCacheStore {
                    dummy_fs: create_default_store(),
                }),
            }
        }
    }

    impl CacheEngine {
        fn new(map: &HashMap<String, String>) -> Self {
            CacheEngine {
                store: Box::new(MockCacheStore {
                    dummy_fs: map.clone(),
                }),
            }
        }
    }

    impl CacheStore for MockCacheStore {
        fn get_by_key(&self, index: u8, file_name: &str) -> Result<String, CacheInfoError> {
            let key = format!("index{}/{}", index, file_name);
            if let Some(val) = self.dummy_fs.get(&key) {
                Ok(val.to_string())
            } else {
                Err(CacheInfoError::FailedToReadCacheInfo(
                    io::Error::from_raw_os_error(0),
                ))
            }
        }
    }

    // Minimal topology: two L1 caches (Data + Instruction) and one unified L2,
    // with every optional attribute absent.
    fn create_default_store() -> HashMap<String, String> {
        let mut cache_struct = HashMap::new();
        cache_struct.insert("index0/level".to_string(), "1".to_string());
        cache_struct.insert("index0/type".to_string(), "Data".to_string());
        cache_struct.insert("index1/level".to_string(), "1".to_string());
        cache_struct.insert("index1/type".to_string(), "Instruction".to_string());
        cache_struct.insert("index2/level".to_string(), "2".to_string());
        cache_struct.insert("index2/type".to_string(), "Unified".to_string());
        cache_struct
    }

    #[test]
    fn test_mask_str2bit_count() {
        mask_str2bit_count("00000000,00000001").unwrap();
        let res = mask_str2bit_count("00000000,00000000");
        assert!(
            res.is_err()
                && format!("{}", res.unwrap_err())
                    == "Invalid cache configuration found for shared_cpu_map: 00000000,00000000"
        );
        let res = mask_str2bit_count("00000000;00000001");
        assert!(
            res.is_err()
                && format!("{}", res.unwrap_err())
                    == "Invalid cache configuration found for shared_cpu_map: invalid digit found \
                        in string"
        );
    }

    #[test]
    fn test_to_bytes() {
        to_bytes(&mut "64K".to_string()).unwrap();
        to_bytes(&mut "64M".to_string()).unwrap();
        match to_bytes(&mut "64KK".to_string()) {
            Err(err) => assert_eq!(
                format!("{}", err),
                "Invalid cache configuration found for size: invalid digit found in string"
            ),
            _ => panic!("This should be an error!"),
        }
        let res = to_bytes(&mut "64G".to_string());
        assert!(
            res.is_err()
                && format!("{}", res.unwrap_err())
                    == "Invalid cache configuration found for size: 64G"
        );
        let res = to_bytes(&mut "".to_string());
        assert!(
            res.is_err()
                && format!("{}", res.unwrap_err())
                    == "Invalid cache configuration found for size: Empty string was provided"
        );
    }

    #[test]
    fn test_cache_level() {
        let mut default_map = create_default_store();

        let mut map1 = default_map.clone();
        map1.remove("index0/type");
        let engine = CacheEngine::new(&map1);
        let res = CacheEntry::from_index(0, engine.store.as_ref());
        // We did create the level file but we still do not have the type file.
        assert!(matches!(res.unwrap_err(), CacheInfoError::MissingCacheType));

        let engine = CacheEngine::new(&default_map);
        let res = CacheEntry::from_index(0, engine.store.as_ref());
        assert_eq!(
            format!("{}", res.unwrap_err()),
            "shared cpu map, coherency line size, size, number of sets",
        );

        // Now putting some invalid values in the type and level files.
        let mut map2 = default_map.clone();
        map2.insert("index0/level".to_string(), "d".to_string());
        let engine = CacheEngine::new(&map2);
        let res = CacheEntry::from_index(0, engine.store.as_ref());
        assert_eq!(
            format!("{}", res.unwrap_err()),
            "Invalid cache configuration found for level: invalid digit found in string"
        );

        default_map.insert("index0/type".to_string(), "Instructionn".to_string());
        let engine = CacheEngine::new(&default_map);
        let res = CacheEntry::from_index(0, engine.store.as_ref());
        assert_eq!(
            format!("{}", res.unwrap_err()),
            "Invalid cache configuration found for type: Instructionn"
        );
    }

    #[test]
    fn test_cache_shared_cpu_map() {
        let mut default_map = create_default_store();

        default_map.insert(
            "index0/shared_cpu_map".to_string(),
            "00000000,00000001".to_string(),
        );
        let engine = CacheEngine::new(&default_map);
        let res = CacheEntry::from_index(0, engine.store.as_ref());
        assert_eq!(
            format!("{}", res.unwrap_err()),
            "coherency line size, size, number of sets"
        );

        default_map.insert(
            "index0/shared_cpu_map".to_string(),
            "00000000,0000000G".to_string(),
        );
        let engine = CacheEngine::new(&default_map);
        let res = CacheEntry::from_index(0, engine.store.as_ref());
        assert_eq!(
            format!("{}", res.unwrap_err()),
            "Invalid cache configuration found for shared_cpu_map: invalid digit found in string"
        );

        default_map.insert("index0/shared_cpu_map".to_string(), "00000000".to_string());
        let engine = CacheEngine::new(&default_map);
        let res = CacheEntry::from_index(0, engine.store.as_ref());
        assert_eq!(
            format!("{}", res.unwrap_err()),
            "Invalid cache configuration found for shared_cpu_map: 00000000"
        );
    }

    #[test]
    fn test_cache_coherency() {
        let mut default_map = create_default_store();

        default_map.insert("index0/coherency_line_size".to_string(), "64".to_string());
        let engine = CacheEngine::new(&default_map);
        let res = CacheEntry::from_index(0, engine.store.as_ref());
        assert_eq!(
            "shared cpu map, size, number of sets",
            format!("{}", res.unwrap_err())
        );

        default_map.insert(
            "index0/coherency_line_size".to_string(),
            "Instruction".to_string(),
        );
        let engine = CacheEngine::new(&default_map);
        let res = CacheEntry::from_index(0, engine.store.as_ref());
        assert_eq!(
            format!("{}", res.unwrap_err()),
            "Invalid cache configuration found for coherency_line_size: invalid digit found in \
             string"
        );
    }

    #[test]
    fn test_cache_size() {
        let mut default_map = create_default_store();

        default_map.insert("index0/size".to_string(), "64K".to_string());
        let engine = CacheEngine::new(&default_map);
        let res = CacheEntry::from_index(0, engine.store.as_ref());
        assert_eq!(
            format!("{}", res.unwrap_err()),
            "shared cpu map, coherency line size, number of sets",
        );

        default_map.insert("index0/size".to_string(), "64".to_string());
        let engine = CacheEngine::new(&default_map);
        let res = CacheEntry::from_index(0, engine.store.as_ref());
        assert_eq!(
            format!("{}", res.unwrap_err()),
            "Invalid cache configuration found for size: 64"
        );

        default_map.insert("index0/size".to_string(), "64Z".to_string());
        let engine = CacheEngine::new(&default_map);
        let res = CacheEntry::from_index(0, engine.store.as_ref());
        assert_eq!(
            format!("{}", res.unwrap_err()),
            "Invalid cache configuration found for size: 64Z"
        );
    }

    #[test]
    fn test_cache_no_sets() {
        let mut default_map = create_default_store();

        default_map.insert("index0/number_of_sets".to_string(), "64".to_string());
        let engine = CacheEngine::new(&default_map);
        let res = CacheEntry::from_index(0, engine.store.as_ref());
        assert_eq!(
            "shared cpu map, coherency line size, size",
            format!("{}", res.unwrap_err())
        );

        default_map.insert("index0/number_of_sets".to_string(), "64K".to_string());
        let engine = CacheEngine::new(&default_map);
        let res = CacheEntry::from_index(0, engine.store.as_ref());
        assert_eq!(
            format!("{}", res.unwrap_err()),
            "Invalid cache configuration found for number_of_sets: invalid digit found in string"
        );
    }

    #[test]
    fn test_sysfs_read_caches() {
        let mut l1_caches: Vec<CacheEntry> = Vec::new();
        let mut non_l1_caches: Vec<CacheEntry> = Vec::new();
        // We use sysfs for extracting the cache information.
        read_cache_config(&mut l1_caches, &mut non_l1_caches).unwrap();
        assert_eq!(l1_caches.len(), 2);
        // Fixed: the second assertion previously re-checked `l1_caches`; the
        // mock store describes exactly one non-L1 cache (the unified L2 at
        // index2), which must land in `non_l1_caches`.
        assert_eq!(non_l1_caches.len(), 1);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/fdt.rs | src/vmm/src/arch/aarch64/fdt.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::ffi::CString;
use std::fmt::Debug;
use vm_fdt::{Error as VmFdtError, FdtWriter, FdtWriterNode};
use vm_memory::{GuestMemoryError, GuestMemoryRegion};
use super::cache_info::{CacheEntry, read_cache_config};
use super::gic::GICDevice;
use crate::arch::{
MEM_32BIT_DEVICES_SIZE, MEM_32BIT_DEVICES_START, MEM_64BIT_DEVICES_SIZE,
MEM_64BIT_DEVICES_START, PCI_MMIO_CONFIG_SIZE_PER_SEGMENT,
};
use crate::device_manager::DeviceManager;
use crate::device_manager::mmio::MMIODeviceInfo;
use crate::device_manager::pci_mngr::PciDevices;
use crate::devices::acpi::vmgenid::{VMGENID_MEM_SIZE, VmGenId};
use crate::initrd::InitrdConfig;
use crate::vstate::memory::{Address, GuestMemory, GuestMemoryMmap, GuestRegionType};
// This is a value for uniquely identifying the FDT node declaring the interrupt controller.
const GIC_PHANDLE: u32 = 1;
// This is a value for uniquely identifying the FDT node containing the clock definition.
const CLOCK_PHANDLE: u32 = 2;
// This is a value for uniquely identifying the FDT node declaring the MSI controller.
const MSI_PHANDLE: u32 = 3;
// You may be wondering why this big value?
// This phandle is used to uniquely identify the FDT nodes containing cache information. Each cpu
// can have a variable number of caches, some of these caches may be shared with other cpus.
// So, we start the indexing of the phandles used from a really big number and then subtract from
// it as we need more and more phandle for each cache representation.
const LAST_CACHE_PHANDLE: u32 = 4000;
// Read the documentation specified when appending the root node to the FDT.
const ADDRESS_CELLS: u32 = 0x2;
const SIZE_CELLS: u32 = 0x2;
// As per kvm tool and
// https://www.kernel.org/doc/Documentation/devicetree/bindings/interrupt-controller/arm%2Cgic.txt
// Look for "The 1st cell..."
const GIC_FDT_IRQ_TYPE_SPI: u32 = 0;
const GIC_FDT_IRQ_TYPE_PPI: u32 = 1;
// From https://elixir.bootlin.com/linux/v4.9.62/source/include/dt-bindings/interrupt-controller/irq.h#L17
const IRQ_TYPE_EDGE_RISING: u32 = 1;
const IRQ_TYPE_LEVEL_HI: u32 = 4;
/// Errors thrown while configuring the Flattened Device Tree for aarch64.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum FdtError {
/// Create FDT error: {0}
CreateFdt(#[from] VmFdtError),
/// Read cache info error: {0}
ReadCacheInfo(String),
/// Failure in writing FDT in memory.
WriteFdtToMemory(#[from] GuestMemoryError),
}
#[allow(clippy::too_many_arguments)]
/// Creates the flattened device tree for this aarch64 microVM.
pub fn create_fdt(
    guest_mem: &GuestMemoryMmap,
    vcpu_mpidr: Vec<u64>,
    cmdline: CString,
    device_manager: &DeviceManager,
    gic_device: &GICDevice,
    initrd: &Option<InitrdConfig>,
) -> Result<Vec<u8>, FdtError> {
    // In-memory writer that accumulates the device-tree blob.
    let mut fdt = FdtWriter::new()?;
    // Root node. The required nodes/properties are described in
    // https://github.com/torvalds/linux/blob/master/Documentation/devicetree/booting-without-of.txt
    // under "Required nodes and properties".
    let root = fdt.begin_node("")?;
    fdt.property_string("compatible", "linux,dummy-virt")?;
    // For #address-cells/#size-cells semantics see "Note about cells and
    // address representation" in the document linked above.
    fdt.property_u32("#address-cells", ADDRESS_CELLS)?;
    fdt.property_u32("#size-cells", SIZE_CELLS)?;
    // Not mandatory, but points the root node at the node describing this
    // VM's interrupt controller.
    fdt.property_u32("interrupt-parent", GIC_PHANDLE)?;
    // Emit all child nodes. The order is preserved as-is, since the tests
    // compare the produced blob against saved reference DTBs.
    create_cpu_nodes(&mut fdt, &vcpu_mpidr)?;
    create_memory_node(&mut fdt, guest_mem)?;
    create_chosen_node(&mut fdt, cmdline, initrd)?;
    create_gic_node(&mut fdt, gic_device)?;
    create_timer_node(&mut fdt)?;
    create_clock_node(&mut fdt)?;
    create_psci_node(&mut fdt)?;
    create_devices_node(&mut fdt, device_manager)?;
    create_vmgenid_node(&mut fdt, &device_manager.acpi_devices.vmgenid)?;
    create_pci_nodes(&mut fdt, &device_manager.pci_devices)?;
    // Close the root node and serialize the writer into the final blob.
    fdt.end_node(root)?;
    Ok(fdt.finish()?)
}
// Following are the auxiliary function for creating the different nodes that we append to our FDT.
// Appends one `cpu@N` node per vCPU, including per-cpu L1 cache attributes and
// shared (non-L1) cache nodes, following the devicetree `cpus` binding.
fn create_cpu_nodes(fdt: &mut FdtWriter, vcpu_mpidr: &[u64]) -> Result<(), FdtError> {
    // Since the L1 caches are not shareable among CPUs and they are direct attributes of the
    // cpu in the device tree, we process the L1 and non-L1 caches separately.
    let mut l1_caches: Vec<CacheEntry> = Vec::new();
    let mut non_l1_caches: Vec<CacheEntry> = Vec::new();
    // We use sysfs for extracting the cache information.
    read_cache_config(&mut l1_caches, &mut non_l1_caches)
        .map_err(|err| FdtError::ReadCacheInfo(err.to_string()))?;
    // See https://github.com/torvalds/linux/blob/master/Documentation/devicetree/bindings/arm/cpus.yaml.
    let cpus = fdt.begin_node("cpus")?;
    // As per documentation, on ARM v8 64-bit systems value should be set to 2.
    fdt.property_u32("#address-cells", 0x02)?;
    fdt.property_u32("#size-cells", 0x0)?;
    let num_cpus = vcpu_mpidr.len();
    for (cpu_index, mpidr) in vcpu_mpidr.iter().enumerate() {
        let cpu = fdt.begin_node(&format!("cpu@{:x}", cpu_index))?;
        fdt.property_string("device_type", "cpu")?;
        fdt.property_string("compatible", "arm,arm-v8")?;
        // The power state coordination interface (PSCI) needs to be enabled for
        // all vcpus.
        fdt.property_string("enable-method", "psci")?;
        // Set the field to first 24 bits of the MPIDR - Multiprocessor Affinity Register.
        // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0488c/BABHBJCI.html.
        fdt.property_u64("reg", mpidr & 0x7FFFFF)?;
        // L1 caches are emitted directly as properties of the cpu node.
        for cache in l1_caches.iter() {
            // Please check out
            // https://github.com/devicetree-org/devicetree-specification/releases/download/v0.3/devicetree-specification-v0.3.pdf,
            // section 3.8.
            if let Some(size) = cache.size_ {
                fdt.property_u32(cache.type_.of_cache_size(), size)?;
            }
            if let Some(line_size) = cache.line_size {
                fdt.property_u32(cache.type_.of_cache_line_size(), u32::from(line_size))?;
            }
            if let Some(number_of_sets) = cache.number_of_sets {
                fdt.property_u32(cache.type_.of_cache_sets(), number_of_sets)?;
            }
        }
        // Some of the non-l1 caches can be shared amongst CPUs. You can see an example of a shared
        // scenario in https://github.com/devicetree-org/devicetree-specification/releases/download/v0.3/devicetree-specification-v0.3.pdf,
        // 3.8.1 Example.
        let mut prev_level = 1;
        let mut cache_node: Option<FdtWriterNode> = None;
        for cache in non_l1_caches.iter() {
            // We append the next-level-cache node (the node that specifies the cache hierarchy)
            // in the next iteration. For example,
            // L2-cache {
            //      cache-size = <0x8000> ----> first iteration
            //      next-level-cache = <&l3-cache> ---> second iteration
            // }
            // The cpus per unit cannot be 0 since the sysfs will also include the current cpu
            // in the list of shared cpus so it needs to be at least 1. Firecracker trusts the host.
            // The operation is safe since we already checked when creating cache attributes that
            // cpus_per_unit is not 0 (.e look for mask_str2bit_count function).
            //
            // Phandles are allocated downwards from LAST_CACHE_PHANDLE; the index combines the
            // cache level (levels start at 2 here, hence `level - 2`) with which sharing unit
            // this cpu belongs to.
            let cache_phandle = LAST_CACHE_PHANDLE
                - u32::try_from(
                    num_cpus * (cache.level - 2) as usize
                        + cpu_index / cache.cpus_per_unit as usize,
                )
                .unwrap(); // Safe because the number of CPUs is bounded
            if prev_level != cache.level {
                // Link the previously-emitted cache node to this (higher-level) one,
                // then close the previous node if one is still open.
                fdt.property_u32("next-level-cache", cache_phandle)?;
                if prev_level > 1 && cache_node.is_some() {
                    fdt.end_node(cache_node.take().unwrap())?;
                }
            }
            // Only the first cpu of each sharing unit emits the shared cache node itself.
            if cpu_index % cache.cpus_per_unit as usize == 0 {
                cache_node = Some(fdt.begin_node(&format!(
                    "l{}-{}-cache",
                    cache.level,
                    cpu_index / cache.cpus_per_unit as usize
                ))?);
                fdt.property_u32("phandle", cache_phandle)?;
                fdt.property_string("compatible", "cache")?;
                fdt.property_u32("cache-level", u32::from(cache.level))?;
                if let Some(size) = cache.size_ {
                    fdt.property_u32(cache.type_.of_cache_size(), size)?;
                }
                if let Some(line_size) = cache.line_size {
                    fdt.property_u32(cache.type_.of_cache_line_size(), u32::from(line_size))?;
                }
                if let Some(number_of_sets) = cache.number_of_sets {
                    fdt.property_u32(cache.type_.of_cache_sets(), number_of_sets)?;
                }
                if let Some(cache_type) = cache.type_.of_cache_type() {
                    fdt.property_null(cache_type)?;
                }
                prev_level = cache.level;
            }
        }
        // Close any cache node still open after the last iteration.
        if let Some(node) = cache_node {
            fdt.end_node(node)?;
        }
        fdt.end_node(cpu)?;
    }
    fdt.end_node(cpus)?;
    Ok(())
}
// Appends the `memory@ram` node describing guest DRAM.
// See https://github.com/torvalds/linux/blob/master/Documentation/devicetree/booting-without-of.txt#L960
// for an explanation of the node format.
fn create_memory_node(fdt: &mut FdtWriter, guest_mem: &GuestMemoryMmap) -> Result<(), FdtError> {
    // On ARM we reserve some memory so that it can be utilized for devices like VMGenID to send
    // data to kernel drivers. The range of this memory is:
    //
    //   [layout::DRAM_MEM_START, layout::DRAM_MEM_START + layout::SYSTEM_MEM_SIZE)
    //
    // Linux does not allow remapping system memory, and kernel drivers need to remap device
    // memory to read it; leaving this carve-out off the memory node keeps it remappable.
    //
    // Pick the first (and only) DRAM region.
    let dram = guest_mem
        .iter()
        .find(|region| region.region_type == GuestRegionType::Dram)
        .unwrap();
    // Usable memory starts right after the reserved system-memory carve-out
    // and spans the remainder of the region.
    let usable_start = dram
        .start_addr()
        .unchecked_add(super::layout::SYSTEM_MEM_SIZE);
    let usable_size = dram.len() - super::layout::SYSTEM_MEM_SIZE;
    let mem = fdt.begin_node("memory@ram")?;
    fdt.property_string("device_type", "memory")?;
    fdt.property_array_u64("reg", &[usable_start.raw_value(), usable_size])?;
    fdt.end_node(mem)?;
    Ok(())
}
// Appends the `chosen` node carrying the kernel command line and, when
// present, the initrd location.
fn create_chosen_node(
    fdt: &mut FdtWriter,
    cmdline: CString,
    initrd: &Option<InitrdConfig>,
) -> Result<(), FdtError> {
    let chosen = fdt.begin_node("chosen")?;
    // Workaround to be able to reuse an existing property_*() method: the
    // cmdline is converted back to a `String` here, and `property_string()`
    // re-creates the null-terminated CString when writing it out.
    let bootargs = cmdline
        .into_string()
        .map_err(|_| vm_fdt::Error::InvalidString)?;
    fdt.property_string("bootargs", bootargs.as_str())?;
    if let Some(cfg) = initrd {
        let start = cfg.address.raw_value();
        fdt.property_u64("linux,initrd-start", start)?;
        fdt.property_u64("linux,initrd-end", start + cfg.size as u64)?;
    }
    fdt.end_node(chosen)?;
    Ok(())
}
// Appends the Microsoft VM Generation ID device node, notified through an
// edge-triggered SPI.
fn create_vmgenid_node(fdt: &mut FdtWriter, vmgenid: &VmGenId) -> Result<(), FdtError> {
    let node = fdt.begin_node("vmgenid")?;
    fdt.property_string("compatible", "microsoft,vmgenid")?;
    fdt.property_array_u64("reg", &[vmgenid.guest_address.0, VMGENID_MEM_SIZE])?;
    let interrupts = [GIC_FDT_IRQ_TYPE_SPI, vmgenid.gsi, IRQ_TYPE_EDGE_RISING];
    fdt.property_array_u32("interrupts", &interrupts)?;
    fdt.end_node(node)?;
    Ok(())
}
// Appends the interrupt-controller (`intc`) node and, for GICv3 with ITS
// support, an `msic` sub-node acting as the MSI controller.
fn create_gic_node(fdt: &mut FdtWriter, gic_device: &GICDevice) -> Result<(), FdtError> {
    let intc = fdt.begin_node("intc")?;
    fdt.property_string("compatible", gic_device.fdt_compatibility())?;
    fdt.property_null("interrupt-controller")?;
    // "#interrupt-cells" is the number of <u32> cells needed to encode one
    // interrupt source; 3 when no PPI affinity description is required.
    fdt.property_u32("#interrupt-cells", 3)?;
    fdt.property_array_u64("reg", gic_device.device_properties())?;
    fdt.property_u32("phandle", GIC_PHANDLE)?;
    fdt.property_u32("#address-cells", 2)?;
    fdt.property_u32("#size-cells", 2)?;
    fdt.property_null("ranges")?;
    // Maintenance interrupt of the GIC: a level-triggered PPI.
    fdt.property_array_u32(
        "interrupts",
        &[
            GIC_FDT_IRQ_TYPE_PPI,
            gic_device.fdt_maint_irq(),
            IRQ_TYPE_LEVEL_HI,
        ],
    )?;
    if let Some(msi_properties) = gic_device.msi_properties() {
        let msic = fdt.begin_node("msic")?;
        fdt.property_string("compatible", "arm,gic-v3-its")?;
        fdt.property_null("msi-controller")?;
        fdt.property_u32("phandle", MSI_PHANDLE)?;
        fdt.property_array_u64("reg", msi_properties)?;
        fdt.end_node(msic)?;
    }
    fdt.end_node(intc)?;
    Ok(())
}
// Appends the fixed 24 MHz PCLK node feeding the APB bus.
// The Advanced Peripheral Bus (APB) is part of the AMBA protocol family: a
// low-cost interface optimized for minimal power and reduced complexity.
fn create_clock_node(fdt: &mut FdtWriter) -> Result<(), FdtError> {
    let pclk = fdt.begin_node("apb-pclk")?;
    fdt.property_string("compatible", "fixed-clock")?;
    fdt.property_u32("#clock-cells", 0x0)?;
    fdt.property_u32("clock-frequency", 24_000_000)?;
    fdt.property_string("clock-output-names", "clk24mhz")?;
    fdt.property_u32("phandle", CLOCK_PHANDLE)?;
    fdt.end_node(pclk)?;
    Ok(())
}
// Appends the architected timer node.
// See
// https://github.com/torvalds/linux/blob/master/Documentation/devicetree/bindings/interrupt-controller/arch_timer.txt
fn create_timer_node(fdt: &mut FdtWriter) -> Result<(), FdtError> {
    // These are fixed PPI numbers for the timer device; each expands to a
    // (type, irq, trigger) cell triple.
    let interrupt_cells: Vec<u32> = [13u32, 14, 11, 10]
        .iter()
        .flat_map(|&irq| [GIC_FDT_IRQ_TYPE_PPI, irq, IRQ_TYPE_LEVEL_HI])
        .collect();
    let timer = fdt.begin_node("timer")?;
    fdt.property_string("compatible", "arm,armv8-timer")?;
    fdt.property_null("always-on")?;
    fdt.property_array_u32("interrupts", &interrupt_cells)?;
    fdt.end_node(timer)?;
    Ok(())
}
// Appends the PSCI (Power State Coordination Interface) node.
fn create_psci_node(fdt: &mut FdtWriter) -> Result<(), FdtError> {
    let psci = fdt.begin_node("psci")?;
    fdt.property_string("compatible", "arm,psci-0.2")?;
    // Two conduits exist: hvc and smc. As per documentation, PSCI calls
    // between a guest and hypervisor may use HVC instead of SMC; since we run
    // under KVM, hvc is the one to use.
    fdt.property_string("method", "hvc")?;
    fdt.end_node(psci)?;
    Ok(())
}
// Appends one `virtio_mmio@ADDR` node for a virtio-MMIO transport device.
fn create_virtio_node(fdt: &mut FdtWriter, dev_info: &MMIODeviceInfo) -> Result<(), FdtError> {
    let node = fdt.begin_node(&format!("virtio_mmio@{:x}", dev_info.addr))?;
    // `dma-coherent` makes the guest driver allocate the virtio queue with the
    // Write-Back attribute, keeping it cache-coherent with Firecracker's own
    // accesses to the queue.
    fdt.property_null("dma-coherent")?;
    fdt.property_string("compatible", "virtio,mmio")?;
    fdt.property_array_u64("reg", &[dev_info.addr, dev_info.len])?;
    let interrupts = [
        GIC_FDT_IRQ_TYPE_SPI,
        dev_info.gsi.unwrap(),
        IRQ_TYPE_EDGE_RISING,
    ];
    fdt.property_array_u32("interrupts", &interrupts)?;
    fdt.property_u32("interrupt-parent", GIC_PHANDLE)?;
    fdt.end_node(node)?;
    Ok(())
}
// Appends a 16550A-compatible UART node, clocked from the APB PCLK.
fn create_serial_node(fdt: &mut FdtWriter, dev_info: &MMIODeviceInfo) -> Result<(), FdtError> {
    let uart = fdt.begin_node(&format!("uart@{:x}", dev_info.addr))?;
    fdt.property_string("compatible", "ns16550a")?;
    fdt.property_array_u64("reg", &[dev_info.addr, dev_info.len])?;
    fdt.property_u32("clocks", CLOCK_PHANDLE)?;
    fdt.property_string("clock-names", "apb_pclk")?;
    let interrupts = [
        GIC_FDT_IRQ_TYPE_SPI,
        dev_info.gsi.unwrap(),
        IRQ_TYPE_EDGE_RISING,
    ];
    fdt.property_array_u32("interrupts", &interrupts)?;
    fdt.end_node(uart)?;
    Ok(())
}
// Appends the PL031 real-time clock node.
// Driver requirements:
// https://elixir.bootlin.com/linux/latest/source/Documentation/devicetree/bindings/rtc/arm,pl031.yaml
// No `interrupt` property is offered because the device does not implement
// interrupt support.
fn create_rtc_node(fdt: &mut FdtWriter, dev_info: &MMIODeviceInfo) -> Result<(), FdtError> {
    let rtc = fdt.begin_node(&format!("rtc@{:x}", dev_info.addr))?;
    // Two NUL-separated compatible strings: "arm,pl031" and "arm,primecell".
    fdt.property("compatible", b"arm,pl031\0arm,primecell\0")?;
    fdt.property_array_u64("reg", &[dev_info.addr, dev_info.len])?;
    fdt.property_u32("clocks", CLOCK_PHANDLE)?;
    fdt.property_string("clock-names", "apb_pclk")?;
    fdt.end_node(rtc)?;
    Ok(())
}
// Appends nodes for the legacy devices (RTC, serial) when present, followed by
// all virtio-MMIO devices in ascending MMIO-address order.
fn create_devices_node(
    fdt: &mut FdtWriter,
    device_manager: &DeviceManager,
) -> Result<(), FdtError> {
    if let Some(rtc_info) = device_manager.mmio_devices.rtc_device_info() {
        create_rtc_node(fdt, rtc_info)?;
    }
    if let Some(serial_info) = device_manager.mmio_devices.serial_device_info() {
        create_serial_node(fdt, serial_info)?;
    }
    let mut virtio_devices = device_manager.mmio_devices.virtio_device_info();
    // Sort virtio devices by address from low to high before inserting them
    // into the FDT. `sort_unstable_by_key` avoids the allocation of a stable
    // sort, and iterating the Vec directly replaces the needless `drain(..)`.
    virtio_devices.sort_unstable_by_key(|info| info.addr);
    for info in virtio_devices {
        create_virtio_node(fdt, info)?;
    }
    Ok(())
}
// Appends the `pci-host-ecam-generic` host-bridge node for the PCI segment,
// if one is configured; otherwise does nothing.
fn create_pci_nodes(fdt: &mut FdtWriter, pci_devices: &PciDevices) -> Result<(), FdtError> {
    let segment = match pci_devices.pci_segment.as_ref() {
        Some(segment) => segment,
        // No PCI segment configured: nothing to emit.
        None => return Ok(()),
    };
    let pci_node_name = format!("pci@{:x}", segment.mmio_config_address);
    // Each range here is a triple of `(PCI address, CPU address, PCI size)`.
    //
    // More info about the format can be found here:
    // https://elinux.org/Device_Tree_Usage#PCI_Address_Translation
    let ranges = [
        // 32bit addresses
        0x200_0000u32,
        (MEM_32BIT_DEVICES_START >> 32) as u32, // PCI address
        (MEM_32BIT_DEVICES_START & 0xffff_ffff) as u32,
        (MEM_32BIT_DEVICES_START >> 32) as u32, // CPU address
        (MEM_32BIT_DEVICES_START & 0xffff_ffff) as u32,
        (MEM_32BIT_DEVICES_SIZE >> 32) as u32, // Range size
        (MEM_32BIT_DEVICES_SIZE & 0xffff_ffff) as u32,
        // 64bit addresses
        0x300_0000u32,
        (MEM_64BIT_DEVICES_START >> 32) as u32, // PCI address
        (MEM_64BIT_DEVICES_START & 0xffff_ffff) as u32,
        (MEM_64BIT_DEVICES_START >> 32) as u32, // CPU address
        (MEM_64BIT_DEVICES_START & 0xffff_ffff) as u32,
        (MEM_64BIT_DEVICES_SIZE >> 32) as u32, // Range size
        // BUGFIX: this cell was `((MEM_64BIT_DEVICES_SIZE & 0xffff_ffff) >> 32)`,
        // which is always 0 (mask to 32 bits, then shift those bits away). The
        // low word of the size must be emitted, mirroring the 32-bit entry above.
        (MEM_64BIT_DEVICES_SIZE & 0xffff_ffff) as u32,
    ];
    // See kernel document Documentation/devicetree/bindings/pci/pci-msi.txt
    let msi_map = [
        // rid-base: A single cell describing the first RID matched by the entry.
        0x0,
        // msi-controller: A single phandle to an MSI controller.
        MSI_PHANDLE,
        // msi-base: An msi-specifier describing the msi-specifier produced for the
        // first RID matched by the entry.
        segment.id as u32,
        // length: A single cell describing how many consecutive RIDs are matched
        // following the rid-base.
        0x100,
    ];
    let pci_node = fdt.begin_node(&pci_node_name)?;
    fdt.property_string("compatible", "pci-host-ecam-generic")?;
    fdt.property_string("device_type", "pci")?;
    fdt.property_array_u32("ranges", &ranges)?;
    fdt.property_array_u32("bus-range", &[0, 0])?;
    fdt.property_u32("linux,pci-domain", segment.id.into())?;
    fdt.property_u32("#address-cells", 3)?;
    fdt.property_u32("#size-cells", 2)?;
    fdt.property_array_u64(
        "reg",
        &[
            segment.mmio_config_address,
            PCI_MMIO_CONFIG_SIZE_PER_SEGMENT,
        ],
    )?;
    fdt.property_u32("#interrupt-cells", 1)?;
    fdt.property_null("interrupt-map")?;
    fdt.property_null("interrupt-map-mask")?;
    fdt.property_null("dma-coherent")?;
    fdt.property_array_u32("msi-map", &msi_map)?;
    fdt.property_u32("msi-parent", MSI_PHANDLE)?;
    Ok(fdt.end_node(pci_node)?)
}
#[cfg(test)]
mod tests {
    use std::ffi::CString;
    use std::sync::{Arc, Mutex};
    use linux_loader::cmdline as kernel_cmdline;
    use super::*;
    use crate::arch::aarch64::gic::create_gic;
    use crate::arch::aarch64::layout;
    use crate::device_manager::mmio::tests::DummyDevice;
    use crate::device_manager::tests::default_device_manager;
    use crate::test_utils::arch_mem;
    use crate::vstate::memory::GuestAddress;
    use crate::{EventManager, Kvm, Vm};
    // The `load` function from the `device_tree` will mistakenly check the actual size
    // of the buffer with the allocated size. This works around that.
    fn set_size(buf: &mut [u8], pos: usize, val: u32) {
        buf[pos] = ((val >> 24) & 0xff) as u8;
        buf[pos + 1] = ((val >> 16) & 0xff) as u8;
        buf[pos + 2] = ((val >> 8) & 0xff) as u8;
        buf[pos + 3] = (val & 0xff) as u8;
    }
    #[test]
    fn test_create_fdt_with_devices() {
        let mem = arch_mem(layout::FDT_MAX_SIZE + 0x1000);
        let mut event_manager = EventManager::new().unwrap();
        let mut device_manager = default_device_manager();
        let kvm = Kvm::new(vec![]).unwrap();
        let vm = Vm::new(&kvm).unwrap();
        let gic = create_gic(vm.fd(), 1, None).unwrap();
        let mut cmdline = kernel_cmdline::Cmdline::new(4096).unwrap();
        cmdline.insert("console", "/dev/tty0").unwrap();
        device_manager
            .attach_legacy_devices_aarch64(&vm, &mut event_manager, &mut cmdline, None)
            .unwrap();
        let dummy = Arc::new(Mutex::new(DummyDevice::new()));
        device_manager
            .mmio_devices
            .register_virtio_test_device(&vm, mem.clone(), dummy, &mut cmdline, "dummy")
            .unwrap();
        create_fdt(
            &mem,
            vec![0],
            cmdline.as_cstring().unwrap(),
            &device_manager,
            &gic,
            &None,
        )
        .unwrap();
    }
    #[test]
    fn test_create_fdt() {
        let mem = arch_mem(layout::FDT_MAX_SIZE + 0x1000);
        let device_manager = default_device_manager();
        let kvm = Kvm::new(vec![]).unwrap();
        let vm = Vm::new(&kvm).unwrap();
        let gic = create_gic(vm.fd(), 1, None).unwrap();
        let saved_dtb_bytes = match gic.fdt_compatibility() {
            "arm,gic-v3" => include_bytes!("output_GICv3.dtb"),
            "arm,gic-400" => include_bytes!("output_GICv2.dtb"),
            _ => panic!("Unexpected gic version!"),
        };
        let current_dtb_bytes = create_fdt(
            &mem,
            vec![0],
            CString::new("console=tty0").unwrap(),
            &device_manager,
            &gic,
            &None,
        )
        .unwrap();
        // Use this code when wanting to generate a new DTB sample.
        // {
        //     use std::fs;
        //     use std::io::Write;
        //     use std::path::PathBuf;
        //     let path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
        //     let dtb_path = match gic.fdt_compatibility() {
        //         "arm,gic-v3" => "output_GICv3.dtb",
        //         "arm,gic-400" => ("output_GICv2.dtb"),
        //         _ => panic!("Unexpected gic version!"),
        //     };
        //     let mut output = fs::OpenOptions::new()
        //         .write(true)
        //         .create(true)
        //         .open(path.join(format!("src/arch/aarch64/{}", dtb_path)))
        //         .unwrap();
        //     output.write_all(&current_dtb_bytes).unwrap();
        // }
        let pos = 4;
        let val = u32::try_from(layout::FDT_MAX_SIZE).unwrap();
        let mut buf = vec![];
        buf.extend_from_slice(saved_dtb_bytes);
        set_size(&mut buf, pos, val);
        let original_fdt = device_tree::DeviceTree::load(&buf).unwrap();
        let generated_fdt = device_tree::DeviceTree::load(&current_dtb_bytes).unwrap();
        assert_eq!(
            format!("{:?}", original_fdt),
            format!("{:?}", generated_fdt)
        );
    }
    #[test]
    fn test_create_fdt_with_initrd() {
        let mem = arch_mem(layout::FDT_MAX_SIZE + 0x1000);
        let device_manager = default_device_manager();
        let kvm = Kvm::new(vec![]).unwrap();
        let vm = Vm::new(&kvm).unwrap();
        let gic = create_gic(vm.fd(), 1, None).unwrap();
        let saved_dtb_bytes = match gic.fdt_compatibility() {
            "arm,gic-v3" => include_bytes!("output_initrd_GICv3.dtb"),
            "arm,gic-400" => include_bytes!("output_initrd_GICv2.dtb"),
            _ => panic!("Unexpected gic version!"),
        };
        let initrd = InitrdConfig {
            address: GuestAddress(0x1000_0000),
            size: 0x1000,
        };
        let current_dtb_bytes = create_fdt(
            &mem,
            vec![0],
            CString::new("console=tty0").unwrap(),
            &device_manager,
            &gic,
            &Some(initrd),
        )
        .unwrap();
        // Use this code when wanting to generate a new DTB sample.
        // {
        //     use std::fs;
        //     use std::io::Write;
        //     use std::path::PathBuf;
        //     let path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
        //     let dtb_path = match gic.fdt_compatibility() {
        //         "arm,gic-v3" => "output_initrd_GICv3.dtb",
        //         "arm,gic-400" => ("output_initrd_GICv2.dtb"),
        //         _ => panic!("Unexpected gic version!"),
        //     };
        //     let mut output = fs::OpenOptions::new()
        //         .write(true)
        //         .create(true)
        //         .open(path.join(format!("src/arch/aarch64/{}", dtb_path)))
        //         .unwrap();
        //     output.write_all(&current_dtb_bytes).unwrap();
        // }
        let pos = 4;
        let val = u32::try_from(layout::FDT_MAX_SIZE).unwrap();
        let mut buf = vec![];
        buf.extend_from_slice(saved_dtb_bytes);
        set_size(&mut buf, pos, val);
        let original_fdt = device_tree::DeviceTree::load(&buf).unwrap();
        let generated_fdt = device_tree::DeviceTree::load(&current_dtb_bytes).unwrap();
        assert_eq!(
            format!("{:?}", original_fdt),
            format!("{:?}", generated_fdt)
        );
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/mod.rs | src/vmm/src/arch/aarch64/mod.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
pub(crate) mod cache_info;
mod fdt;
/// Module for the global interrupt controller configuration.
pub mod gic;
/// Architecture specific KVM-related code
pub mod kvm;
/// Layout for this aarch64 system.
pub mod layout;
/// Logic for configuring aarch64 registers.
pub mod regs;
/// Architecture specific vCPU code
pub mod vcpu;
/// Architecture specific VM state code
pub mod vm;
use std::cmp::min;
use std::fmt::Debug;
use std::fs::File;
use linux_loader::loader::pe::PE as Loader;
use linux_loader::loader::{Cmdline, KernelLoader};
use vm_memory::{GuestMemoryError, GuestMemoryRegion};
use crate::arch::{BootProtocol, EntryPoint, arch_memory_regions_with_gap};
use crate::cpu_config::aarch64::{CpuConfiguration, CpuConfigurationError};
use crate::cpu_config::templates::CustomCpuTemplate;
use crate::initrd::InitrdConfig;
use crate::utils::{align_up, u64_to_usize, usize_to_u64};
use crate::vmm_config::machine_config::MachineConfig;
use crate::vstate::memory::{
Address, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap, GuestRegionType,
};
use crate::vstate::vcpu::KvmVcpuError;
use crate::{DeviceManager, Kvm, Vcpu, VcpuConfig, Vm, logger};
/// Errors thrown while configuring aarch64 system.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum ConfigurationError {
/// Failed to create a Flattened Device Tree for this aarch64 microVM: {0}
SetupFDT(#[from] fdt::FdtError),
/// Failed to write to guest memory.
MemoryError(#[from] GuestMemoryError),
/// Cannot copy kernel file fd
KernelFile,
/// Cannot load kernel due to invalid memory configuration or invalid kernel image: {0}
KernelLoader(#[from] linux_loader::loader::Error),
/// Error creating vcpu configuration: {0}
VcpuConfig(#[from] CpuConfigurationError),
/// Error configuring the vcpu: {0}
VcpuConfigure(#[from] KvmVcpuError),
}
/// Returns a Vec of the valid memory addresses for aarch64.
/// See [`layout`](layout) module for a drawing of the specific memory model for this platform.
pub fn arch_memory_regions(size: usize) -> Vec<(GuestAddress, usize)> {
    assert!(size > 0, "Attempt to allocate guest memory of length 0");
    // Clamp the request to the architectural DRAM limit, warning when it was
    // larger than what can be satisfied.
    let dram_size = min(size, layout::DRAM_MEM_MAX_SIZE);
    if dram_size < size {
        logger::warn!(
            "Requested memory size {} exceeds architectural maximum (1022GiB). Size has been \
             truncated to {}",
            size,
            dram_size
        );
    }
    // Lay DRAM out around the 64-bit MMIO gap; whatever does not fit below the
    // gap is placed in a second region right after it.
    let mut regions = Vec::new();
    if let Some((past_gap_offset, past_gap_len)) = arch_memory_regions_with_gap(
        &mut regions,
        u64_to_usize(layout::DRAM_MEM_START),
        dram_size,
        u64_to_usize(layout::MMIO64_MEM_START),
        u64_to_usize(layout::MMIO64_MEM_SIZE),
    ) {
        regions.push((GuestAddress(past_gap_offset as u64), past_gap_len));
    }
    regions
}
/// Configures the system for booting Linux.
#[allow(clippy::too_many_arguments)]
pub fn configure_system_for_boot(
    kvm: &Kvm,
    vm: &Vm,
    device_manager: &mut DeviceManager,
    vcpus: &mut [Vcpu],
    machine_config: &MachineConfig,
    cpu_template: &CustomCpuTemplate,
    entry_point: EntryPoint,
    initrd: &Option<InitrdConfig>,
    boot_cmdline: Cmdline,
) -> Result<(), ConfigurationError> {
    // Construct the base CpuConfiguration to apply CPU template onto.
    let cpu_config = CpuConfiguration::new(cpu_template, vcpus)?;
    // Apply CPU template to the base CpuConfiguration.
    let cpu_config = CpuConfiguration::apply_template(cpu_config, cpu_template);
    let vcpu_config = VcpuConfig {
        vcpu_count: machine_config.vcpu_count,
        smt: machine_config.smt,
        cpu_config,
    };
    let optional_capabilities = kvm.optional_capabilities();
    // Configure vCPUs with normalizing and setting the generated CPU configuration.
    for vcpu in vcpus.iter_mut() {
        vcpu.kvm_vcpu.configure(
            vm.guest_memory(),
            entry_point,
            &vcpu_config,
            &optional_capabilities,
        )?;
    }
    // Collect each vCPU's MPIDR; the FDT needs them to describe the cpu nodes.
    // `collect::<Result<...>>` short-circuits on the first failing vCPU.
    let vcpu_mpidr = vcpus
        .iter_mut()
        .map(|cpu| cpu.kvm_vcpu.get_mpidr())
        .collect::<Result<Vec<_>, _>>()
        .map_err(KvmVcpuError::ConfigureRegisters)?;
    let cmdline = boot_cmdline
        .as_cstring()
        .expect("Cannot create cstring from cmdline string");
    // Build the flattened device tree describing the whole machine.
    let fdt = fdt::create_fdt(
        vm.guest_memory(),
        vcpu_mpidr,
        cmdline,
        device_manager,
        vm.get_irqchip(),
        initrd,
    )?;
    // Write the blob into guest memory at the architecture-defined location.
    let fdt_address = GuestAddress(get_fdt_addr(vm.guest_memory()));
    vm.guest_memory().write_slice(fdt.as_slice(), fdt_address)?;
    Ok(())
}
/// Returns the memory address where the kernel could be loaded.
pub fn get_kernel_start() -> u64 {
    // The kernel goes right after the reserved system-memory carve-out.
    layout::SYSTEM_MEM_SIZE + layout::SYSTEM_MEM_START
}
/// Returns the memory address where the initrd could be loaded.
pub fn initrd_load_addr(guest_mem: &GuestMemoryMmap, initrd_size: usize) -> Option<u64> {
    // Round the initrd size up to a whole number of guest pages.
    let padded_size = align_up(
        usize_to_u64(initrd_size),
        usize_to_u64(super::GUEST_PAGE_SIZE),
    );
    // Place the initrd directly below the FDT; fail if that would underflow or
    // land outside guest memory.
    let addr = GuestAddress(get_fdt_addr(guest_mem)).checked_sub(padded_size)?;
    if guest_mem.address_in_range(addr) {
        Some(addr.raw_value())
    } else {
        None
    }
}
// Auxiliary function to get the address where the device tree blob is loaded.
fn get_fdt_addr(mem: &GuestMemoryMmap) -> u64 {
    // Find the first (and only) DRAM region.
    let dram = mem
        .iter()
        .find(|region| region.region_type == GuestRegionType::Dram)
        .unwrap();
    // The FDT lives at the very top of DRAM. If the memory allocated is
    // smaller than the space reserved for the FDT, fall back to the start of
    // DRAM so that loading the FDT can still be attempted.
    match dram.last_addr().checked_sub(layout::FDT_MAX_SIZE as u64 - 1) {
        Some(addr) if mem.address_in_range(addr) => addr.raw_value(),
        _ => layout::DRAM_MEM_START,
    }
}
/// Load linux kernel into guest memory.
pub fn load_kernel(
    kernel: &File,
    guest_memory: &GuestMemoryMmap,
) -> Result<EntryPoint, ConfigurationError> {
    // Reading from the `File` mutates it (advances its cursor), so operate on
    // a duplicated handle.
    let mut kernel_file = kernel
        .try_clone()
        .map_err(|_| ConfigurationError::KernelFile)?;
    let load_result = Loader::load(
        guest_memory,
        Some(GuestAddress(get_kernel_start())),
        &mut kernel_file,
        None,
    )?;
    Ok(EntryPoint {
        entry_addr: load_result.kernel_load,
        protocol: BootProtocol::LinuxBoot,
    })
}
#[cfg(kani)]
mod verification {
    use crate::arch::aarch64::layout::{
        DRAM_MEM_MAX_SIZE, DRAM_MEM_START, FIRST_ADDR_PAST_64BITS_MMIO, MMIO64_MEM_START,
    };
    use crate::arch::arch_memory_regions;
    #[kani::proof]
    #[kani::unwind(3)]
    fn verify_arch_memory_regions() {
        let len: usize = kani::any::<usize>();
        kani::assume(len > 0);
        let regions = arch_memory_regions(len);
        // FIX: this loop header had been corrupted during text extraction
        // (`&regions` had turned into mojibake); restored.
        for region in &regions {
            println!(
                "region: [{:x}:{:x})",
                region.0.0,
                region.0.0 + region.1 as u64
            );
        }
        // On Arm we have one MMIO gap that might fall within addressable ranges,
        // so we can get either 1 or 2 regions.
        assert!(regions.len() >= 1);
        assert!(regions.len() <= 2);
        // The total length of all regions cannot exceed DRAM_MEM_MAX_SIZE
        let actual_len = regions.iter().map(|&(_, len)| len).sum::<usize>();
        assert!(actual_len <= DRAM_MEM_MAX_SIZE);
        // The total length is smaller or equal to the length we asked
        assert!(actual_len <= len);
        // If it's smaller, it's because we asked more than the the maximum possible.
        if (actual_len) < len {
            assert!(len > DRAM_MEM_MAX_SIZE);
        }
        // No region overlaps the 64-bit MMIO gap
        assert!(
            regions
                .iter()
                .all(|&(start, len)| start.0 >= FIRST_ADDR_PAST_64BITS_MMIO
                    || start.0 + len as u64 <= MMIO64_MEM_START)
        );
        // All regions start after our DRAM_MEM_START
        assert!(regions.iter().all(|&(start, _)| start.0 >= DRAM_MEM_START));
        // All regions have non-zero length
        assert!(regions.iter().all(|&(_, len)| len > 0));
        // If there's two regions, they perfectly snuggle up the 64bit MMIO gap
        if regions.len() == 2 {
            kani::cover!();
            // The very first address should be DRAM_MEM_START
            assert_eq!(regions[0].0.0, DRAM_MEM_START);
            // The first region ends at the beginning of the 64 bits gap.
            assert_eq!(regions[0].0.0 + regions[0].1 as u64, MMIO64_MEM_START);
            // The second region starts exactly after the 64 bits gap.
            assert_eq!(regions[1].0.0, FIRST_ADDR_PAST_64BITS_MMIO);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::arch::aarch64::layout::{
        DRAM_MEM_MAX_SIZE, DRAM_MEM_START, FDT_MAX_SIZE, FIRST_ADDR_PAST_64BITS_MMIO,
        MMIO64_MEM_START,
    };
    use crate::test_utils::arch_mem;
    // Memory below the 64-bit MMIO gap fits into a single DRAM region.
    #[test]
    fn test_regions_lt_1024gb() {
        let regions = arch_memory_regions(1usize << 29);
        assert_eq!(1, regions.len());
        assert_eq!(GuestAddress(DRAM_MEM_START), regions[0].0);
        assert_eq!(1usize << 29, regions[0].1);
    }
    // Larger memory is split into two regions that straddle the MMIO gap.
    #[test]
    fn test_regions_gt_1024gb() {
        let regions = arch_memory_regions(1usize << 41);
        assert_eq!(2, regions.len());
        assert_eq!(GuestAddress(DRAM_MEM_START), regions[0].0);
        assert_eq!(MMIO64_MEM_START - DRAM_MEM_START, regions[0].1 as u64);
        assert_eq!(GuestAddress(FIRST_ADDR_PAST_64BITS_MMIO), regions[1].0);
        assert_eq!(
            DRAM_MEM_MAX_SIZE as u64 - MMIO64_MEM_START + DRAM_MEM_START,
            regions[1].1 as u64
        );
    }
    // The FDT is placed at the top of DRAM, falling back to the start of DRAM
    // when DRAM is too small to hold the reserved FDT window.
    #[test]
    fn test_get_fdt_addr() {
        let mem = arch_mem(FDT_MAX_SIZE - 0x1000);
        assert_eq!(get_fdt_addr(&mem), DRAM_MEM_START);
        let mem = arch_mem(FDT_MAX_SIZE);
        assert_eq!(get_fdt_addr(&mem), DRAM_MEM_START);
        let mem = arch_mem(FDT_MAX_SIZE + 0x1000);
        assert_eq!(get_fdt_addr(&mem), 0x1000 + DRAM_MEM_START);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/layout.rs | src/vmm/src/arch/aarch64/layout.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// ==== Address map in use in ARM development systems today ====
//
// - 32-bit - - 36-bit - - 40-bit -
// 1024GB + + +-------------------+ <- 40-bit
// | | DRAM |
// ~ ~ ~ ~
// | | |
// | | |
// | | |
// | | |
// 544GB + + +-------------------+
// | | Hole or DRAM |
// | | |
// 512GB + + +-------------------+
// | | Mapped |
// | | I/O |
// ~ ~ ~ ~
// | | |
// 256GB + + +-------------------+
// | | Reserved |
// ~ ~ ~ ~
// | | |
// 64GB + +-----------------------+-------------------+ <- 36-bit
// | | DRAM |
// ~ ~ ~ ~
// | | |
// | | |
// 34GB + +-----------------------+-------------------+
// | | Hole or DRAM |
// 32GB + +-----------------------+-------------------+
// | | Mapped I/O |
// ~ ~ ~ ~
// | | |
// 16GB + +-----------------------+-------------------+
// | | Reserved |
// ~ ~ ~ ~
// 4GB +-------------------+-----------------------+-------------------+ <- 32-bit
// | 2GB of DRAM |
// | |
// 2GB +-------------------+-----------------------+-------------------+
// | Mapped I/O |
// 1GB +-------------------+-----------------------+-------------------+
// | ROM & RAM & I/O |
// 0GB +-------------------+-----------------------+-------------------+ 0
// - 32-bit - - 36-bit - - 40-bit -
//
// Taken from (http://infocenter.arm.com/help/topic/com.arm.doc.den0001c/DEN0001C_principles_of_arm_memory_maps.pdf).
use crate::device_manager::mmio::MMIO_LEN;
/// Start of RAM on 64 bit ARM.
pub const DRAM_MEM_START: u64 = 0x8000_0000; // 2 GB.
/// The maximum RAM size.
pub const DRAM_MEM_MAX_SIZE: usize = 0x00FF_8000_0000; // 1024 - 2 = 1022G.
/// Start of the system memory area; an alias for the start of RAM.
pub const SYSTEM_MEM_START: u64 = DRAM_MEM_START;
/// This is used by ACPI device manager for acpi tables or devices like vmgenid
/// In reality, 2MBs is an overkill, but immediately after this we write the kernel
/// image, which needs to be 2MB aligned.
pub const SYSTEM_MEM_SIZE: u64 = 0x20_0000;
/// Kernel command line maximum size.
/// As per `arch/arm64/include/uapi/asm/setup.h`.
pub const CMDLINE_MAX_SIZE: usize = 2048;
/// Maximum size of the device tree blob as specified in https://www.kernel.org/doc/Documentation/arm64/booting.txt.
pub const FDT_MAX_SIZE: usize = 0x20_0000;
// As per virt/kvm/arm/vgic/vgic-kvm-device.c we need
// the number of interrupts our GIC will support to be:
// * bigger than 32
// * less than 1023 and
// * a multiple of 32.
// The first 32 SPIs are reserved, but KVM already shifts the gsi we
// pass, so we go from 0 to 95 for legacy gsis ("irq") and the remaining
// we use for MSI.
/// Offset of first SPI in the GIC
pub const SPI_START: u32 = 32;
/// Last possible SPI in the GIC (128 total SPIs)
pub const SPI_END: u32 = 127;
/// First usable GSI id on aarch64 (corresponds to SPI #32).
pub const GSI_LEGACY_START: u32 = 0;
/// There are 128 SPIs available, but the first 32 are reserved
pub const GSI_LEGACY_NUM: u32 = SPI_END - SPI_START + 1;
/// Last GSI available for legacy ("irq") interrupts; MSI GSIs start after it.
pub const GSI_LEGACY_END: u32 = GSI_LEGACY_START + GSI_LEGACY_NUM - 1;
/// First GSI used by MSI after legacy GSI
pub const GSI_MSI_START: u32 = GSI_LEGACY_END + 1;
/// The highest available GSI in KVM (KVM_MAX_IRQ_ROUTES=4096)
pub const GSI_MSI_END: u32 = 4095;
/// Number of GSI available for MSI.
pub const GSI_MSI_NUM: u32 = GSI_MSI_END - GSI_MSI_START + 1;
/// The start of the memory area reserved for MMIO 32-bit accesses.
/// Below this address will reside the GIC, above this address will reside the MMIO devices.
pub const MMIO32_MEM_START: u64 = 1 << 30; // 1GiB
/// The size of the memory area reserved for MMIO 32-bit accesses (1GiB).
pub const MMIO32_MEM_SIZE: u64 = DRAM_MEM_START - MMIO32_MEM_START;
// The rest of the MMIO address space (256 MiB) we dedicate to PCIe for memory-mapped access to
// configuration.
/// Size of MMIO region for PCIe configuration accesses.
pub const PCI_MMCONFIG_SIZE: u64 = 256 << 20;
/// Start of MMIO region for PCIe configuration accesses.
pub const PCI_MMCONFIG_START: u64 = DRAM_MEM_START - PCI_MMCONFIG_SIZE;
/// MMIO space per PCIe segment
pub const PCI_MMIO_CONFIG_SIZE_PER_SEGMENT: u64 = 4096 * 256;
// We reserve 768 MiB for devices at the beginning of the MMIO region. This includes space both for
// pure MMIO and PCIe devices.
/// Memory region start for boot device.
pub const BOOT_DEVICE_MEM_START: u64 = MMIO32_MEM_START;
/// Memory region start for RTC device.
pub const RTC_MEM_START: u64 = BOOT_DEVICE_MEM_START + MMIO_LEN;
/// Memory region start for Serial device.
pub const SERIAL_MEM_START: u64 = RTC_MEM_START + MMIO_LEN;
/// Beginning of memory region for device MMIO 32-bit accesses
pub const MEM_32BIT_DEVICES_START: u64 = SERIAL_MEM_START + MMIO_LEN;
/// Size of memory region for device MMIO 32-bit accesses
pub const MEM_32BIT_DEVICES_SIZE: u64 = PCI_MMCONFIG_START - MEM_32BIT_DEVICES_START;
// 64-bits region for MMIO accesses
/// The start of the memory area reserved for MMIO 64-bit accesses.
pub const MMIO64_MEM_START: u64 = 256 << 30;
/// The size of the memory area reserved for MMIO 64-bit accesses.
pub const MMIO64_MEM_SIZE: u64 = 256 << 30;
// At the moment, all of this region goes to devices
/// Beginning of memory region for device MMIO 64-bit accesses
pub const MEM_64BIT_DEVICES_START: u64 = MMIO64_MEM_START;
/// Size of memory region for device MMIO 64-bit accesses
pub const MEM_64BIT_DEVICES_SIZE: u64 = MMIO64_MEM_SIZE;
/// First address past the 64-bit MMIO gap
pub const FIRST_ADDR_PAST_64BITS_MMIO: u64 = MMIO64_MEM_START + MMIO64_MEM_SIZE;
/// Size of the memory past 64-bit MMIO gap
pub const PAST_64BITS_MMIO_SIZE: u64 = 512 << 30;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/regs.rs | src/vmm/src/arch/aarch64/regs.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::fmt::Write;
use std::mem::offset_of;
use kvm_bindings::*;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[allow(non_upper_case_globals)]
/// PSR (Processor State Register) bits.
/// Taken from arch/arm64/include/uapi/asm/ptrace.h.
const PSR_MODE_EL1h: u64 = 0x0000_0005;
const PSR_F_BIT: u64 = 0x0000_0040;
const PSR_I_BIT: u64 = 0x0000_0080;
const PSR_A_BIT: u64 = 0x0000_0100;
const PSR_D_BIT: u64 = 0x0000_0200;
/// Taken from arch/arm64/kvm/inject_fault.c.
pub const PSTATE_FAULT_BITS_64: u64 = PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | PSR_I_BIT | PSR_D_BIT;
/// Gets a core id.
macro_rules! arm64_core_reg_id {
($size: ident, $offset: expr) => {
// The core registers of an arm64 machine are represented
// in kernel by the `kvm_regs` structure. This structure is a
// mix of 32, 64 and 128 bit fields:
// struct kvm_regs {
// struct user_pt_regs regs;
//
// __u64 sp_el1;
// __u64 elr_el1;
//
// __u64 spsr[KVM_NR_SPSR];
//
// struct user_fpsimd_state fp_regs;
// };
// struct user_pt_regs {
// __u64 regs[31];
// __u64 sp;
// __u64 pc;
// __u64 pstate;
// };
// The id of a core register can be obtained like this:
// offset = id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE). Thus,
// id = KVM_REG_ARM64 | KVM_REG_SIZE_U64/KVM_REG_SIZE_U32/KVM_REG_SIZE_U128 |
// KVM_REG_ARM_CORE | offset
KVM_REG_ARM64 as u64
| KVM_REG_ARM_CORE as u64
| $size
| ($offset / std::mem::size_of::<u32>()) as u64
};
}
pub(crate) use arm64_core_reg_id;
/// This macro computes the ID of a specific ARM64 system register similar to how
/// the kernel C macro does.
/// https://elixir.bootlin.com/linux/v4.20.17/source/arch/arm64/include/uapi/asm/kvm.h#L203
macro_rules! arm64_sys_reg {
($name: tt, $op0: tt, $op1: tt, $crn: tt, $crm: tt, $op2: tt) => {
/// System register constant
pub const $name: u64 = KVM_REG_ARM64 as u64
| KVM_REG_SIZE_U64 as u64
| KVM_REG_ARM64_SYSREG as u64
| ((($op0 as u64) << KVM_REG_ARM64_SYSREG_OP0_SHIFT)
& KVM_REG_ARM64_SYSREG_OP0_MASK as u64)
| ((($op1 as u64) << KVM_REG_ARM64_SYSREG_OP1_SHIFT)
& KVM_REG_ARM64_SYSREG_OP1_MASK as u64)
| ((($crn as u64) << KVM_REG_ARM64_SYSREG_CRN_SHIFT)
& KVM_REG_ARM64_SYSREG_CRN_MASK as u64)
| ((($crm as u64) << KVM_REG_ARM64_SYSREG_CRM_SHIFT)
& KVM_REG_ARM64_SYSREG_CRM_MASK as u64)
| ((($op2 as u64) << KVM_REG_ARM64_SYSREG_OP2_SHIFT)
& KVM_REG_ARM64_SYSREG_OP2_MASK as u64);
};
}
// Constants imported from the Linux kernel:
// https://elixir.bootlin.com/linux/v4.20.17/source/arch/arm64/include/asm/sysreg.h#L135
arm64_sys_reg!(MPIDR_EL1, 3, 0, 0, 0, 5);
arm64_sys_reg!(MIDR_EL1, 3, 0, 0, 0, 0);
// ID registers that represent cpu capabilities.
// Needed for static cpu templates.
arm64_sys_reg!(ID_AA64PFR0_EL1, 3, 0, 0, 4, 0);
arm64_sys_reg!(ID_AA64ISAR0_EL1, 3, 0, 0, 6, 0);
arm64_sys_reg!(ID_AA64ISAR1_EL1, 3, 0, 0, 6, 1);
arm64_sys_reg!(ID_AA64MMFR2_EL1, 3, 0, 0, 7, 2);
// Counter-timer Virtual Timer CompareValue register.
// https://developer.arm.com/documentation/ddi0595/2021-12/AArch64-Registers/CNTV-CVAL-EL0--Counter-timer-Virtual-Timer-CompareValue-register
// https://elixir.bootlin.com/linux/v6.8/source/arch/arm64/include/asm/sysreg.h#L468
arm64_sys_reg!(SYS_CNTV_CVAL_EL0, 3, 3, 14, 3, 2);
// Counter-timer Physical Count Register
// https://developer.arm.com/documentation/ddi0601/2023-12/AArch64-Registers/CNTPCT-EL0--Counter-timer-Physical-Count-Register
// https://elixir.bootlin.com/linux/v6.8/source/arch/arm64/include/asm/sysreg.h#L459
arm64_sys_reg!(SYS_CNTPCT_EL0, 3, 3, 14, 0, 1);
// Physical Timer EL0 count Register
// The id of this register is same as SYS_CNTPCT_EL0, but KVM defines it
// separately, so we do as well.
// https://elixir.bootlin.com/linux/v6.12.6/source/arch/arm64/include/uapi/asm/kvm.h#L259
arm64_sys_reg!(KVM_REG_ARM_PTIMER_CNT, 3, 3, 14, 0, 1);
// Translation Table Base Register
// https://developer.arm.com/documentation/ddi0595/2021-03/AArch64-Registers/TTBR1-EL1--Translation-Table-Base-Register-1--EL1-
arm64_sys_reg!(TTBR1_EL1, 3, 0, 2, 0, 1);
// Translation Control Register
// https://developer.arm.com/documentation/ddi0601/2024-09/AArch64-Registers/TCR-EL1--Translation-Control-Register--EL1-
arm64_sys_reg!(TCR_EL1, 3, 0, 2, 0, 2);
// AArch64 Memory Model Feature Register
// https://developer.arm.com/documentation/100798/0400/register-descriptions/aarch64-system-registers/id-aa64mmfr0-el1--aarch64-memory-model-feature-register-0--el1
arm64_sys_reg!(ID_AA64MMFR0_EL1, 3, 0, 0, 7, 0);
/// Vector lengths pseudo-register
/// TODO: this can be removed after https://github.com/rust-vmm/kvm-bindings/pull/89
/// is merged and new version is used in Firecracker.
pub const KVM_REG_ARM64_SVE_VLS: u64 =
    KVM_REG_ARM64 | KVM_REG_ARM64_SVE as u64 | KVM_REG_SIZE_U512 | 0xffff;
/// Program Counter
/// The offset value (0x100 = 32 * 8) is calculated as follows:
/// - `kvm_regs` includes `regs` field of type `user_pt_regs` at the beginning (i.e., at offset 0).
/// - `pc` follows `regs[31]` and `sp` within `user_pt_regs` and they are 8 bytes each (i.e. the
///   offset is (31 + 1) * 8 = 256).
///
/// https://github.com/torvalds/linux/blob/master/Documentation/virt/kvm/api.rst#L2578
/// > 0x6030 0000 0010 0040 PC 64 regs.pc
pub const PC: u64 = {
    // Compute the byte offset of `pc` inside `kvm_regs` at compile time
    // instead of hard-coding 0x100, so layout changes can't desync it.
    let kreg_off = offset_of!(kvm_regs, regs);
    let pc_off = offset_of!(user_pt_regs, pc);
    arm64_core_reg_id!(KVM_REG_SIZE_U64, kreg_off + pc_off)
};
/// Different aarch64 registers sizes
#[derive(Debug)]
pub enum RegSize {
/// 8 bit register
U8,
/// 16 bit register
U16,
/// 32 bit register
U32,
/// 64 bit register
U64,
/// 128 bit register
U128,
/// 256 bit register
U256,
/// 512 bit register
U512,
/// 1024 bit register
U1024,
/// 2048 bit register
U2048,
}
impl RegSize {
/// Size of u8 register in bytes
pub const U8_SIZE: usize = 1;
/// Size of u16 register in bytes
pub const U16_SIZE: usize = 2;
/// Size of u32 register in bytes
pub const U32_SIZE: usize = 4;
/// Size of u64 register in bytes
pub const U64_SIZE: usize = 8;
/// Size of u128 register in bytes
pub const U128_SIZE: usize = 16;
/// Size of u256 register in bytes
pub const U256_SIZE: usize = 32;
/// Size of u512 register in bytes
pub const U512_SIZE: usize = 64;
/// Size of u1024 register in bytes
pub const U1024_SIZE: usize = 128;
/// Size of u2048 register in bytes
pub const U2048_SIZE: usize = 256;
}
impl From<usize> for RegSize {
    /// Maps a register size in bytes to the corresponding `RegSize` variant.
    ///
    /// # Panics
    /// Panics if `value` is not one of the supported register sizes
    /// (1, 2, 4, ..., 256 bytes).
    fn from(value: usize) -> Self {
        match value {
            RegSize::U8_SIZE => RegSize::U8,
            RegSize::U16_SIZE => RegSize::U16,
            RegSize::U32_SIZE => RegSize::U32,
            RegSize::U64_SIZE => RegSize::U64,
            RegSize::U128_SIZE => RegSize::U128,
            RegSize::U256_SIZE => RegSize::U256,
            RegSize::U512_SIZE => RegSize::U512,
            RegSize::U1024_SIZE => RegSize::U1024,
            RegSize::U2048_SIZE => RegSize::U2048,
            // Typo fix in the panic message: "then" -> "than".
            _ => unreachable!("Registers bigger than 2048 bits are not supported"),
        }
    }
}
impl From<RegSize> for usize {
fn from(value: RegSize) -> Self {
match value {
RegSize::U8 => RegSize::U8_SIZE,
RegSize::U16 => RegSize::U16_SIZE,
RegSize::U32 => RegSize::U32_SIZE,
RegSize::U64 => RegSize::U64_SIZE,
RegSize::U128 => RegSize::U128_SIZE,
RegSize::U256 => RegSize::U256_SIZE,
RegSize::U512 => RegSize::U512_SIZE,
RegSize::U1024 => RegSize::U1024_SIZE,
RegSize::U2048 => RegSize::U2048_SIZE,
}
}
}
/// Returns register size in bytes
pub fn reg_size(reg_id: u64) -> usize {
    // The register width is encoded as log2(bytes) in the KVM_REG_SIZE
    // field of the register id; decode it and exponentiate.
    let size_log2 = ((reg_id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT) as u32;
    2_usize.pow(size_log2)
}
/// Storage for aarch64 registers with different sizes.
#[derive(Default, Debug, Clone, PartialEq, Eq)]
pub struct Aarch64RegisterVec {
ids: Vec<u64>,
data: Vec<u8>,
}
impl Aarch64RegisterVec {
    /// Returns the number of elements in the vector.
    pub fn len(&self) -> usize {
        self.ids.len()
    }
    /// Returns true if the vector contains no elements.
    pub fn is_empty(&self) -> bool {
        self.ids.is_empty()
    }
    /// Appends a register to the vector, copying register data.
    ///
    /// Register payloads are stored back-to-back in `self.data`; each
    /// register's width is recoverable from its id (see `reg_size`), which is
    /// what the iterators rely on to slice `data` correctly.
    pub fn push(&mut self, reg: Aarch64RegisterRef<'_>) {
        self.ids.push(reg.id);
        self.data.extend_from_slice(reg.data);
    }
    /// Returns an iterator over stored registers.
    pub fn iter(&self) -> impl Iterator<Item = Aarch64RegisterRef<'_>> {
        Aarch64RegisterVecIterator {
            index: 0,
            offset: 0,
            ids: &self.ids,
            data: &self.data,
        }
    }
    /// Returns an iterator over stored registers that allows register modifications.
    pub fn iter_mut(&mut self) -> impl Iterator<Item = Aarch64RegisterRefMut<'_>> {
        Aarch64RegisterVecIteratorMut {
            index: 0,
            offset: 0,
            ids: &self.ids,
            data: &mut self.data,
        }
    }
    /// Extract the Manufacturer ID from a VCPU state's registers.
    /// The ID is found between bits 24-31 of MIDR_EL1 register.
    ///
    /// Returns `None` if MIDR_EL1 is not present in the vector.
    // NOTE(review): "manifacturer" is a misspelling of "manufacturer", but the
    // method is public API; renaming it would break callers.
    pub fn manifacturer_id(&self) -> Option<u32> {
        self.iter()
            .find(|reg| reg.id == MIDR_EL1)
            .map(|reg| ((reg.value::<u64, 8>() >> 24) & 0xFF) as u32)
    }
}
impl Serialize for Aarch64RegisterVec {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
Serialize::serialize(&(&self.ids, &self.data), serializer)
}
}
impl<'de> Deserialize<'de> for Aarch64RegisterVec {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Mirrors `Serialize`: the vector is stored as an `(ids, data)` pair.
        let (ids, data): (Vec<u64>, Vec<u8>) = Deserialize::deserialize(deserializer)?;
        // Validate before constructing: the sizes implied by the ids must be
        // supported and must add up to exactly `data.len()`, otherwise the
        // iterators would later slice `data` out of bounds.
        let mut total_size: usize = 0;
        for id in ids.iter() {
            let reg_size = reg_size(*id);
            if reg_size > RegSize::U2048_SIZE {
                return Err(serde::de::Error::custom(
                    "Failed to deserialize aarch64 registers. Registers bigger than 2048 bits are \
                     not supported",
                ));
            }
            total_size += reg_size;
        }
        if total_size != data.len() {
            return Err(serde::de::Error::custom(
                "Failed to deserialize aarch64 registers. Sum of register sizes is not equal to \
                 registers data length",
            ));
        }
        Ok(Aarch64RegisterVec { ids, data })
    }
}
/// Iterator over `Aarch64RegisterVec`.
#[derive(Debug)]
pub struct Aarch64RegisterVecIterator<'a> {
index: usize,
offset: usize,
ids: &'a [u64],
data: &'a [u8],
}
impl<'a> Iterator for Aarch64RegisterVecIterator<'a> {
type Item = Aarch64RegisterRef<'a>;
fn next(&mut self) -> Option<Self::Item> {
if self.index < self.ids.len() {
let id = self.ids[self.index];
let reg_size = reg_size(id);
let reg_ref = Aarch64RegisterRef {
id,
data: &self.data[self.offset..self.offset + reg_size],
};
self.index += 1;
self.offset += reg_size;
Some(reg_ref)
} else {
None
}
}
}
/// Iterator over `Aarch64RegisterVec` with mutable values.
#[derive(Debug)]
pub struct Aarch64RegisterVecIteratorMut<'a> {
index: usize,
offset: usize,
ids: &'a [u64],
data: &'a mut [u8],
}
impl<'a> Iterator for Aarch64RegisterVecIteratorMut<'a> {
    type Item = Aarch64RegisterRefMut<'a>;
    fn next(&mut self) -> Option<Self::Item> {
        if self.index < self.ids.len() {
            let id = self.ids[self.index];
            let reg_size = reg_size(id);
            // Move the remaining data slice out of `self` (leaving an empty
            // slice behind) so it can be split into two disjoint `&'a mut`
            // borrows: `head` is this register's payload, `tail` is everything
            // after it. This is the standard pattern for yielding
            // non-overlapping mutable slices from an iterator.
            let data = std::mem::take(&mut self.data);
            let (head, tail) = data.split_at_mut(reg_size);
            self.index += 1;
            self.offset += reg_size;
            // Keep only the tail for subsequent iterations; `head` is handed
            // out with the full 'a lifetime.
            self.data = tail;
            Some(Aarch64RegisterRefMut { id, data: head })
        } else {
            None
        }
    }
}
/// Reference to the aarch64 register.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Aarch64RegisterRef<'a> {
/// ID of the register
pub id: u64,
data: &'a [u8],
}
impl<'a> Aarch64RegisterRef<'a> {
/// Creates new register reference with provided id and data.
/// Register size in `id` should be equal to the
/// length of the slice. Otherwise this method
/// will panic.
pub fn new(id: u64, data: &'a [u8]) -> Self {
assert_eq!(
reg_size(id),
data.len(),
"Attempt to create a register reference with incompatible id and data length"
);
Self { id, data }
}
/// Returns register size in bytes
pub fn size(&self) -> RegSize {
reg_size(self.id).into()
}
/// Returns a register value.
/// Type `T` must be of the same length as an
/// underlying data slice. Otherwise this method
/// will panic.
pub fn value<T: Aarch64RegisterData<N>, const N: usize>(&self) -> T {
T::from_slice(self.data)
}
/// Returns a string with hex formatted value of the register.
pub fn value_str(&self) -> String {
let hex = self.data.iter().rev().fold(String::new(), |mut acc, byte| {
write!(&mut acc, "{:02x}", byte).unwrap();
acc
});
format!("0x{hex}")
}
/// Returns register data as a byte slice
pub fn as_slice(&self) -> &[u8] {
self.data
}
}
/// Reference to the aarch64 register.
#[derive(Debug, PartialEq, Eq)]
pub struct Aarch64RegisterRefMut<'a> {
/// ID of the register
pub id: u64,
data: &'a mut [u8],
}
impl<'a> Aarch64RegisterRefMut<'a> {
/// Creates new register reference with provided id and data.
/// Register size in `id` should be equal to the
/// length of the slice. Otherwise this method
/// will panic.
pub fn new(id: u64, data: &'a mut [u8]) -> Self {
assert_eq!(
reg_size(id),
data.len(),
"Attempt to create a register reference with incompatible id and data length"
);
Self { id, data }
}
/// Returns register size in bytes
pub fn size(&self) -> RegSize {
reg_size(self.id).into()
}
/// Returns a register value.
/// Type `T` must be of the same length as an
/// underlying data slice. Otherwise this method
/// will panic.
pub fn value<T: Aarch64RegisterData<N>, const N: usize>(&self) -> T {
T::from_slice(self.data)
}
/// Sets the register value.
/// Type `T` must be of the same length as an
/// underlying data slice. Otherwise this method
/// will panic.
pub fn set_value<T: Aarch64RegisterData<N>, const N: usize>(&mut self, value: T) {
self.data.copy_from_slice(&value.to_bytes())
}
}
/// Trait for data types that can represent aarch64
/// register data.
pub trait Aarch64RegisterData<const N: usize> {
/// Create data type from slice
fn from_slice(slice: &[u8]) -> Self;
/// Convert data type to array of bytes
fn to_bytes(&self) -> [u8; N];
}
macro_rules! reg_data {
($t:ty, $bytes: expr) => {
impl Aarch64RegisterData<$bytes> for $t {
fn from_slice(slice: &[u8]) -> Self {
let mut bytes = [0_u8; $bytes];
bytes.copy_from_slice(slice);
<$t>::from_le_bytes(bytes)
}
fn to_bytes(&self) -> [u8; $bytes] {
self.to_le_bytes()
}
}
};
}
macro_rules! reg_data_array {
($t:ty, $bytes: expr) => {
impl Aarch64RegisterData<$bytes> for $t {
fn from_slice(slice: &[u8]) -> Self {
let mut bytes = [0_u8; $bytes];
bytes.copy_from_slice(slice);
bytes
}
fn to_bytes(&self) -> [u8; $bytes] {
*self
}
}
};
}
reg_data!(u8, 1);
reg_data!(u16, 2);
reg_data!(u32, 4);
reg_data!(u64, 8);
reg_data!(u128, 16);
// 256
reg_data_array!([u8; 32], 32);
// 512
reg_data_array!([u8; 64], 64);
// 1024
reg_data_array!([u8; 128], 128);
// 2048
reg_data_array!([u8; 256], 256);
#[cfg(test)]
mod tests {
    use super::*;
    use crate::snapshot::Snapshot;

    // NOTE: this module had every `&reg...` borrow corrupted into the `®`
    // character (an HTML-entity mangling of "&reg"); the borrows are restored
    // below so the module compiles again.

    #[test]
    fn test_reg_size() {
        assert_eq!(reg_size(KVM_REG_SIZE_U32), 4);
        // ID_AA64PFR0_EL1 is 64 bit register
        assert_eq!(reg_size(ID_AA64PFR0_EL1), 8);
    }

    #[test]
    fn test_aarch64_register_vec_serde() {
        let mut v = Aarch64RegisterVec::default();
        let reg1_bytes = 1_u8.to_le_bytes();
        let reg1 = Aarch64RegisterRef::new(u64::from(KVM_REG_SIZE_U8), &reg1_bytes);
        let reg2_bytes = 2_u16.to_le_bytes();
        let reg2 = Aarch64RegisterRef::new(KVM_REG_SIZE_U16, &reg2_bytes);
        v.push(reg1);
        v.push(reg2);
        let mut buf = vec![0; 10000];
        Snapshot::new(&v).save(&mut buf.as_mut_slice()).unwrap();
        let restored: Aarch64RegisterVec = Snapshot::load_without_crc_check(buf.as_slice())
            .unwrap()
            .data;
        for (old, new) in v.iter().zip(restored.iter()) {
            assert_eq!(old, new);
        }
    }

    #[test]
    fn test_aarch64_register_vec_serde_invalid_regs_size_sum() {
        let mut v = Aarch64RegisterVec::default();
        let reg1_bytes = 1_u8.to_le_bytes();
        // Creating invalid register with incompatible ID and reg size.
        let reg1 = Aarch64RegisterRef {
            id: KVM_REG_SIZE_U16,
            data: &reg1_bytes,
        };
        let reg2_bytes = 2_u16.to_le_bytes();
        let reg2 = Aarch64RegisterRef::new(KVM_REG_SIZE_U16, &reg2_bytes);
        v.push(reg1);
        v.push(reg2);
        let mut buf = vec![0; 10000];
        Snapshot::new(&v).save(&mut buf.as_mut_slice()).unwrap();
        // Total size of registers according IDs are 16 + 16 = 32,
        // but actual data size is 8 + 16 = 24.
        Snapshot::<Aarch64RegisterVec>::load_without_crc_check(buf.as_slice()).unwrap_err();
    }

    #[test]
    fn test_aarch64_register_vec_serde_invalid_reg_size() {
        let mut v = Aarch64RegisterVec::default();
        let reg_bytes = [0_u8; 512];
        // Creating invalid register with incompatible size.
        // 512 bytes for 4096 bit wide register.
        let reg = Aarch64RegisterRef {
            id: 0x0090000000000000,
            data: &reg_bytes,
        };
        v.push(reg);
        let mut buf = vec![0; 10000];
        Snapshot::new(v).save(&mut buf.as_mut_slice()).unwrap();
        // 4096 bit wide registers are not supported.
        Snapshot::<Aarch64RegisterVec>::load_without_crc_check(buf.as_slice()).unwrap_err();
    }

    #[test]
    fn test_aarch64_register_vec() {
        let mut v = Aarch64RegisterVec::default();
        let reg1_bytes = 1_u8.to_le_bytes();
        let reg1 = Aarch64RegisterRef::new(u64::from(KVM_REG_SIZE_U8), &reg1_bytes);
        let reg2_bytes = 2_u16.to_le_bytes();
        let reg2 = Aarch64RegisterRef::new(KVM_REG_SIZE_U16, &reg2_bytes);
        let reg3_bytes = 3_u32.to_le_bytes();
        let reg3 = Aarch64RegisterRef::new(KVM_REG_SIZE_U32, &reg3_bytes);
        let reg4_bytes = 4_u64.to_le_bytes();
        let reg4 = Aarch64RegisterRef::new(KVM_REG_SIZE_U64, &reg4_bytes);
        let reg5_bytes = 5_u128.to_le_bytes();
        let reg5 = Aarch64RegisterRef::new(KVM_REG_SIZE_U128, &reg5_bytes);
        let reg6 = Aarch64RegisterRef::new(KVM_REG_SIZE_U256, &[6; 32]);
        let reg7 = Aarch64RegisterRef::new(KVM_REG_SIZE_U512, &[7; 64]);
        let reg8 = Aarch64RegisterRef::new(KVM_REG_SIZE_U1024, &[8; 128]);
        let reg9 = Aarch64RegisterRef::new(KVM_REG_SIZE_U2048, &[9; 256]);
        v.push(reg1);
        v.push(reg2);
        v.push(reg3);
        v.push(reg4);
        v.push(reg5);
        v.push(reg6);
        v.push(reg7);
        v.push(reg8);
        v.push(reg9);
        assert!(!v.is_empty());
        assert_eq!(v.len(), 9);
        // Test iter
        {
            macro_rules! test_iter {
                ($iter:expr, $size: expr, $t:ty, $bytes:expr, $value:expr) => {
                    let reg_ref = $iter.next().unwrap();
                    assert_eq!(reg_ref.id, u64::from($size));
                    assert_eq!(reg_ref.value::<$t, $bytes>(), $value);
                };
            }
            let mut regs_iter = v.iter();
            test_iter!(regs_iter, KVM_REG_SIZE_U8, u8, 1, 1);
            test_iter!(regs_iter, KVM_REG_SIZE_U16, u16, 2, 2);
            test_iter!(regs_iter, KVM_REG_SIZE_U32, u32, 4, 3);
            test_iter!(regs_iter, KVM_REG_SIZE_U64, u64, 8, 4);
            test_iter!(regs_iter, KVM_REG_SIZE_U128, u128, 16, 5);
            test_iter!(regs_iter, KVM_REG_SIZE_U256, [u8; 32], 32, [6; 32]);
            test_iter!(regs_iter, KVM_REG_SIZE_U512, [u8; 64], 64, [7; 64]);
            test_iter!(regs_iter, KVM_REG_SIZE_U1024, [u8; 128], 128, [8; 128]);
            test_iter!(regs_iter, KVM_REG_SIZE_U2048, [u8; 256], 256, [9; 256]);
            assert!(regs_iter.next().is_none());
        }
        // Test iter mut
        {
            {
                macro_rules! update_value {
                    ($iter:expr, $t:ty, $bytes:expr) => {
                        let mut reg_ref = $iter.next().unwrap();
                        reg_ref.set_value(reg_ref.value::<$t, $bytes>() - 1);
                    };
                }
                let mut regs_iter_mut = v.iter_mut();
                update_value!(regs_iter_mut, u8, 1);
                update_value!(regs_iter_mut, u16, 2);
                update_value!(regs_iter_mut, u32, 4);
                update_value!(regs_iter_mut, u64, 8);
                update_value!(regs_iter_mut, u128, 16);
            }
            {
                macro_rules! test_iter {
                    ($iter:expr, $t:ty, $bytes:expr, $value:expr) => {
                        let reg_ref = $iter.next().unwrap();
                        assert_eq!(reg_ref.value::<$t, $bytes>(), $value);
                    };
                }
                let mut regs_iter = v.iter();
                test_iter!(regs_iter, u8, 1, 0);
                test_iter!(regs_iter, u16, 2, 1);
                test_iter!(regs_iter, u32, 4, 2);
                test_iter!(regs_iter, u64, 8, 3);
                test_iter!(regs_iter, u128, 16, 4);
            }
        }
    }

    #[test]
    fn test_reg_ref() {
        let bytes = 69_u64.to_le_bytes();
        let reg_ref = Aarch64RegisterRef::new(KVM_REG_SIZE_U64, &bytes);
        assert_eq!(usize::from(reg_ref.size()), 8);
        assert_eq!(reg_ref.value::<u64, 8>(), 69);
    }

    #[test]
    fn test_reg_ref_value_str() {
        let bytes = 0x10_u8.to_le_bytes();
        let reg_ref = Aarch64RegisterRef::new(KVM_REG_SIZE_U8 as u64, &bytes);
        assert_eq!(reg_ref.value_str(), "0x10");
        let bytes = 0x1020_u16.to_le_bytes();
        let reg_ref = Aarch64RegisterRef::new(KVM_REG_SIZE_U16, &bytes);
        assert_eq!(reg_ref.value_str(), "0x1020");
        let bytes = 0x10203040_u32.to_le_bytes();
        let reg_ref = Aarch64RegisterRef::new(KVM_REG_SIZE_U32, &bytes);
        assert_eq!(reg_ref.value_str(), "0x10203040");
        let bytes = 0x1020304050607080_u64.to_le_bytes();
        let reg_ref = Aarch64RegisterRef::new(KVM_REG_SIZE_U64, &bytes);
        assert_eq!(reg_ref.value_str(), "0x1020304050607080");
        let bytes = [
            0x71, 0x61, 0x51, 0x41, 0x31, 0x21, 0x11, 0x90, 0x80, 0x70, 0x60, 0x50, 0x40, 0x30,
            0x20, 0x10,
        ];
        let reg_ref = Aarch64RegisterRef::new(KVM_REG_SIZE_U128, &bytes);
        assert_eq!(reg_ref.value_str(), "0x10203040506070809011213141516171");
    }

    /// Should panic because ID has different size from a slice length.
    /// - Size in ID: 128
    /// - Length of slice: 1
    #[test]
    #[should_panic]
    fn test_reg_ref_new_must_panic() {
        let _ = Aarch64RegisterRef::new(KVM_REG_SIZE_U128, &[0; 1]);
    }

    /// Should panic because of incorrect cast to value.
    /// - Reference contains 64 bit register
    /// - Casting to 128 bits.
    #[test]
    #[should_panic]
    fn test_reg_ref_value_must_panic() {
        let bytes = 69_u64.to_le_bytes();
        let reg_ref = Aarch64RegisterRef::new(KVM_REG_SIZE_U64, &bytes);
        assert_eq!(reg_ref.value::<u128, 16>(), 69);
    }

    #[test]
    fn test_reg_ref_mut() {
        let mut bytes = 69_u64.to_le_bytes();
        let mut reg_ref = Aarch64RegisterRefMut::new(KVM_REG_SIZE_U64, &mut bytes);
        assert_eq!(usize::from(reg_ref.size()), 8);
        assert_eq!(reg_ref.value::<u64, 8>(), 69);
        reg_ref.set_value(reg_ref.value::<u64, 8>() + 1);
        assert_eq!(reg_ref.value::<u64, 8>(), 70);
    }

    /// Should panic because ID has different size from a slice length.
    /// - Size in ID: 128
    /// - Length of slice: 1
    #[test]
    #[should_panic]
    fn test_reg_ref_mut_new_must_panic() {
        let _ = Aarch64RegisterRefMut::new(KVM_REG_SIZE_U128, &mut [0; 1]);
    }

    /// Should panic because of incorrect cast to value.
    /// - Reference contains 64 bit register
    /// - Casting to 128 bits.
    #[test]
    #[should_panic]
    fn test_reg_ref_mut_must_panic() {
        let mut bytes = 69_u64.to_le_bytes();
        let reg_ref = Aarch64RegisterRefMut::new(KVM_REG_SIZE_U64, &mut bytes);
        assert_eq!(reg_ref.value::<u128, 16>(), 69);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/gic/mod.rs | src/vmm/src/arch/aarch64/gic/mod.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
mod gicv2;
mod gicv3;
mod regs;
use gicv2::GICv2;
use gicv3::GICv3;
use kvm_ioctls::{DeviceFd, VmFd};
pub use regs::GicState;
use super::layout;
/// Represent a V2 or V3 GIC device
#[derive(Debug)]
pub struct GIC {
/// The file descriptor for the KVM device
fd: DeviceFd,
/// GIC device properties, to be used for setting up the fdt entry
properties: [u64; 4],
/// MSI properties of the GIC device
msi_properties: Option<[u64; 2]>,
/// Number of CPUs handled by the device
vcpu_count: u64,
/// ITS device
its_device: Option<DeviceFd>,
}
impl GIC {
/// Returns the file descriptor of the GIC device
pub fn device_fd(&self) -> &DeviceFd {
&self.fd
}
/// Returns an array with GIC device properties
pub fn device_properties(&self) -> &[u64] {
&self.properties
}
/// Returns the number of vCPUs this GIC handles
pub fn vcpu_count(&self) -> u64 {
self.vcpu_count
}
}
/// Errors thrown while setting up the GIC.
#[derive(Debug, thiserror::Error, displaydoc::Display, PartialEq, Eq)]
pub enum GicError {
/// Error while calling KVM ioctl for setting up the global interrupt controller: {0}
CreateGIC(kvm_ioctls::Error),
/// Error while setting or getting device attributes for the GIC: {0}, {1}, {2}
DeviceAttribute(kvm_ioctls::Error, bool, u32),
/// The number of vCPUs in the GicState doesn't match the number of vCPUs on the system.
InconsistentVcpuCount,
/// The VgicSysRegsState is invalid.
InvalidVgicSysRegState,
}
/// List of implemented GICs.
#[derive(Debug)]
pub enum GICVersion {
/// Legacy version.
GICV2,
/// GICV3 without ITS.
GICV3,
}
/// Trait for GIC devices.
#[derive(Debug)]
pub enum GICDevice {
/// Legacy version.
V2(GICv2),
/// GICV3 without ITS.
V3(GICv3),
}
// Each method simply forwards to the wrapped per-version device.
impl GICDevice {
    /// Returns the file descriptor of the GIC device
    pub fn device_fd(&self) -> &DeviceFd {
        match self {
            Self::V2(x) => x.device_fd(),
            Self::V3(x) => x.device_fd(),
        }
    }
    /// Returns the file descriptor of the ITS device, if any
    // Only the GICv3 variant can carry an ITS; GICv2 always answers `None`.
    pub fn its_fd(&self) -> Option<&DeviceFd> {
        match self {
            Self::V2(_) => None,
            Self::V3(x) => x.its_device.as_ref(),
        }
    }
    /// Returns an array with GIC device properties
    pub fn device_properties(&self) -> &[u64] {
        match self {
            Self::V2(x) => x.device_properties(),
            Self::V3(x) => x.device_properties(),
        }
    }
    /// Returns an array with MSI properties if GIC supports it
    pub fn msi_properties(&self) -> Option<&[u64; 2]> {
        match self {
            Self::V2(x) => x.msi_properties.as_ref(),
            Self::V3(x) => x.msi_properties.as_ref(),
        }
    }
    /// Returns the number of vCPUs this GIC handles
    pub fn vcpu_count(&self) -> u64 {
        match self {
            Self::V2(x) => x.vcpu_count(),
            Self::V3(x) => x.vcpu_count(),
        }
    }
    /// Returns the fdt compatibility property of the device
    pub fn fdt_compatibility(&self) -> &str {
        match self {
            Self::V2(x) => x.fdt_compatibility(),
            Self::V3(x) => x.fdt_compatibility(),
        }
    }
    /// Returns the maint_irq fdt property of the device
    pub fn fdt_maint_irq(&self) -> u32 {
        match self {
            Self::V2(x) => x.fdt_maint_irq(),
            Self::V3(x) => x.fdt_maint_irq(),
        }
    }
    /// Returns the GIC version of the device
    // Delegates to the per-version `VERSION` constants (KVM device type ids).
    pub fn version(&self) -> u32 {
        match self {
            Self::V2(_) => GICv2::VERSION,
            Self::V3(_) => GICv3::VERSION,
        }
    }
    /// Setup the device-specific attributes
    // Associated fn (not `&self`) to mirror the per-version
    // `init_device_attributes` associated functions it forwards to.
    pub fn init_device_attributes(gic_device: &Self) -> Result<(), GicError> {
        match gic_device {
            Self::V2(x) => GICv2::init_device_attributes(x),
            Self::V3(x) => GICv3::init_device_attributes(x),
        }
    }
    /// Method to save the state of the GIC device.
    pub fn save_device(&self, mpidrs: &[u64]) -> Result<GicState, GicError> {
        match self {
            Self::V2(x) => x.save_device(mpidrs),
            Self::V3(x) => x.save_device(mpidrs),
        }
    }
    /// Method to restore the state of the GIC device.
    pub fn restore_device(&self, mpidrs: &[u64], state: &GicState) -> Result<(), GicError> {
        match self {
            Self::V2(x) => x.restore_device(mpidrs, state),
            Self::V3(x) => x.restore_device(mpidrs, state),
        }
    }
}
/// Create a GIC device.
///
/// With `version: None`, a GICv3 device is attempted first and, on failure,
/// a GICv2 device is tried as a fall-back. With `version: Some(..)`, only the
/// requested version is attempted.
pub fn create_gic(
    vm: &VmFd,
    vcpu_count: u64,
    version: Option<GICVersion>,
) -> Result<GICDevice, GicError> {
    match version {
        Some(GICVersion::GICV2) => Ok(GICDevice::V2(GICv2::create(vm, vcpu_count)?)),
        Some(GICVersion::GICV3) => Ok(GICDevice::V3(GICv3::create(vm, vcpu_count)?)),
        None => match GICv3::create(vm, vcpu_count) {
            Ok(v3) => Ok(GICDevice::V3(v3)),
            // GICv3 unavailable on this host; fall back to the legacy GICv2.
            Err(_) => Ok(GICDevice::V2(GICv2::create(vm, vcpu_count)?)),
        },
    }
}
#[cfg(test)]
mod tests {
    use kvm_ioctls::Kvm;
    use super::*;
    #[test]
    fn test_create_gic() {
        // Smoke test: with `version: None`, creation tries GICv3 then falls
        // back to GICv2, so it should succeed on any host supporting either.
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        create_gic(&vm, 1, None).unwrap();
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/gic/regs.rs | src/vmm/src/arch/aarch64/gic/regs.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fmt::Debug;
use std::iter::StepBy;
use std::ops::Range;
use kvm_bindings::kvm_device_attr;
use kvm_ioctls::DeviceFd;
use serde::{Deserialize, Serialize};
use crate::arch::aarch64::gic::GicError;
use crate::arch::aarch64::gic::gicv3::regs::its_regs::ItsRegisterState;
/// Raw state of a single GIC register, read/written as a sequence of
/// fixed-width chunks (`u32` or `u64` depending on the register family).
#[derive(Debug, Serialize, Deserialize)]
pub struct GicRegState<T> {
    pub(crate) chunks: Vec<T>,
}
/// Structure for serializing the state of the Vgic ICC regs
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct VgicSysRegsState {
    // ICC registers that are always implemented.
    pub main_icc_regs: Vec<GicRegState<u64>>,
    // Active-priority (AP) registers; `None` for registers not implemented
    // at the vCPU's number of priority bits.
    pub ap_icc_regs: Vec<Option<GicRegState<u64>>>,
}
/// Structure used for serializing the state of the GIC registers.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct GicState {
    /// The state of the distributor registers.
    pub dist: Vec<GicRegState<u32>>,
    /// The state of the vcpu interfaces.
    // One entry per vCPU, in the same order as the mpidr list used at save.
    pub gic_vcpu_states: Vec<GicVcpuState>,
    /// The state of the ITS device. Only present with GICv3.
    pub its_state: Option<ItsRegisterState>,
}
/// Structure used for serializing the state of the GIC registers for a specific vCPU.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct GicVcpuState {
    /// Redistributor register state for this vCPU.
    pub rdist: Vec<GicRegState<u32>>,
    /// CPU-interface (ICC) system register state for this vCPU.
    pub icc: VgicSysRegsState,
}
/// A memory-mapped GIC register (or register series).
pub(crate) trait MmioReg {
    /// Byte range occupied by the register, relative to the component base.
    fn range(&self) -> Range<u64>;
    /// Iterates over the range in `size_of::<T>()`-sized steps, yielding one
    /// offset per chunk to read or write.
    fn iter<T>(&self) -> StepBy<Range<u64>>
    where
        Self: Sized,
    {
        self.range().step_by(std::mem::size_of::<T>())
    }
}
/// Engine for reading/writing a family of vGIC registers through the KVM
/// device-attribute API, one chunk at a time.
pub(crate) trait VgicRegEngine {
    /// Register descriptor type for this family.
    type Reg: MmioReg;
    /// Width of a single access ("chunk") for this family.
    type RegChunk: Clone + Default;
    /// KVM device attribute group used for this family.
    fn group() -> u32;
    /// Mask applied to the mpidr before OR-ing it into the attribute id;
    /// zero for register families that are not per-vCPU.
    fn mpidr_mask() -> u64 {
        0
    }
    /// Builds the `kvm_device_attr` addressing chunk `offset` for the vCPU
    /// identified by `mpidr`; data is transferred through `val`.
    fn kvm_device_attr(offset: u64, val: &mut Self::RegChunk, mpidr: u64) -> kvm_device_attr {
        kvm_device_attr {
            group: Self::group(),
            attr: (mpidr & Self::mpidr_mask()) | offset,
            addr: val as *mut Self::RegChunk as u64,
            flags: 0,
        }
    }
    /// Reads all chunks of one register.
    #[inline]
    fn get_reg_data(
        fd: &DeviceFd,
        reg: &Self::Reg,
        mpidr: u64,
    ) -> Result<GicRegState<Self::RegChunk>, GicError>
    where
        Self: Sized,
    {
        let mut data = Vec::with_capacity(reg.iter::<Self::RegChunk>().count());
        for offset in reg.iter::<Self::RegChunk>() {
            let mut val = Self::RegChunk::default();
            // SAFETY: `val` is a mutable memory location sized correctly for the attribute we're
            // requesting
            unsafe {
                fd.get_device_attr(&mut Self::kvm_device_attr(offset, &mut val, mpidr))
                    .map_err(|err| GicError::DeviceAttribute(err, false, Self::group()))?;
            }
            data.push(val);
        }
        Ok(GicRegState { chunks: data })
    }
    /// Reads the state of several registers of the same family.
    fn get_regs_data(
        fd: &DeviceFd,
        regs: Box<dyn Iterator<Item = &Self::Reg>>,
        mpidr: u64,
    ) -> Result<Vec<GicRegState<Self::RegChunk>>, GicError>
    where
        Self: Sized,
    {
        let mut data = Vec::new();
        for reg in regs {
            data.push(Self::get_reg_data(fd, reg, mpidr)?);
        }
        Ok(data)
    }
    /// Writes all chunks of one register from the saved state.
    #[inline]
    fn set_reg_data(
        fd: &DeviceFd,
        reg: &Self::Reg,
        data: &GicRegState<Self::RegChunk>,
        mpidr: u64,
    ) -> Result<(), GicError>
    where
        Self: Sized,
    {
        for (offset, val) in reg.iter::<Self::RegChunk>().zip(&data.chunks) {
            // `kvm_device_attr` needs a mutable pointer, so clone the chunk.
            fd.set_device_attr(&Self::kvm_device_attr(offset, &mut val.clone(), mpidr))
                .map_err(|err| GicError::DeviceAttribute(err, true, Self::group()))?;
        }
        Ok(())
    }
    /// Writes the state of several registers of the same family, in order.
    fn set_regs_data(
        fd: &DeviceFd,
        regs: Box<dyn Iterator<Item = &Self::Reg>>,
        data: &[GicRegState<Self::RegChunk>],
        mpidr: u64,
    ) -> Result<(), GicError>
    where
        Self: Sized,
    {
        for (reg, reg_data) in regs.zip(data) {
            Self::set_reg_data(fd, reg, reg_data, mpidr)?;
        }
        Ok(())
    }
}
/// Structure representing a simple register.
// `PartialEq` lets callers compare register descriptors by value
// (used e.g. by the ICC active-priority availability checks).
#[derive(PartialEq)]
pub(crate) struct SimpleReg {
    /// The offset from the component address. The register is memory mapped here.
    offset: u64,
    /// Size in bytes.
    size: u16,
}
impl SimpleReg {
    /// Creates a descriptor for a register of `size` bytes at `offset`.
    pub const fn new(offset: u64, size: u16) -> SimpleReg {
        SimpleReg { offset, size }
    }
}
impl MmioReg for SimpleReg {
    // A simple register occupies a single contiguous `size`-byte span.
    fn range(&self) -> Range<u64> {
        self.offset..self.offset + u64::from(self.size)
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/gic/gicv3/mod.rs | src/vmm/src/arch/aarch64/gic/gicv3/mod.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
pub mod regs;
use kvm_ioctls::{DeviceFd, VmFd};
use crate::arch::aarch64::gic::{GicError, GicState};
/// GICv3 interrupt controller; newtype wrapper around the common `GIC` state.
#[derive(Debug)]
pub struct GICv3(super::GIC);
// Expose the inner `GIC`'s fields and methods directly on `GICv3`.
impl std::ops::Deref for GICv3 {
    type Target = super::GIC;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl std::ops::DerefMut for GICv3 {
    // Mutable access to the wrapped `GIC` (used by `init_its` to attach the ITS fd).
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl GICv3 {
    // Unfortunately bindgen omits defines that are based on other defines.
    // See arch/arm64/include/uapi/asm/kvm.h file from the linux kernel.
    const SZ_64K: u64 = 0x0001_0000;
    const KVM_VGIC_V3_DIST_SIZE: u64 = GICv3::SZ_64K;
    // Each vCPU's redistributor occupies two 64K regions.
    const KVM_VGIC_V3_REDIST_SIZE: u64 = (2 * GICv3::SZ_64K);
    const GIC_V3_ITS_SIZE: u64 = 0x2_0000;
    // Device trees specific constants
    const ARCH_GIC_V3_MAINT_IRQ: u32 = 9;
    /// Get the address of the GIC distributor.
    // The GIC register blocks are laid out downwards from MMIO32_MEM_START:
    // [ITS][redistributors][distributor][MMIO32_MEM_START ...].
    fn get_dist_addr() -> u64 {
        super::layout::MMIO32_MEM_START - GICv3::KVM_VGIC_V3_DIST_SIZE
    }
    /// Get the size of the GIC distributor.
    fn get_dist_size() -> u64 {
        GICv3::KVM_VGIC_V3_DIST_SIZE
    }
    /// Get the address of the GIC redistributors.
    fn get_redists_addr(vcpu_count: u64) -> u64 {
        GICv3::get_dist_addr() - GICv3::get_redists_size(vcpu_count)
    }
    /// Get the size of the GIC redistributors.
    fn get_redists_size(vcpu_count: u64) -> u64 {
        vcpu_count * GICv3::KVM_VGIC_V3_REDIST_SIZE
    }
    /// Get the MSI address
    fn get_msi_address(vcpu_count: u64) -> u64 {
        Self::get_redists_addr(vcpu_count) - GICv3::GIC_V3_ITS_SIZE
    }
    /// Get the MSI size
    const fn get_msi_size() -> u64 {
        GICv3::GIC_V3_ITS_SIZE
    }
    /// KVM device type id for a GICv3.
    pub const VERSION: u32 = kvm_bindings::kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_V3;
    /// Device-tree `compatible` string for this GIC.
    pub fn fdt_compatibility(&self) -> &str {
        "arm,gic-v3"
    }
    /// Maintenance interrupt number exposed through the device tree.
    pub fn fdt_maint_irq(&self) -> u32 {
        GICv3::ARCH_GIC_V3_MAINT_IRQ
    }
    /// Create the GIC device object
    // Note: this does NOT attach the ITS; `its_device` stays `None` until
    // `init_its` runs (see `create`).
    pub fn create_device(vm: &VmFd, vcpu_count: u64) -> Result<Self, GicError> {
        // Create the GIC device
        let mut gic_device = kvm_bindings::kvm_create_device {
            type_: Self::VERSION,
            fd: 0,
            flags: 0,
        };
        let gic_fd = vm
            .create_device(&mut gic_device)
            .map_err(GicError::CreateGIC)?;
        Ok(GICv3(super::GIC {
            fd: gic_fd,
            properties: [
                GICv3::get_dist_addr(),
                GICv3::get_dist_size(),
                GICv3::get_redists_addr(vcpu_count),
                GICv3::get_redists_size(vcpu_count),
            ],
            msi_properties: Some([GICv3::get_msi_address(vcpu_count), GICv3::get_msi_size()]),
            vcpu_count,
            its_device: None,
        }))
    }
    /// Saves the GIC and ITS state.
    // NOTE(review): the `unwrap` assumes the device was built via `create`
    // (which always sets `its_device`); one built via `create_device` alone
    // would panic here.
    pub fn save_device(&self, mpidrs: &[u64]) -> Result<GicState, GicError> {
        regs::save_state(&self.fd, self.its_device.as_ref().unwrap(), mpidrs)
    }
    /// Restores the GIC and ITS state.
    pub fn restore_device(&self, mpidrs: &[u64], state: &GicState) -> Result<(), GicError> {
        regs::restore_state(&self.fd, self.its_device.as_ref().unwrap(), mpidrs, state)
    }
    /// Programs the distributor and redistributor base addresses into KVM.
    pub fn init_device_attributes(gic_device: &Self) -> Result<(), GicError> {
        // Setting up the distributor attribute.
        // We are placing the GIC below 1GB so we need to subtract the size of the distributor.
        Self::set_device_attribute(
            gic_device.device_fd(),
            kvm_bindings::KVM_DEV_ARM_VGIC_GRP_ADDR,
            u64::from(kvm_bindings::KVM_VGIC_V3_ADDR_TYPE_DIST),
            &GICv3::get_dist_addr() as *const u64 as u64,
            0,
        )?;
        // Setting up the redistributors' attribute.
        // We are calculating here the start of the redistributors address. We have one per CPU.
        Self::set_device_attribute(
            gic_device.device_fd(),
            kvm_bindings::KVM_DEV_ARM_VGIC_GRP_ADDR,
            u64::from(kvm_bindings::KVM_VGIC_V3_ADDR_TYPE_REDIST),
            &GICv3::get_redists_addr(gic_device.vcpu_count()) as *const u64 as u64,
            0,
        )?;
        Ok(())
    }
    // Creates the ITS device, programs its MMIO address, initializes it and
    // stores its fd on the wrapped `GIC`.
    fn init_its(vm: &VmFd, gic_device: &mut Self) -> Result<(), GicError> {
        // ITS part attributes
        let mut its_device = kvm_bindings::kvm_create_device {
            type_: kvm_bindings::kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_ITS,
            fd: 0,
            flags: 0,
        };
        let its_fd = vm
            .create_device(&mut its_device)
            .map_err(GicError::CreateGIC)?;
        // Setting up the ITS attributes
        Self::set_device_attribute(
            &its_fd,
            kvm_bindings::KVM_DEV_ARM_VGIC_GRP_ADDR,
            u64::from(kvm_bindings::KVM_VGIC_ITS_ADDR_TYPE),
            &Self::get_msi_address(gic_device.vcpu_count()) as *const u64 as u64,
            0,
        )?;
        Self::set_device_attribute(
            &its_fd,
            kvm_bindings::KVM_DEV_ARM_VGIC_GRP_CTRL,
            u64::from(kvm_bindings::KVM_DEV_ARM_VGIC_CTRL_INIT),
            0,
            0,
        )?;
        gic_device.its_device = Some(its_fd);
        Ok(())
    }
    /// Method to initialize the GIC device
    pub fn create(vm: &VmFd, vcpu_count: u64) -> Result<Self, GicError> {
        let mut device = Self::create_device(vm, vcpu_count)?;
        Self::init_device_attributes(&device)?;
        Self::init_its(vm, &mut device)?;
        Self::finalize_device(&device)?;
        Ok(device)
    }
    /// Finalize the setup of a GIC device
    pub fn finalize_device(gic_device: &Self) -> Result<(), GicError> {
        // On arm there are 3 types of interrupts: SGI (0-15), PPI (16-31), SPI (32-1020).
        // SPIs are used to signal interrupts from various peripherals accessible across
        // the whole system so these are the ones that we increment when adding a new virtio device.
        // KVM_DEV_ARM_VGIC_GRP_NR_IRQS sets the number of interrupts (SGI, PPI, and SPI).
        // Consequently, we need to add 32 to the number of SPIs ("legacy GSI").
        let nr_irqs: u32 = crate::arch::GSI_LEGACY_NUM + super::layout::SPI_START;
        let nr_irqs_ptr = &nr_irqs as *const u32;
        Self::set_device_attribute(
            gic_device.device_fd(),
            kvm_bindings::KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
            0,
            nr_irqs_ptr as u64,
            0,
        )?;
        // Finalize the GIC.
        // See https://code.woboq.org/linux/linux/virt/kvm/arm/vgic/vgic-kvm-device.c.html#211.
        Self::set_device_attribute(
            gic_device.device_fd(),
            kvm_bindings::KVM_DEV_ARM_VGIC_GRP_CTRL,
            u64::from(kvm_bindings::KVM_DEV_ARM_VGIC_CTRL_INIT),
            0,
            0,
        )?;
        Ok(())
    }
    /// Set a GIC device attribute
    pub fn set_device_attribute(
        fd: &DeviceFd,
        group: u32,
        attr: u64,
        addr: u64,
        flags: u32,
    ) -> Result<(), GicError> {
        let attr = kvm_bindings::kvm_device_attr {
            flags,
            group,
            attr,
            addr,
        };
        fd.set_device_attr(&attr)
            .map_err(|err| GicError::DeviceAttribute(err, true, group))?;
        Ok(())
    }
}
/// Flushes the redistributors' pending tables into guest RAM.
///
/// Invoked whenever the VM gets stopped, so that a subsequent memory
/// snapshot contains the tables.
fn save_pending_tables(gic_device: &DeviceFd) -> Result<(), GicError> {
    let group = kvm_bindings::KVM_DEV_ARM_VGIC_GRP_CTRL;
    let save_attr = kvm_bindings::kvm_device_attr {
        group,
        attr: u64::from(kvm_bindings::KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES),
        addr: 0,
        flags: 0,
    };
    gic_device
        .set_device_attr(&save_attr)
        .map_err(|err| GicError::DeviceAttribute(err, true, group))
}
#[cfg(test)]
mod tests {
    #![allow(clippy::undocumented_unsafe_blocks)]
    use kvm_ioctls::Kvm;
    use super::*;
    use crate::arch::aarch64::gic::{GICVersion, create_gic};
    #[test]
    fn test_save_pending_tables() {
        use std::os::unix::io::AsRawFd;
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        let gic = create_gic(&vm, 1, Some(GICVersion::GICV3)).expect("Cannot create gic");
        save_pending_tables(gic.device_fd()).unwrap();
        // Close the fd behind the wrapper's back so the next ioctl fails with
        // EBADF (errno 9), exercising the error path.
        unsafe { libc::close(gic.device_fd().as_raw_fd()) };
        let res = save_pending_tables(gic.device_fd());
        assert_eq!(
            format!("{:?}", res.unwrap_err()),
            "DeviceAttribute(Error(9), true, 4)"
        );
        // dropping gic_fd would double close the gic fd, so leak it
        std::mem::forget(gic);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/gic/gicv3/regs/its_regs.rs | src/vmm/src/arch/aarch64/gic/gicv3/regs/its_regs.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use kvm_bindings::{
KVM_DEV_ARM_ITS_RESTORE_TABLES, KVM_DEV_ARM_ITS_SAVE_TABLES, KVM_DEV_ARM_VGIC_GRP_CTRL,
KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
};
use kvm_ioctls::DeviceFd;
use serde::{Deserialize, Serialize};
use crate::arch::aarch64::gic::GicError;
// ITS registers that we want to preserve across snapshots
// (offsets within the KVM_DEV_ARM_VGIC_GRP_ITS_REGS attribute group).
const GITS_CTLR: u32 = 0x0000;
const GITS_IIDR: u32 = 0x0004;
const GITS_CBASER: u32 = 0x0080;
const GITS_CWRITER: u32 = 0x0088;
const GITS_CREADR: u32 = 0x0090;
// First of eight GITS_BASER<n> registers, spaced 8 bytes apart.
const GITS_BASER: u32 = 0x0100;
/// Writes a single u64 value to a device attribute of the ITS device.
fn set_device_attribute(
    its_device: &DeviceFd,
    group: u32,
    attr: u32,
    val: u64,
) -> Result<(), GicError> {
    // `val` lives on our stack for the duration of the ioctl call below.
    let kvm_attr = kvm_bindings::kvm_device_attr {
        group,
        attr: u64::from(attr),
        addr: &val as *const u64 as u64,
        flags: 0,
    };
    its_device
        .set_device_attr(&kvm_attr)
        .map_err(|err| GicError::DeviceAttribute(err, true, group))
}
/// Reads a single u64 value from a device attribute of the ITS device.
fn get_device_attribute(its_device: &DeviceFd, group: u32, attr: u32) -> Result<u64, GicError> {
    let mut val = 0;
    let mut gicv3_its_attr = kvm_bindings::kvm_device_attr {
        group,
        attr: attr as u64,
        // KVM writes the attribute value through this pointer.
        addr: &mut val as *mut u64 as u64,
        flags: 0,
    };
    // SAFETY: gicv3_its_attr.addr is safe to write to.
    unsafe { its_device.get_device_attr(&mut gicv3_its_attr) }
        .map_err(|err| GicError::DeviceAttribute(err, false, group))?;
    Ok(val)
}
/// Reads one ITS register through the `KVM_DEV_ARM_VGIC_GRP_ITS_REGS` group.
fn its_read_register(its_fd: &DeviceFd, attr: u32) -> Result<u64, GicError> {
    get_device_attribute(its_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, attr)
}
/// Writes one ITS register through the `KVM_DEV_ARM_VGIC_GRP_ITS_REGS` group.
fn its_set_register(its_fd: &DeviceFd, attr: u32, val: u64) -> Result<(), GicError> {
    set_device_attribute(its_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, attr, val)
}
/// Asks KVM to flush the ITS tables into guest memory.
pub fn its_save_tables(its_fd: &DeviceFd) -> Result<(), GicError> {
    set_device_attribute(
        its_fd,
        KVM_DEV_ARM_VGIC_GRP_CTRL,
        KVM_DEV_ARM_ITS_SAVE_TABLES,
        0,
    )
}
/// Asks KVM to reload the ITS tables from guest memory.
pub fn its_restore_tables(its_fd: &DeviceFd) -> Result<(), GicError> {
    set_device_attribute(
        its_fd,
        KVM_DEV_ARM_VGIC_GRP_CTRL,
        KVM_DEV_ARM_ITS_RESTORE_TABLES,
        0,
    )
}
/// ITS registers that we save/restore during snapshot
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct ItsRegisterState {
    // Value of GITS_IIDR.
    iidr: u64,
    // Value of GITS_CBASER.
    cbaser: u64,
    // Values of GITS_CREADR / GITS_CWRITER.
    creadr: u64,
    cwriter: u64,
    // Values of GITS_BASER<0..7>.
    baser: [u64; 8],
    // Value of GITS_CTLR; must be restored last (see `restore`).
    ctlr: u64,
}
impl ItsRegisterState {
    /// Save ITS state
    pub fn save(its_fd: &DeviceFd) -> Result<Self, GicError> {
        let mut state = ItsRegisterState::default();
        // Eight GITS_BASER<n> registers, 8 bytes apart.
        for i in 0..8 {
            state.baser[i as usize] = its_read_register(its_fd, GITS_BASER + i * 8)?;
        }
        state.ctlr = its_read_register(its_fd, GITS_CTLR)?;
        state.cbaser = its_read_register(its_fd, GITS_CBASER)?;
        state.creadr = its_read_register(its_fd, GITS_CREADR)?;
        state.cwriter = its_read_register(its_fd, GITS_CWRITER)?;
        state.iidr = its_read_register(its_fd, GITS_IIDR)?;
        Ok(state)
    }
    /// Restore ITS state
    ///
    /// We need to restore ITS registers in a very specific order for things to work. Take a look
    /// at:
    /// https://elixir.bootlin.com/linux/v6.1.141/source/Documentation/virt/kvm/devices/arm-vgic-its.rst#L60
    /// and
    /// https://elixir.bootlin.com/linux/v6.1.141/source/Documentation/virt/kvm/devices/arm-vgic-its.rst#L123
    ///
    /// for more details, but TL;DR is:
    ///
    /// We need to restore GITS_CBASER, GITS_CREADER, GITS_CWRITER, GITS_BASER and GITS_IIDR
    /// registers before restoring ITS tables from guest memory. We also need to set GITS_CTLR
    /// last.
    pub fn restore(&self, its_fd: &DeviceFd) -> Result<(), GicError> {
        its_set_register(its_fd, GITS_IIDR, self.iidr)?;
        its_set_register(its_fd, GITS_CBASER, self.cbaser)?;
        its_set_register(its_fd, GITS_CREADR, self.creadr)?;
        its_set_register(its_fd, GITS_CWRITER, self.cwriter)?;
        for i in 0..8 {
            its_set_register(its_fd, GITS_BASER + i * 8, self.baser[i as usize])?;
        }
        // We need to restore saved ITS tables before restoring GITS_CTLR
        its_restore_tables(its_fd)?;
        its_set_register(its_fd, GITS_CTLR, self.ctlr)
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/gic/gicv3/regs/dist_regs.rs | src/vmm/src/arch/aarch64/gic/gicv3/regs/dist_regs.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::ops::Range;
use kvm_bindings::KVM_DEV_ARM_VGIC_GRP_DIST_REGS;
use kvm_ioctls::DeviceFd;
use crate::arch::aarch64::gic::GicError;
use crate::arch::aarch64::gic::regs::{GicRegState, MmioReg, SimpleReg, VgicRegEngine};
use crate::arch::{GSI_LEGACY_NUM, SPI_START};
// Distributor registers as detailed at page 456 from
// https://static.docs.arm.com/ihi0069/c/IHI0069C_gic_architecture_specification.pdf.
// Address offsets are relative to the Distributor base address defined
// by the system memory map.
const GICD_CTLR: DistReg = DistReg::simple(0x0, 4);
const GICD_STATUSR: DistReg = DistReg::simple(0x0010, 4);
// For the `shared_irq` registers the second argument is bits-per-interrupt.
const GICD_IGROUPR: DistReg = DistReg::shared_irq(0x0080, 1);
const GICD_ISENABLER: DistReg = DistReg::shared_irq(0x0100, 1);
const GICD_ICENABLER: DistReg = DistReg::shared_irq(0x0180, 1);
const GICD_ISPENDR: DistReg = DistReg::shared_irq(0x0200, 1);
const GICD_ICPENDR: DistReg = DistReg::shared_irq(0x0280, 1);
const GICD_ISACTIVER: DistReg = DistReg::shared_irq(0x0300, 1);
const GICD_ICACTIVER: DistReg = DistReg::shared_irq(0x0380, 1);
const GICD_IPRIORITYR: DistReg = DistReg::shared_irq(0x0400, 8);
const GICD_ICFGR: DistReg = DistReg::shared_irq(0x0C00, 2);
const GICD_IROUTER: DistReg = DistReg::shared_irq(0x6000, 64);
// List with relevant distributor registers that we will be restoring.
// Order is taken from qemu.
// Criteria for the present list of registers: only R/W registers, implementation specific registers
// are not saved. GICD_CPENDSGIR and GICD_SPENDSGIR are not saved since these registers are not used
// when affinity routing is enabled. Affinity routing GICv3 is enabled by default unless Firecracker
// clears the ICD_CTLR.ARE bit which it does not do.
// Save and restore both iterate this slice in order (see get/set_dist_regs).
static VGIC_DIST_REGS: &[DistReg] = &[
    GICD_CTLR,
    GICD_STATUSR,
    GICD_ICENABLER,
    GICD_ISENABLER,
    GICD_IGROUPR,
    GICD_IROUTER,
    GICD_ICFGR,
    GICD_ICPENDR,
    GICD_ISPENDR,
    GICD_ICACTIVER,
    GICD_ISACTIVER,
    GICD_IPRIORITYR,
];
/// Some registers have variable lengths since they dedicate a specific number of bits to
/// each interrupt. So, their length depends on the number of interrupts.
/// (i.e the ones that are represented as GICD_REG<n>) in the documentation mentioned above.
// The actual byte range is derived in the `MmioReg` impl below.
pub struct SharedIrqReg {
    /// The offset from the component address. The register is memory mapped here.
    offset: u64,
    /// Number of bits per interrupt.
    bits_per_irq: u8,
}
impl MmioReg for SharedIrqReg {
    fn range(&self) -> Range<u64> {
        // The ARM® TrustZone® implements a protection logic which contains a
        // read-as-zero/write-ignore (RAZ/WI) policy.
        // The first part of a shared-irq register, the one corresponding to the
        // SGI and PPI IRQs (0-32) is RAZ/WI, so we skip it.
        let bits_per_irq = u64::from(self.bits_per_irq);
        let start = self.offset + u64::from(SPI_START) * bits_per_irq / 8;
        // Total size in bits for the SPI ("legacy GSI") range, rounded up to
        // whole bytes.
        let size_in_bits = bits_per_irq * u64::from(GSI_LEGACY_NUM);
        let size_in_bytes = (size_in_bits + 7) / 8;
        start..start + size_in_bytes
    }
}
// A distributor register: either a fixed-size register or a
// bits-per-interrupt ("shared irq") register series.
enum DistReg {
    Simple(SimpleReg),
    SharedIrq(SharedIrqReg),
}
impl DistReg {
    // Descriptor for a fixed-size register of `size` bytes at `offset`.
    const fn simple(offset: u64, size: u16) -> DistReg {
        DistReg::Simple(SimpleReg::new(offset, size))
    }
    // Descriptor for a register series with `bits_per_irq` bits per interrupt.
    const fn shared_irq(offset: u64, bits_per_irq: u8) -> DistReg {
        DistReg::SharedIrq(SharedIrqReg {
            offset,
            bits_per_irq,
        })
    }
}
impl MmioReg for DistReg {
    // Delegate to the concrete descriptor's range computation.
    fn range(&self) -> Range<u64> {
        match self {
            DistReg::Simple(reg) => reg.range(),
            DistReg::SharedIrq(reg) => reg.range(),
        }
    }
}
// Register engine for the distributor family (32-bit chunks).
struct DistRegEngine {}
impl VgicRegEngine for DistRegEngine {
    type Reg = DistReg;
    type RegChunk = u32;
    fn group() -> u32 {
        KVM_DEV_ARM_VGIC_GRP_DIST_REGS
    }
    // Distributor registers are global, not per-vCPU: no mpidr bits.
    fn mpidr_mask() -> u64 {
        0
    }
}
/// Reads the state of all registers in `VGIC_DIST_REGS`, in order.
pub(crate) fn get_dist_regs(fd: &DeviceFd) -> Result<Vec<GicRegState<u32>>, GicError> {
    DistRegEngine::get_regs_data(fd, Box::new(VGIC_DIST_REGS.iter()), 0)
}
/// Writes the saved state back to the distributor registers (same order as read).
pub(crate) fn set_dist_regs(fd: &DeviceFd, state: &[GicRegState<u32>]) -> Result<(), GicError> {
    DistRegEngine::set_regs_data(fd, Box::new(VGIC_DIST_REGS.iter()), state, 0)
}
#[cfg(test)]
mod tests {
    #![allow(clippy::undocumented_unsafe_blocks)]
    use std::os::unix::io::AsRawFd;
    use kvm_ioctls::Kvm;
    use super::*;
    use crate::arch::aarch64::gic::{GICVersion, create_gic};
    #[test]
    fn test_access_dist_regs() {
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        let _ = vm.create_vcpu(0).unwrap();
        let gic_fd = create_gic(&vm, 1, Some(GICVersion::GICV3)).expect("Cannot create gic");
        let res = get_dist_regs(gic_fd.device_fd());
        let state = res.unwrap();
        // One entry per register in VGIC_DIST_REGS.
        assert_eq!(state.len(), 12);
        // Check GICD_CTLR size.
        assert_eq!(state[0].chunks.len(), 1);
        let res = set_dist_regs(gic_fd.device_fd(), &state);
        res.unwrap();
        // Close the fd underneath so further ioctls fail with EBADF (errno 9).
        unsafe { libc::close(gic_fd.device_fd().as_raw_fd()) };
        let res = get_dist_regs(gic_fd.device_fd());
        assert_eq!(
            format!("{:?}", res.unwrap_err()),
            "DeviceAttribute(Error(9), false, 1)"
        );
        // dropping gic_fd would double close the gic fd, so leak it
        std::mem::forget(gic_fd);
    }
    #[test]
    fn test_dist_constructors() {
        let simple_dist_reg = DistReg::simple(0, 4);
        let shared_dist_reg = DistReg::shared_irq(0x0010, 2);
        assert_eq!(simple_dist_reg.range(), Range { start: 0, end: 4 });
        assert_eq!(shared_dist_reg.range(), Range { start: 24, end: 48 });
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/gic/gicv3/regs/mod.rs | src/vmm/src/arch/aarch64/gic/gicv3/regs/mod.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
mod dist_regs;
mod icc_regs;
pub mod its_regs;
mod redist_regs;
use its_regs::{ItsRegisterState, its_save_tables};
use kvm_ioctls::DeviceFd;
use crate::arch::aarch64::gic::GicError;
use crate::arch::aarch64::gic::regs::{GicState, GicVcpuState};
/// Save the state of the GIC device.
// Order matters: pending tables and ITS tables are flushed into guest RAM
// first, before the register state is read.
pub fn save_state(
    gic_device: &DeviceFd,
    its_device: &DeviceFd,
    mpidrs: &[u64],
) -> Result<GicState, GicError> {
    // Flush redistributors pending tables to guest RAM.
    super::save_pending_tables(gic_device)?;
    // Flush ITS tables into guest memory.
    its_save_tables(its_device)?;
    let mut vcpu_states = Vec::with_capacity(mpidrs.len());
    for mpidr in mpidrs {
        vcpu_states.push(GicVcpuState {
            rdist: redist_regs::get_redist_regs(gic_device, *mpidr)?,
            icc: icc_regs::get_icc_regs(gic_device, *mpidr)?,
        })
    }
    let its_state = ItsRegisterState::save(its_device)?;
    Ok(GicState {
        dist: dist_regs::get_dist_regs(gic_device)?,
        gic_vcpu_states: vcpu_states,
        its_state: Some(its_state),
    })
}
/// Restore the state of the GIC device.
///
/// # Errors
///
/// Returns [`GicError::InconsistentVcpuCount`] when the number of per-vCPU
/// states in the snapshot doesn't match the number of vCPUs being restored.
pub fn restore_state(
    gic_device: &DeviceFd,
    its_device: &DeviceFd,
    mpidrs: &[u64],
    state: &GicState,
) -> Result<(), GicError> {
    // Validate the snapshot before touching any device state, so an
    // inconsistent snapshot doesn't leave the GIC partially restored.
    // (Previously the distributor registers were written before this check.)
    if mpidrs.len() != state.gic_vcpu_states.len() {
        return Err(GicError::InconsistentVcpuCount);
    }
    dist_regs::set_dist_regs(gic_device, &state.dist)?;
    for (mpidr, vcpu_state) in mpidrs.iter().zip(&state.gic_vcpu_states) {
        redist_regs::set_redist_regs(gic_device, *mpidr, &vcpu_state.rdist)?;
        icc_regs::set_icc_regs(gic_device, *mpidr, &vcpu_state.icc)?;
    }
    // Safe to unwrap here, as we know we support an ITS device, so `its_state.is_some()` is always
    // `true`.
    state.its_state.as_ref().unwrap().restore(its_device)
}
#[cfg(test)]
mod tests {
    #![allow(clippy::undocumented_unsafe_blocks)]
    use kvm_ioctls::Kvm;
    use super::*;
    use crate::arch::aarch64::gic::{GICVersion, create_gic};
    #[test]
    fn test_vm_save_restore_state() {
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        let gic = create_gic(&vm, 1, Some(GICVersion::GICV3)).expect("Cannot create gic");
        let gic_fd = gic.device_fd();
        let its_fd = gic.its_fd().unwrap();
        let mpidr = vec![1];
        let res = save_state(gic_fd, its_fd, &mpidr);
        // We will receive an error if trying to call before creating vcpu.
        assert_eq!(
            format!("{:?}", res.unwrap_err()),
            "DeviceAttribute(Error(22), false, 5)"
        );
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        let _vcpu = vm.create_vcpu(0).unwrap();
        let gic = create_gic(&vm, 1, Some(GICVersion::GICV3)).expect("Cannot create gic");
        let gic_fd = gic.device_fd();
        let its_fd = gic.its_fd().unwrap();
        let vm_state = save_state(gic_fd, its_fd, &mpidr).unwrap();
        let val: u32 = 0;
        let gicd_statusr_off = 0x0010u64;
        let mut gic_dist_attr = kvm_bindings::kvm_device_attr {
            group: kvm_bindings::KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
            attr: gicd_statusr_off,
            addr: &val as *const u32 as u64,
            flags: 0,
        };
        unsafe {
            gic_fd.get_device_attr(&mut gic_dist_attr).unwrap();
        }
        // The second value from the list of distributor registers is the value of the GICD_STATUSR
        // register. We assert that the one saved in the bitmap is the same with the one we
        // obtain with KVM_GET_DEVICE_ATTR.
        let gicd_statusr = &vm_state.dist[1];
        assert_eq!(gicd_statusr.chunks[0], val);
        assert_eq!(vm_state.dist.len(), 12);
        restore_state(gic_fd, its_fd, &mpidr, &vm_state).unwrap();
        // Mismatched vCPU count must be rejected.
        restore_state(gic_fd, its_fd, &[1, 2], &vm_state).unwrap_err();
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/gic/gicv3/regs/icc_regs.rs | src/vmm/src/arch/aarch64/gic/gicv3/regs/icc_regs.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use kvm_bindings::*;
use kvm_ioctls::DeviceFd;
use crate::arch::aarch64::gic::GicError;
use crate::arch::aarch64::gic::regs::{SimpleReg, VgicRegEngine, VgicSysRegsState};
// ICC_CTLR_EL1.PRIbits field, bits [10:8]; the value encodes (priority bits - 1).
const ICC_CTLR_EL1_PRIBITS_SHIFT: u64 = 8;
const ICC_CTLR_EL1_PRIBITS_MASK: u64 = 7 << ICC_CTLR_EL1_PRIBITS_SHIFT;
// These registers are taken from the kernel. Look for `gic_v3_icc_reg_descs`.
const SYS_ICC_SRE_EL1: SimpleReg = SimpleReg::vgic_sys_reg(3, 0, 12, 12, 5);
const SYS_ICC_CTLR_EL1: SimpleReg = SimpleReg::vgic_sys_reg(3, 0, 12, 12, 4);
const SYS_ICC_IGRPEN0_EL1: SimpleReg = SimpleReg::vgic_sys_reg(3, 0, 12, 12, 6);
const SYS_ICC_IGRPEN1_EL1: SimpleReg = SimpleReg::vgic_sys_reg(3, 0, 12, 12, 7);
const SYS_ICC_PMR_EL1: SimpleReg = SimpleReg::vgic_sys_reg(3, 0, 4, 6, 0);
const SYS_ICC_BPR0_EL1: SimpleReg = SimpleReg::vgic_sys_reg(3, 0, 12, 8, 3);
const SYS_ICC_BPR1_EL1: SimpleReg = SimpleReg::vgic_sys_reg(3, 0, 12, 12, 3);
const SYS_ICC_AP0R0_EL1: SimpleReg = SimpleReg::sys_icc_ap0rn_el1(0);
const SYS_ICC_AP0R1_EL1: SimpleReg = SimpleReg::sys_icc_ap0rn_el1(1);
const SYS_ICC_AP0R2_EL1: SimpleReg = SimpleReg::sys_icc_ap0rn_el1(2);
const SYS_ICC_AP0R3_EL1: SimpleReg = SimpleReg::sys_icc_ap0rn_el1(3);
const SYS_ICC_AP1R0_EL1: SimpleReg = SimpleReg::sys_icc_ap1rn_el1(0);
const SYS_ICC_AP1R1_EL1: SimpleReg = SimpleReg::sys_icc_ap1rn_el1(1);
const SYS_ICC_AP1R2_EL1: SimpleReg = SimpleReg::sys_icc_ap1rn_el1(2);
const SYS_ICC_AP1R3_EL1: SimpleReg = SimpleReg::sys_icc_ap1rn_el1(3);
// Registers that are always implemented.
static MAIN_VGIC_ICC_REGS: &[SimpleReg] = &[
    SYS_ICC_SRE_EL1,
    SYS_ICC_CTLR_EL1,
    SYS_ICC_IGRPEN0_EL1,
    SYS_ICC_IGRPEN1_EL1,
    SYS_ICC_PMR_EL1,
    SYS_ICC_BPR0_EL1,
    SYS_ICC_BPR1_EL1,
];
// Active-priority registers; availability depends on the number of
// implemented priority bits (see `is_ap_reg_available`).
static AP_VGIC_ICC_REGS: &[SimpleReg] = &[
    SYS_ICC_AP0R0_EL1,
    SYS_ICC_AP0R1_EL1,
    SYS_ICC_AP0R2_EL1,
    SYS_ICC_AP0R3_EL1,
    SYS_ICC_AP1R0_EL1,
    SYS_ICC_AP1R1_EL1,
    SYS_ICC_AP1R2_EL1,
    SYS_ICC_AP1R3_EL1,
];
impl SimpleReg {
    // Encodes a system-register (op0, op1, CRn, CRm, op2) tuple into the KVM
    // sysreg attribute id, as an 8-byte register descriptor.
    const fn vgic_sys_reg(op0: u64, op1: u64, crn: u64, crm: u64, op2: u64) -> SimpleReg {
        let offset = ((op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT)
            & KVM_REG_ARM64_SYSREG_OP0_MASK as u64)
            | ((op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) & KVM_REG_ARM64_SYSREG_OP1_MASK as u64)
            | ((crn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) & KVM_REG_ARM64_SYSREG_CRN_MASK as u64)
            | ((crm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) & KVM_REG_ARM64_SYSREG_CRM_MASK as u64)
            | ((op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT) & KVM_REG_ARM64_SYSREG_OP2_MASK as u64);
        SimpleReg::new(offset, 8)
    }
    // ICC_AP0R<n>_EL1 descriptor.
    const fn sys_icc_ap0rn_el1(n: u64) -> SimpleReg {
        Self::vgic_sys_reg(3, 0, 12, 8, 4 | n)
    }
    // ICC_AP1R<n>_EL1 descriptor.
    const fn sys_icc_ap1rn_el1(n: u64) -> SimpleReg {
        Self::vgic_sys_reg(3, 0, 12, 9, n)
    }
}
// Register engine for the per-vCPU ICC system registers (64-bit chunks).
struct VgicSysRegEngine {}
impl VgicRegEngine for VgicSysRegEngine {
    type Reg = SimpleReg;
    type RegChunk = u64;
    fn group() -> u32 {
        KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS
    }
    #[allow(clippy::cast_sign_loss)] // bit mask
    fn mpidr_mask() -> u64 {
        KVM_DEV_ARM_VGIC_V3_MPIDR_MASK as u64
    }
}
// Reads ICC_CTLR_EL1 and extracts the number of implemented priority bits
// (the PRIbits field encodes "bits - 1", hence the +1).
fn num_priority_bits(fd: &DeviceFd, mpidr: u64) -> Result<u64, GicError> {
    let reg_val = &VgicSysRegEngine::get_reg_data(fd, &SYS_ICC_CTLR_EL1, mpidr)?.chunks[0];
    Ok(((reg_val & ICC_CTLR_EL1_PRIBITS_MASK) >> ICC_CTLR_EL1_PRIBITS_SHIFT) + 1)
}
/// Tells whether an active-priority register exists for the given number of
/// implemented priority bits.
///
/// As per ARMv8 documentation:
/// https://static.docs.arm.com/ihi0069/c/IHI0069C_gic_architecture_specification.pdf
/// page 178: ICC_AP{0,1}R1_EL1 are only implemented with 6 or more priority
/// bits; ICC_AP{0,1}R{2,3}_EL1 are only implemented with exactly 7.
fn is_ap_reg_available(reg: &SimpleReg, num_priority_bits: u64) -> bool {
    let needs_six_bits = *reg == SYS_ICC_AP0R1_EL1 || *reg == SYS_ICC_AP1R1_EL1;
    let needs_seven_bits = *reg == SYS_ICC_AP0R2_EL1
        || *reg == SYS_ICC_AP0R3_EL1
        || *reg == SYS_ICC_AP1R2_EL1
        || *reg == SYS_ICC_AP1R3_EL1;
    if needs_six_bits {
        num_priority_bits >= 6
    } else if needs_seven_bits {
        num_priority_bits == 7
    } else {
        // All remaining AP registers are always implemented.
        true
    }
}
/// Reads the CPU-interface (ICC) system registers for the vCPU `mpidr`.
pub(crate) fn get_icc_regs(fd: &DeviceFd, mpidr: u64) -> Result<VgicSysRegsState, GicError> {
    let main_icc_regs =
        VgicSysRegEngine::get_regs_data(fd, Box::new(MAIN_VGIC_ICC_REGS.iter()), mpidr)?;
    let num_priority_bits = num_priority_bits(fd, mpidr)?;
    let mut ap_icc_regs = Vec::with_capacity(AP_VGIC_ICC_REGS.len());
    for reg in AP_VGIC_ICC_REGS {
        // Active-priority registers only exist for certain priority-bit
        // configurations; store `None` for the absent ones.
        if is_ap_reg_available(reg, num_priority_bits) {
            ap_icc_regs.push(Some(VgicSysRegEngine::get_reg_data(fd, reg, mpidr)?));
        } else {
            ap_icc_regs.push(None);
        }
    }
    Ok(VgicSysRegsState {
        main_icc_regs,
        ap_icc_regs,
    })
}
/// Writes the CPU-interface (ICC) system registers for the vCPU `mpidr`.
///
/// Fails with `InvalidVgicSysRegState` if the saved AP register layout does
/// not match what the current vCPU configuration implements.
pub(crate) fn set_icc_regs(
    fd: &DeviceFd,
    mpidr: u64,
    state: &VgicSysRegsState,
) -> Result<(), GicError> {
    VgicSysRegEngine::set_regs_data(
        fd,
        Box::new(MAIN_VGIC_ICC_REGS.iter()),
        &state.main_icc_regs,
        mpidr,
    )?;
    let num_priority_bits = num_priority_bits(fd, mpidr)?;
    for (reg, maybe_reg_data) in AP_VGIC_ICC_REGS.iter().zip(&state.ap_icc_regs) {
        // Saved availability must agree with the current configuration.
        if is_ap_reg_available(reg, num_priority_bits) != maybe_reg_data.is_some() {
            return Err(GicError::InvalidVgicSysRegState);
        }
        if let Some(reg_data) = maybe_reg_data {
            VgicSysRegEngine::set_reg_data(fd, reg, reg_data, mpidr)?;
        }
    }
    Ok(())
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use std::os::unix::io::AsRawFd;
use kvm_ioctls::Kvm;
use super::*;
use crate::arch::aarch64::gic::{GICVersion, create_gic};
#[test]
fn test_access_icc_regs() {
let kvm = Kvm::new().unwrap();
let vm = kvm.create_vm().unwrap();
let _ = vm.create_vcpu(0).unwrap();
let gic_fd = create_gic(&vm, 1, Some(GICVersion::GICV3)).expect("Cannot create gic");
let gicr_typer = 123;
let res = get_icc_regs(gic_fd.device_fd(), gicr_typer);
let mut state = res.unwrap();
assert_eq!(state.main_icc_regs.len(), 7);
assert_eq!(state.ap_icc_regs.len(), 8);
set_icc_regs(gic_fd.device_fd(), gicr_typer, &state).unwrap();
for reg in state.ap_icc_regs.iter_mut() {
*reg = None;
}
let res = set_icc_regs(gic_fd.device_fd(), gicr_typer, &state);
assert_eq!(format!("{:?}", res.unwrap_err()), "InvalidVgicSysRegState");
unsafe { libc::close(gic_fd.device_fd().as_raw_fd()) };
let res = set_icc_regs(gic_fd.device_fd(), gicr_typer, &state);
assert_eq!(
format!("{:?}", res.unwrap_err()),
"DeviceAttribute(Error(9), true, 6)"
);
let res = get_icc_regs(gic_fd.device_fd(), gicr_typer);
assert_eq!(
format!("{:?}", res.unwrap_err()),
"DeviceAttribute(Error(9), false, 6)"
);
// dropping gic_fd would double close the gic fd, so leak it
std::mem::forget(gic_fd);
}
#[test]
fn test_icc_constructors() {
let sys_reg1 = SimpleReg::vgic_sys_reg(3, 0, 12, 12, 5);
let sys_reg2 = SimpleReg::sys_icc_ap0rn_el1(1);
let sys_reg3 = SimpleReg::sys_icc_ap1rn_el1(1);
assert!(sys_reg1 == SimpleReg::new(50789, 8));
assert!(sys_reg2 == SimpleReg::new(50757, 8));
assert!(sys_reg3 == SimpleReg::new(50761, 8));
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/gic/gicv3/regs/redist_regs.rs | src/vmm/src/arch/aarch64/gic/gicv3/regs/redist_regs.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use kvm_bindings::*;
use kvm_ioctls::DeviceFd;
use crate::arch::aarch64::gic::GicError;
use crate::arch::aarch64::gic::regs::{GicRegState, SimpleReg, VgicRegEngine};
// Relevant PPI redistributor registers that we want to save/restore.
const GICR_CTLR: SimpleReg = SimpleReg::new(0x0000, 4);
const GICR_STATUSR: SimpleReg = SimpleReg::new(0x0010, 4);
const GICR_WAKER: SimpleReg = SimpleReg::new(0x0014, 4);
const GICR_PROPBASER: SimpleReg = SimpleReg::new(0x0070, 8);
const GICR_PENDBASER: SimpleReg = SimpleReg::new(0x0078, 8);
// Relevant SGI redistributor registers that we want to save/restore.
const GICR_SGI_OFFSET: u64 = 0x0001_0000;
const GICR_IGROUPR0: SimpleReg = SimpleReg::new(GICR_SGI_OFFSET + 0x0080, 4);
const GICR_ISENABLER0: SimpleReg = SimpleReg::new(GICR_SGI_OFFSET + 0x0100, 4);
const GICR_ICENABLER0: SimpleReg = SimpleReg::new(GICR_SGI_OFFSET + 0x0180, 4);
const GICR_ISPENDR0: SimpleReg = SimpleReg::new(GICR_SGI_OFFSET + 0x0200, 4);
const GICR_ICPENDR0: SimpleReg = SimpleReg::new(GICR_SGI_OFFSET + 0x0280, 4);
const GICR_ISACTIVER0: SimpleReg = SimpleReg::new(GICR_SGI_OFFSET + 0x0300, 4);
const GICR_ICACTIVER0: SimpleReg = SimpleReg::new(GICR_SGI_OFFSET + 0x0380, 4);
const GICR_IPRIORITYR0: SimpleReg = SimpleReg::new(GICR_SGI_OFFSET + 0x0400, 32);
const GICR_ICFGR0: SimpleReg = SimpleReg::new(GICR_SGI_OFFSET + 0x0C00, 8);
// List with relevant redistributor registers that we will be restoring.
static VGIC_RDIST_REGS: &[SimpleReg] = &[
GICR_STATUSR,
GICR_WAKER,
GICR_PROPBASER,
GICR_PENDBASER,
GICR_CTLR,
];
// List with relevant SGI associated redistributor registers that we will be restoring.
static VGIC_SGI_REGS: &[SimpleReg] = &[
GICR_IGROUPR0,
GICR_ICENABLER0,
GICR_ISENABLER0,
GICR_ICFGR0,
GICR_ICPENDR0,
GICR_ISPENDR0,
GICR_ICACTIVER0,
GICR_ISACTIVER0,
GICR_IPRIORITYR0,
];
struct RedistRegEngine {}
impl VgicRegEngine for RedistRegEngine {
type Reg = SimpleReg;
type RegChunk = u32;
fn group() -> u32 {
KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
}
#[allow(clippy::cast_sign_loss)] // bit mask
fn mpidr_mask() -> u64 {
KVM_DEV_ARM_VGIC_V3_MPIDR_MASK as u64
}
}
fn redist_regs() -> Box<dyn Iterator<Item = &'static SimpleReg>> {
Box::new(VGIC_RDIST_REGS.iter().chain(VGIC_SGI_REGS))
}
pub(crate) fn get_redist_regs(
fd: &DeviceFd,
mpidr: u64,
) -> Result<Vec<GicRegState<u32>>, GicError> {
RedistRegEngine::get_regs_data(fd, redist_regs(), mpidr)
}
pub(crate) fn set_redist_regs(
fd: &DeviceFd,
mpidr: u64,
data: &[GicRegState<u32>],
) -> Result<(), GicError> {
RedistRegEngine::set_regs_data(fd, redist_regs(), data, mpidr)
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use std::os::unix::io::AsRawFd;
use kvm_ioctls::Kvm;
use super::*;
use crate::arch::aarch64::gic::{GICVersion, create_gic};
#[test]
fn test_access_redist_regs() {
let kvm = Kvm::new().unwrap();
let vm = kvm.create_vm().unwrap();
let _ = vm.create_vcpu(0).unwrap();
let gic_fd = create_gic(&vm, 1, Some(GICVersion::GICV3)).expect("Cannot create gic");
let gicr_typer = 123;
let res = get_redist_regs(gic_fd.device_fd(), gicr_typer);
let state = res.unwrap();
assert_eq!(state.len(), 14);
set_redist_regs(gic_fd.device_fd(), gicr_typer, &state).unwrap();
unsafe { libc::close(gic_fd.device_fd().as_raw_fd()) };
let res = set_redist_regs(gic_fd.device_fd(), gicr_typer, &state);
assert_eq!(
format!("{:?}", res.unwrap_err()),
"DeviceAttribute(Error(9), true, 5)"
);
let res = get_redist_regs(gic_fd.device_fd(), gicr_typer);
assert_eq!(
format!("{:?}", res.unwrap_err()),
"DeviceAttribute(Error(9), false, 5)"
);
// dropping gic_fd would double close the gic fd, so leak it
std::mem::forget(gic_fd);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/gic/gicv2/mod.rs | src/vmm/src/arch/aarch64/gic/gicv2/mod.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
mod regs;
use kvm_ioctls::{DeviceFd, VmFd};
use crate::arch::aarch64::gic::{GicError, GicState};
/// Represent a GIC v2 device
#[derive(Debug)]
pub struct GICv2(super::GIC);
impl std::ops::Deref for GICv2 {
type Target = super::GIC;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl GICv2 {
// Unfortunately bindgen omits defines that are based on other defines.
// See arch/arm64/include/uapi/asm/kvm.h file from the linux kernel.
const KVM_VGIC_V2_DIST_SIZE: u64 = 0x1000;
const KVM_VGIC_V2_CPU_SIZE: u64 = 0x2000;
// Device trees specific constants
const ARCH_GIC_V2_MAINT_IRQ: u32 = 8;
/// Get the address of the GICv2 distributor.
const fn get_dist_addr() -> u64 {
super::layout::MMIO32_MEM_START - GICv2::KVM_VGIC_V2_DIST_SIZE
}
/// Get the size of the GIC_v2 distributor.
const fn get_dist_size() -> u64 {
GICv2::KVM_VGIC_V2_DIST_SIZE
}
/// Get the address of the GIC_v2 CPU.
const fn get_cpu_addr() -> u64 {
GICv2::get_dist_addr() - GICv2::KVM_VGIC_V2_CPU_SIZE
}
/// Get the size of the GIC_v2 CPU.
const fn get_cpu_size() -> u64 {
GICv2::KVM_VGIC_V2_CPU_SIZE
}
pub const VERSION: u32 = kvm_bindings::kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_V2;
pub fn fdt_compatibility(&self) -> &str {
"arm,gic-400"
}
pub fn fdt_maint_irq(&self) -> u32 {
GICv2::ARCH_GIC_V2_MAINT_IRQ
}
/// Create the GIC device object
pub fn create_device(fd: DeviceFd, vcpu_count: u64) -> Self {
GICv2(super::GIC {
fd,
properties: [
GICv2::get_dist_addr(),
GICv2::get_dist_size(),
GICv2::get_cpu_addr(),
GICv2::get_cpu_size(),
],
msi_properties: None,
vcpu_count,
its_device: None,
})
}
pub fn save_device(&self, mpidrs: &[u64]) -> Result<GicState, GicError> {
regs::save_state(&self.fd, mpidrs)
}
pub fn restore_device(&self, mpidrs: &[u64], state: &GicState) -> Result<(), GicError> {
regs::restore_state(&self.fd, mpidrs, state)
}
pub fn init_device_attributes(gic_device: &Self) -> Result<(), GicError> {
// Setting up the distributor attribute.
// We are placing the GIC below 1GB so we need to subtract the size of the distributor.
Self::set_device_attribute(
gic_device.device_fd(),
kvm_bindings::KVM_DEV_ARM_VGIC_GRP_ADDR,
u64::from(kvm_bindings::KVM_VGIC_V2_ADDR_TYPE_DIST),
&GICv2::get_dist_addr() as *const u64 as u64,
0,
)?;
// Setting up the CPU attribute.
Self::set_device_attribute(
gic_device.device_fd(),
kvm_bindings::KVM_DEV_ARM_VGIC_GRP_ADDR,
u64::from(kvm_bindings::KVM_VGIC_V2_ADDR_TYPE_CPU),
&GICv2::get_cpu_addr() as *const u64 as u64,
0,
)?;
Ok(())
}
/// Initialize a GIC device
pub fn init_device(vm: &VmFd) -> Result<DeviceFd, GicError> {
let mut gic_device = kvm_bindings::kvm_create_device {
type_: Self::VERSION,
fd: 0,
flags: 0,
};
vm.create_device(&mut gic_device)
.map_err(GicError::CreateGIC)
}
/// Method to initialize the GIC device
pub fn create(vm: &VmFd, vcpu_count: u64) -> Result<Self, GicError> {
let vgic_fd = Self::init_device(vm)?;
let device = Self::create_device(vgic_fd, vcpu_count);
Self::init_device_attributes(&device)?;
Self::finalize_device(&device)?;
Ok(device)
}
/// Finalize the setup of a GIC device
pub fn finalize_device(gic_device: &Self) -> Result<(), GicError> {
// On arm there are 3 types of interrupts: SGI (0-15), PPI (16-31), SPI (32-1020).
// SPIs are used to signal interrupts from various peripherals accessible across
// the whole system so these are the ones that we increment when adding a new virtio device.
// KVM_DEV_ARM_VGIC_GRP_NR_IRQS sets the number of interrupts (SGI, PPI, and SPI).
// Consequently, we need to add 32 to the number of SPIs ("legacy GSI").
let nr_irqs: u32 = crate::arch::GSI_LEGACY_NUM + super::layout::SPI_START;
let nr_irqs_ptr = &nr_irqs as *const u32;
Self::set_device_attribute(
gic_device.device_fd(),
kvm_bindings::KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
0,
nr_irqs_ptr as u64,
0,
)?;
// Finalize the GIC.
// See https://code.woboq.org/linux/linux/virt/kvm/arm/vgic/vgic-kvm-device.c.html#211.
Self::set_device_attribute(
gic_device.device_fd(),
kvm_bindings::KVM_DEV_ARM_VGIC_GRP_CTRL,
u64::from(kvm_bindings::KVM_DEV_ARM_VGIC_CTRL_INIT),
0,
0,
)?;
Ok(())
}
/// Set a GIC device attribute
pub fn set_device_attribute(
fd: &DeviceFd,
group: u32,
attr: u64,
addr: u64,
flags: u32,
) -> Result<(), GicError> {
let attr = kvm_bindings::kvm_device_attr {
flags,
group,
attr,
addr,
};
fd.set_device_attr(&attr)
.map_err(|err| GicError::DeviceAttribute(err, true, group))?;
Ok(())
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/gic/gicv2/regs/dist_regs.rs | src/vmm/src/arch/aarch64/gic/gicv2/regs/dist_regs.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::ops::Range;
use kvm_bindings::KVM_DEV_ARM_VGIC_GRP_DIST_REGS;
use kvm_ioctls::DeviceFd;
use crate::arch::aarch64::gic::GicError;
use crate::arch::aarch64::gic::regs::{GicRegState, MmioReg, SimpleReg, VgicRegEngine};
use crate::arch::{GSI_LEGACY_NUM, SPI_START};
// Distributor registers as detailed at page 75 from
// https://developer.arm.com/documentation/ihi0048/latest/.
// Address offsets are relative to the Distributor base address defined
// by the system memory map.
const GICD_CTLR: DistReg = DistReg::simple(0x0, 4);
const GICD_IGROUPR: DistReg = DistReg::shared_irq(0x0080, 1);
const GICD_ISENABLER: DistReg = DistReg::shared_irq(0x0100, 1);
const GICD_ICENABLER: DistReg = DistReg::shared_irq(0x0180, 1);
const GICD_ISPENDR: DistReg = DistReg::shared_irq(0x0200, 1);
const GICD_ICPENDR: DistReg = DistReg::shared_irq(0x0280, 1);
const GICD_ISACTIVER: DistReg = DistReg::shared_irq(0x0300, 1);
const GICD_ICACTIVER: DistReg = DistReg::shared_irq(0x0380, 1);
const GICD_IPRIORITYR: DistReg = DistReg::shared_irq(0x0400, 8);
const GICD_ICFGR: DistReg = DistReg::shared_irq(0x0C00, 2);
const GICD_CPENDSGIR: DistReg = DistReg::simple(0xF10, 16);
const GICD_SPENDSGIR: DistReg = DistReg::simple(0xF20, 16);
// List with relevant distributor registers that we will be restoring.
// Order is taken from qemu.
// Criteria for the present list of registers: only R/W registers, implementation specific registers
// are not saved.
static VGIC_DIST_REGS: &[DistReg] = &[
GICD_CTLR,
GICD_ICENABLER,
GICD_ISENABLER,
GICD_IGROUPR,
GICD_ICFGR,
GICD_ICPENDR,
GICD_ISPENDR,
GICD_ICACTIVER,
GICD_ISACTIVER,
GICD_IPRIORITYR,
GICD_CPENDSGIR,
GICD_SPENDSGIR,
];
/// Some registers have variable lengths since they dedicate a specific number of bits to
/// each interrupt. So, their length depends on the number of interrupts.
/// (i.e the ones that are represented as GICD_REG<n>) in the documentation mentioned above.
pub struct SharedIrqReg {
/// The offset from the component address. The register is memory mapped here.
offset: u64,
/// Number of bits per interrupt.
bits_per_irq: u8,
}
impl MmioReg for SharedIrqReg {
fn range(&self) -> Range<u64> {
// The ARM® TrustZone® implements a protection logic which contains a
// read-as-zero/write-ignore (RAZ/WI) policy.
// The first part of a shared-irq register, the one corresponding to the
// SGI and PPI IRQs (0-32) is RAZ/WI, so we skip it.
let start = self.offset + u64::from(SPI_START) * u64::from(self.bits_per_irq) / 8;
let size_in_bits = u64::from(self.bits_per_irq) * u64::from(GSI_LEGACY_NUM);
let mut size_in_bytes = size_in_bits / 8;
if size_in_bits % 8 > 0 {
size_in_bytes += 1;
}
start..start + size_in_bytes
}
}
enum DistReg {
Simple(SimpleReg),
SharedIrq(SharedIrqReg),
}
impl DistReg {
const fn simple(offset: u64, size: u16) -> DistReg {
DistReg::Simple(SimpleReg::new(offset, size))
}
const fn shared_irq(offset: u64, bits_per_irq: u8) -> DistReg {
DistReg::SharedIrq(SharedIrqReg {
offset,
bits_per_irq,
})
}
}
impl MmioReg for DistReg {
fn range(&self) -> Range<u64> {
match self {
DistReg::Simple(reg) => reg.range(),
DistReg::SharedIrq(reg) => reg.range(),
}
}
}
struct DistRegEngine {}
impl VgicRegEngine for DistRegEngine {
type Reg = DistReg;
type RegChunk = u32;
fn group() -> u32 {
KVM_DEV_ARM_VGIC_GRP_DIST_REGS
}
fn mpidr_mask() -> u64 {
0
}
}
pub(crate) fn get_dist_regs(fd: &DeviceFd) -> Result<Vec<GicRegState<u32>>, GicError> {
DistRegEngine::get_regs_data(fd, Box::new(VGIC_DIST_REGS.iter()), 0)
}
pub(crate) fn set_dist_regs(fd: &DeviceFd, state: &[GicRegState<u32>]) -> Result<(), GicError> {
DistRegEngine::set_regs_data(fd, Box::new(VGIC_DIST_REGS.iter()), state, 0)
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use std::os::unix::io::AsRawFd;
use kvm_ioctls::Kvm;
use super::*;
use crate::arch::aarch64::gic::{GICVersion, GicError, create_gic};
#[test]
fn test_access_dist_regs() {
let kvm = Kvm::new().unwrap();
let vm = kvm.create_vm().unwrap();
let _ = vm.create_vcpu(0).unwrap();
let gic_fd = match create_gic(&vm, 1, Some(GICVersion::GICV2)) {
Ok(gic_fd) => gic_fd,
Err(GicError::CreateGIC(_)) => return,
_ => panic!("Failed to open setup GICv2"),
};
let res = get_dist_regs(gic_fd.device_fd());
let state = res.unwrap();
assert_eq!(state.len(), 7);
// Check GICD_CTLR size.
assert_eq!(state[0].chunks.len(), 1);
let res = set_dist_regs(gic_fd.device_fd(), &state);
res.unwrap();
unsafe { libc::close(gic_fd.device_fd().as_raw_fd()) };
let res = get_dist_regs(gic_fd.device_fd());
assert_eq!(
format!("{:?}", res.unwrap_err()),
"DeviceAttribute(Error(9), false, 1)"
);
// dropping gic_fd would double close the gic fd, so leak it
std::mem::forget(gic_fd);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/gic/gicv2/regs/mod.rs | src/vmm/src/arch/aarch64/gic/gicv2/regs/mod.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
mod dist_regs;
mod icc_regs;
use kvm_ioctls::DeviceFd;
use crate::arch::aarch64::gic::GicError;
use crate::arch::aarch64::gic::regs::{GicState, GicVcpuState};
/// Save the state of the GIC device.
pub fn save_state(fd: &DeviceFd, mpidrs: &[u64]) -> Result<GicState, GicError> {
let mut vcpu_states = Vec::with_capacity(mpidrs.len());
for mpidr in mpidrs {
vcpu_states.push(GicVcpuState {
rdist: Vec::new(),
icc: icc_regs::get_icc_regs(fd, *mpidr)?,
})
}
Ok(GicState {
dist: dist_regs::get_dist_regs(fd)?,
gic_vcpu_states: vcpu_states,
..Default::default()
})
}
/// Restore the state of the GIC device.
pub fn restore_state(fd: &DeviceFd, mpidrs: &[u64], state: &GicState) -> Result<(), GicError> {
dist_regs::set_dist_regs(fd, &state.dist)?;
if mpidrs.len() != state.gic_vcpu_states.len() {
return Err(GicError::InconsistentVcpuCount);
}
for (mpidr, vcpu_state) in mpidrs.iter().zip(&state.gic_vcpu_states) {
icc_regs::set_icc_regs(fd, *mpidr, &vcpu_state.icc)?;
}
Ok(())
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use kvm_ioctls::Kvm;
use super::*;
use crate::arch::aarch64::gic::{GICVersion, create_gic};
#[test]
fn test_vm_save_restore_state() {
let kvm = Kvm::new().unwrap();
let vm = kvm.create_vm().unwrap();
let gic_fd = match create_gic(&vm, 1, Some(GICVersion::GICV2)) {
Ok(gic_fd) => gic_fd,
Err(GicError::CreateGIC(_)) => return,
_ => panic!("Failed to open setup GICv2"),
};
let mpidr = vec![0];
let res = save_state(gic_fd.device_fd(), &mpidr);
// We will receive an error if trying to call before creating vcpu.
assert_eq!(
format!("{:?}", res.unwrap_err()),
"DeviceAttribute(Error(22), false, 2)"
);
let kvm = Kvm::new().unwrap();
let vm = kvm.create_vm().unwrap();
let _vcpu = vm.create_vcpu(0).unwrap();
let gic = create_gic(&vm, 1, Some(GICVersion::GICV2)).expect("Cannot create gic");
let gic_fd = gic.device_fd();
let vm_state = save_state(gic_fd, &mpidr).unwrap();
let val: u32 = 0;
let gicd_statusr_off = 0x0010u64;
let mut gic_dist_attr = kvm_bindings::kvm_device_attr {
group: kvm_bindings::KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
attr: gicd_statusr_off,
addr: &val as *const u32 as u64,
flags: 0,
};
unsafe {
gic_fd.get_device_attr(&mut gic_dist_attr).unwrap();
}
// The second value from the list of distributor registers is the value of the GICD_STATUSR
// register. We assert that the one saved in the bitmap is the same with the one we
// obtain with KVM_GET_DEVICE_ATTR.
let gicd_statusr = &vm_state.dist[1];
assert_eq!(gicd_statusr.chunks[0], val);
assert_eq!(vm_state.dist.len(), 7);
restore_state(gic_fd, &mpidr, &vm_state).unwrap();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/arch/aarch64/gic/gicv2/regs/icc_regs.rs | src/vmm/src/arch/aarch64/gic/gicv2/regs/icc_regs.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use kvm_bindings::*;
use kvm_ioctls::DeviceFd;
use crate::arch::aarch64::gic::GicError;
use crate::arch::aarch64::gic::regs::{SimpleReg, VgicRegEngine, VgicSysRegsState};
// CPU interface registers as detailed at page 76 from
// https://developer.arm.com/documentation/ihi0048/latest/.
// Address offsets are relative to the cpu interface base address defined
// by the system memory map.
// Criteria for the present list of registers: only R/W registers, optional registers are not saved.
// GICC_NSAPR are not saved since they are only present in GICv2 implementations that include the
// GIC security extensions so it might crash on some systems.
const GICC_CTLR: SimpleReg = SimpleReg::new(0x0, 4);
const GICC_PMR: SimpleReg = SimpleReg::new(0x04, 4);
const GICC_BPR: SimpleReg = SimpleReg::new(0x08, 4);
const GICC_APBR: SimpleReg = SimpleReg::new(0x001C, 4);
const GICC_APR1: SimpleReg = SimpleReg::new(0x00D0, 4);
const GICC_APR2: SimpleReg = SimpleReg::new(0x00D4, 4);
const GICC_APR3: SimpleReg = SimpleReg::new(0x00D8, 4);
const GICC_APR4: SimpleReg = SimpleReg::new(0x00DC, 4);
static MAIN_VGIC_ICC_REGS: &[SimpleReg] = &[
GICC_CTLR, GICC_PMR, GICC_BPR, GICC_APBR, GICC_APR1, GICC_APR2, GICC_APR3, GICC_APR4,
];
const KVM_DEV_ARM_VGIC_CPUID_SHIFT: u32 = 32;
const KVM_DEV_ARM_VGIC_OFFSET_SHIFT: u32 = 0;
struct VgicSysRegEngine {}
impl VgicRegEngine for VgicSysRegEngine {
type Reg = SimpleReg;
type RegChunk = u64;
fn group() -> u32 {
KVM_DEV_ARM_VGIC_GRP_CPU_REGS
}
fn kvm_device_attr(offset: u64, val: &mut Self::RegChunk, cpuid: u64) -> kvm_device_attr {
kvm_device_attr {
group: Self::group(),
attr: ((cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
& (0xff << KVM_DEV_ARM_VGIC_CPUID_SHIFT))
| ((offset << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
& (0xffffffff << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)),
addr: val as *mut Self::RegChunk as u64,
flags: 0,
}
}
}
pub(crate) fn get_icc_regs(fd: &DeviceFd, mpidr: u64) -> Result<VgicSysRegsState, GicError> {
let main_icc_regs =
VgicSysRegEngine::get_regs_data(fd, Box::new(MAIN_VGIC_ICC_REGS.iter()), mpidr)?;
Ok(VgicSysRegsState {
main_icc_regs,
ap_icc_regs: Vec::new(),
})
}
pub(crate) fn set_icc_regs(
fd: &DeviceFd,
mpidr: u64,
state: &VgicSysRegsState,
) -> Result<(), GicError> {
VgicSysRegEngine::set_regs_data(
fd,
Box::new(MAIN_VGIC_ICC_REGS.iter()),
&state.main_icc_regs,
mpidr,
)?;
Ok(())
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use std::os::unix::io::AsRawFd;
use kvm_ioctls::Kvm;
use super::*;
use crate::arch::aarch64::gic::{GICVersion, GicError, create_gic};
#[test]
fn test_access_icc_regs() {
let kvm = Kvm::new().unwrap();
let vm = kvm.create_vm().unwrap();
let _ = vm.create_vcpu(0).unwrap();
let gic_fd = match create_gic(&vm, 1, Some(GICVersion::GICV2)) {
Ok(gic_fd) => gic_fd,
Err(GicError::CreateGIC(_)) => return,
_ => panic!("Failed to open setup GICv2"),
};
let cpu_id = 0;
let res = get_icc_regs(gic_fd.device_fd(), cpu_id);
let state = res.unwrap();
assert_eq!(state.main_icc_regs.len(), 8);
assert_eq!(state.ap_icc_regs.len(), 0);
set_icc_regs(gic_fd.device_fd(), cpu_id, &state).unwrap();
unsafe { libc::close(gic_fd.device_fd().as_raw_fd()) };
let res = set_icc_regs(gic_fd.device_fd(), cpu_id, &state);
assert_eq!(
format!("{:?}", res.unwrap_err()),
"DeviceAttribute(Error(9), true, 2)"
);
let res = get_icc_regs(gic_fd.device_fd(), cpu_id);
assert_eq!(
format!("{:?}", res.unwrap_err()),
"DeviceAttribute(Error(9), false, 2)"
);
// dropping gic_fd would double close the gic fd, so leak it
std::mem::forget(gic_fd);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/snapshot/persist.rs | src/vmm/src/snapshot/persist.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines an abstract interface for saving/restoring a component from state.
/// An abstract interface for saving/restoring a component using a specific state.
pub trait Persist<'a>
where
Self: Sized,
{
/// The type of the object representing the state of the component.
type State;
/// The type of the object holding the constructor arguments.
type ConstructorArgs;
/// The type of the error that can occur while constructing the object.
type Error;
/// Returns the current state of the component.
fn save(&self) -> Self::State;
/// Constructs a component from a specified state.
fn restore(
constructor_args: Self::ConstructorArgs,
state: &Self::State,
) -> Result<Self, Self::Error>;
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/snapshot/mod.rs | src/vmm/src/snapshot/mod.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Provides serialization and deserialization facilities and implements a persistent storage
//! format for Firecracker state snapshots.
//!
//! The `Snapshot` API manages serialization and deserialization of collections of objects
//! that implement the `serde` `Serialize`, `Deserialize` trait. Currently, we use
//! [`bincode`](https://docs.rs/bincode/latest/bincode/) for performing the serialization.
//!
//! The snapshot format uses the following layout:
//!
//! |-----------------------------|
//! | 64 bit magic_id |
//! |-----------------------------|
//! | version string |
//! |-----------------------------|
//! | State |
//! |-----------------------------|
//! | optional CRC64 |
//! |-----------------------------|
//!
//!
//! The snapshot format uses a version value in the form of `MAJOR.MINOR.PATCH`. The version is
//! provided by the library clients (it is not tied to this crate).
pub mod crc;
mod persist;
use std::fmt::Debug;
use std::io::{Read, Write};
use bincode::config;
use bincode::config::{Configuration, Fixint, Limit, LittleEndian};
use bincode::error::{DecodeError, EncodeError};
use crc64::crc64;
use semver::Version;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use crate::persist::SNAPSHOT_VERSION;
use crate::snapshot::crc::CRC64Writer;
pub use crate::snapshot::persist::Persist;
use crate::utils::mib_to_bytes;
#[cfg(target_arch = "x86_64")]
const SNAPSHOT_MAGIC_ID: u64 = 0x0710_1984_8664_0000u64;
/// Constant bounding how much memory bincode may allocate during vmstate file deserialization
const DESERIALIZATION_BYTES_LIMIT: usize = mib_to_bytes(10);
const BINCODE_CONFIG: Configuration<LittleEndian, Fixint, Limit<DESERIALIZATION_BYTES_LIMIT>> =
config::standard()
.with_fixed_int_encoding()
.with_limit::<DESERIALIZATION_BYTES_LIMIT>()
.with_little_endian();
#[cfg(target_arch = "aarch64")]
const SNAPSHOT_MAGIC_ID: u64 = 0x0710_1984_AAAA_0000u64;
/// Error definitions for the Snapshot API.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum SnapshotError {
/// CRC64 validation failed
Crc64,
/// Invalid data version: {0}
InvalidFormatVersion(Version),
/// Magic value does not match arch: {0}
InvalidMagic(u64),
/// An error occured during bincode encoding: {0}
Encode(#[from] EncodeError),
/// An error occured during bincode decoding: {0}
Decode(#[from] DecodeError),
/// IO Error: {0}
Io(#[from] std::io::Error),
}
fn serialize<S: Serialize, W: Write>(data: &S, write: &mut W) -> Result<(), SnapshotError> {
bincode::serde::encode_into_std_write(data, write, BINCODE_CONFIG)
.map_err(SnapshotError::Encode)
.map(|_| ())
}
/// Firecracker snapshot header
#[derive(Debug, Serialize, Deserialize)]
struct SnapshotHdr {
/// magic value
magic: u64,
/// Snapshot data version
version: Version,
}
impl SnapshotHdr {
fn load(buf: &mut &[u8]) -> Result<Self, SnapshotError> {
let (hdr, bytes_read) = bincode::serde::decode_from_slice::<Self, _>(buf, BINCODE_CONFIG)?;
if hdr.magic != SNAPSHOT_MAGIC_ID {
return Err(SnapshotError::InvalidMagic(hdr.magic));
}
if hdr.version.major != SNAPSHOT_VERSION.major || hdr.version.minor > SNAPSHOT_VERSION.minor
{
return Err(SnapshotError::InvalidFormatVersion(hdr.version));
}
*buf = &buf[bytes_read..];
Ok(hdr)
}
}
/// Assumes the raw bytes stream read from the given [`Read`] instance is a snapshot file,
/// and returns the version of it.
pub fn get_format_version<R: Read>(reader: &mut R) -> Result<Version, SnapshotError> {
let hdr: SnapshotHdr = bincode::serde::decode_from_std_read(reader, BINCODE_CONFIG)?;
Ok(hdr.version)
}
/// Firecracker snapshot type
///
/// A type used to store and load Firecracker snapshots of a particular version
#[derive(Debug, Serialize)]
pub struct Snapshot<Data> {
header: SnapshotHdr,
/// The data stored int his [`Snapshot`]
pub data: Data,
}
impl<Data> Snapshot<Data> {
/// Constructs a new snapshot with the given `data`.
pub fn new(data: Data) -> Self {
Self {
header: SnapshotHdr {
magic: SNAPSHOT_MAGIC_ID,
version: SNAPSHOT_VERSION.clone(),
},
data,
}
}
/// Gets the version of this snapshot
pub fn version(&self) -> &Version {
&self.header.version
}
}
impl<Data: DeserializeOwned> Snapshot<Data> {
pub(crate) fn load_without_crc_check(mut buf: &[u8]) -> Result<Self, SnapshotError> {
let header = SnapshotHdr::load(&mut buf)?;
let data = bincode::serde::decode_from_slice(buf, BINCODE_CONFIG)?.0;
Ok(Self { header, data })
}
/// Loads a snapshot from the given [`Read`] instance, performing all validations
/// (CRC, snapshot magic value, snapshot version).
pub fn load<R: Read>(reader: &mut R) -> Result<Self, SnapshotError> {
// read_to_end internally right-sizes the buffer, so no reallocations due to growing buffers
// will happen.
let mut buf = Vec::new();
reader.read_to_end(&mut buf)?;
let snapshot = Self::load_without_crc_check(buf.as_slice())?;
let computed_checksum = crc64(0, buf.as_slice());
// When we read the entire file, we also read the checksum into the buffer. The CRC has the
// property that crc(0, buf.as_slice()) == 0 iff the last 8 bytes of buf are the checksum
// of all the preceeding bytes, and this is the property we are using here.
if computed_checksum != 0 {
return Err(SnapshotError::Crc64);
}
Ok(snapshot)
}
}
impl<Data: Serialize> Snapshot<Data> {
/// Saves `self` to the given [`Write`] instance, computing the CRC of the written data,
/// and then writing the CRC into the `Write` instance, too.
pub fn save<W: Write>(&self, writer: &mut W) -> Result<(), SnapshotError> {
let mut crc_writer = CRC64Writer::new(writer);
serialize(self, &mut crc_writer)?;
serialize(&crc_writer.checksum(), crc_writer.writer)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::persist::MicrovmState;
    // Round-trip: a default microVM state survives save followed by load.
    #[test]
    fn test_snapshot_restore() {
        let state = MicrovmState::default();
        let mut buf = Vec::new();
        Snapshot::new(state).save(&mut buf).unwrap();
        Snapshot::<MicrovmState>::load(&mut buf.as_slice()).unwrap();
    }
    // The header of a freshly written snapshot must report the crate's current
    // snapshot format version.
    #[test]
    fn test_parse_version_from_file() {
        let snapshot = Snapshot::new(42);
        // Enough memory for the header, 1 byte and the CRC
        let mut snapshot_data = vec![0u8; 100];
        snapshot.save(&mut snapshot_data.as_mut_slice()).unwrap();
        assert_eq!(
            get_format_version(&mut snapshot_data.as_slice()).unwrap(),
            SNAPSHOT_VERSION
        );
    }
    // I/O errors from the underlying reader must surface as `SnapshotError::Io`
    // with the original `ErrorKind` preserved.
    #[test]
    fn test_bad_reader() {
        #[derive(Debug)]
        struct BadReader;
        impl Read for BadReader {
            fn read(&mut self, _buf: &mut [u8]) -> std::io::Result<usize> {
                Err(std::io::ErrorKind::InvalidInput.into())
            }
        }
        let mut reader = BadReader {};
        assert!(
            matches!(Snapshot::<()>::load(&mut reader), Err(SnapshotError::Io(inner)) if inner.kind() == std::io::ErrorKind::InvalidInput)
        );
    }
    // Corrupting the magic bytes must be rejected with `InvalidMagic` carrying
    // the little-endian u64 that was actually read.
    #[test]
    fn test_bad_magic() {
        let mut data = vec![0u8; 100];
        let snapshot = Snapshot::new(());
        snapshot.save(&mut data.as_mut_slice()).unwrap();
        // Writing dummy values in the first bytes of the snapshot data (we are on little-endian
        // machines) should trigger an `Error::InvalidMagic` error.
        data[0] = 0x01;
        data[1] = 0x02;
        data[2] = 0x03;
        data[3] = 0x04;
        data[4] = 0x42;
        data[5] = 0x43;
        data[6] = 0x44;
        data[7] = 0x45;
        assert!(matches!(
            SnapshotHdr::load(&mut data.as_slice()),
            Err(SnapshotError::InvalidMagic(0x4544_4342_0403_0201u64))
        ));
    }
    // Loading with CRC verification must fail when the trailing CRC is absent
    // (the reader then sees zeros instead of the expected checksum).
    #[test]
    fn test_bad_crc() {
        let mut data = vec![0u8; 100];
        let snapshot = Snapshot::new(());
        // Write the snapshot without CRC, so that when loading with CRC check, we'll read
        // zeros for the CRC and fail.
        serialize(&snapshot, &mut data.as_mut_slice()).unwrap();
        assert!(matches!(
            Snapshot::<()>::load(&mut std::io::Cursor::new(data.as_slice())),
            Err(SnapshotError::Crc64)
        ));
    }
    // Version compatibility policy: reject different major and newer minor
    // versions; accept older minor versions and any patch version.
    #[test]
    fn test_bad_version() {
        let mut data = vec![0u8; 100];
        // Different major version: shouldn't work
        let mut snapshot = Snapshot::new(());
        snapshot.header.version.major = SNAPSHOT_VERSION.major + 1;
        snapshot.save(&mut data.as_mut_slice()).unwrap();
        assert!(matches!(
            Snapshot::<()>::load_without_crc_check(data.as_slice()),
            Err(SnapshotError::InvalidFormatVersion(v)) if v.major == SNAPSHOT_VERSION.major + 1
        ));
        // minor > SNAPSHOT_VERSION.minor: shouldn't work
        let mut snapshot = Snapshot::new(());
        snapshot.header.version.minor = SNAPSHOT_VERSION.minor + 1;
        snapshot.save(&mut data.as_mut_slice()).unwrap();
        assert!(matches!(
            Snapshot::<()>::load_without_crc_check(data.as_slice()),
            Err(SnapshotError::InvalidFormatVersion(v)) if v.minor == SNAPSHOT_VERSION.minor + 1
        ));
        // But we can support minor versions smaller or equal to ours. We also support
        // all patch versions within our supported major.minor version.
        let snapshot = Snapshot::new(());
        snapshot.save(&mut data.as_mut_slice()).unwrap();
        Snapshot::<()>::load_without_crc_check(data.as_slice()).unwrap();
        if SNAPSHOT_VERSION.minor != 0 {
            let mut snapshot = Snapshot::new(());
            snapshot.header.version.minor = SNAPSHOT_VERSION.minor - 1;
            snapshot.save(&mut data.as_mut_slice()).unwrap();
            Snapshot::<()>::load_without_crc_check(data.as_slice()).unwrap();
        }
        let mut snapshot = Snapshot::new(());
        snapshot.header.version.patch = 0;
        snapshot.save(&mut data.as_mut_slice()).unwrap();
        Snapshot::<()>::load_without_crc_check(data.as_slice()).unwrap();
        let mut snapshot = Snapshot::new(());
        snapshot.header.version.patch = SNAPSHOT_VERSION.patch + 1;
        snapshot.save(&mut data.as_mut_slice()).unwrap();
        Snapshot::<()>::load_without_crc_check(data.as_slice()).unwrap();
        let mut snapshot = Snapshot::new(());
        snapshot.header.version.patch = 1024;
        snapshot.save(&mut data.as_mut_slice()).unwrap();
        Snapshot::<()>::load_without_crc_check(data.as_slice()).unwrap();
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/snapshot/crc.rs | src/vmm/src/snapshot/crc.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Implements readers and writers that compute the CRC64 checksum of the bytes
//! read/written.
use std::io::Write;
use crc64::crc64;
/// Computes the CRC64 checksum of the written bytes.
///
/// ```
/// use std::io::Write;
///
/// use vmm::snapshot::crc::CRC64Writer;
///
/// let mut buf = vec![0; 16];
/// let write_buf = vec![123; 16];
/// let mut slice = buf.as_mut_slice();
///
/// // Create a new writer from slice.
/// let mut crc_writer = CRC64Writer::new(&mut slice);
///
/// crc_writer.write_all(&write_buf.as_slice()).unwrap();
/// assert_eq!(crc_writer.checksum(), 0x29D5_3572_1632_6566);
/// assert_eq!(write_buf, buf);
/// ```
#[derive(Debug)]
pub struct CRC64Writer<T> {
    /// The underlying raw writer. Using this directly will bypass CRC computation!
    pub writer: T,
    // Running CRC64 of every byte successfully written through the `Write` impl.
    crc64: u64,
}
impl<T> CRC64Writer<T>
where
T: Write,
{
/// Create a new writer.
pub fn new(writer: T) -> Self {
CRC64Writer { crc64: 0, writer }
}
/// Returns the current checksum value.
pub fn checksum(&self) -> u64 {
self.crc64
}
}
impl<T: Write> Write for CRC64Writer<T> {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        // Only the bytes the inner writer actually accepted are folded into
        // the checksum, so short writes stay consistent with the output.
        let written = self.writer.write(buf)?;
        self.crc64 = crc64(self.crc64, &buf[..written]);
        Ok(written)
    }
    fn flush(&mut self) -> std::io::Result<()> {
        self.writer.flush()
    }
}
#[cfg(test)]
mod tests {
    use super::{CRC64Writer, Write};
    // A fresh writer starts with a zero checksum and leaves the wrapped
    // buffer untouched.
    #[test]
    fn test_crc_new() {
        let mut buf = vec![0; 5];
        let mut slice = buf.as_mut_slice();
        let crc_writer = CRC64Writer::new(&mut slice);
        assert_eq!(crc_writer.crc64, 0);
        assert_eq!(crc_writer.writer, &[0; 5]);
        assert_eq!(crc_writer.checksum(), 0);
    }
    // Writing through the wrapper forwards the bytes and accumulates a known
    // CRC64 value; `checksum()` mirrors the private `crc64` field.
    #[test]
    fn test_crc_write() {
        let mut buf = vec![0; 16];
        let write_buf = vec![123; 16];
        let mut slice = buf.as_mut_slice();
        let mut crc_writer = CRC64Writer::new(&mut slice);
        crc_writer.write_all(write_buf.as_slice()).unwrap();
        crc_writer.flush().unwrap();
        assert_eq!(crc_writer.checksum(), 0x29D5_3572_1632_6566);
        assert_eq!(crc_writer.checksum(), crc_writer.crc64);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/pci/configuration.rs | src/vmm/src/pci/configuration.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
use std::sync::{Arc, Mutex};
use byteorder::{ByteOrder, LittleEndian};
use pci::{PciCapabilityId, PciClassCode, PciSubclass};
use serde::{Deserialize, Serialize};
use super::BarReprogrammingParams;
use super::msix::MsixConfig;
use crate::logger::{info, warn};
use crate::utils::u64_to_usize;
// The number of 32bit registers in the config space, 4096 bytes.
const NUM_CONFIGURATION_REGISTERS: usize = 1024;
const STATUS_REG: usize = 1;
// Bit 20 of register 1 (bit 4 of the 16-bit status word): capabilities list present.
const STATUS_REG_CAPABILITIES_USED_MASK: u32 = 0x0010_0000;
const BAR0_REG: usize = 4;
const ROM_BAR_REG: usize = 12;
// Memory BAR addresses are 16-byte aligned; the low 4 bits carry type/flag bits.
const BAR_MEM_ADDR_MASK: u32 = 0xffff_fff0;
// Expansion ROM BAR address bits are 31-11 (2 KiB aligned).
const ROM_BAR_ADDR_MASK: u32 = 0xffff_f800;
// Guest-writable bits of the register holding the MSI message control word
// (the capability header in the low 16 bits is read-only).
const MSI_CAPABILITY_REGISTER_MASK: u32 = 0x0071_0000;
// Guest-writable bits (enable/function-mask) of the MSI-X message control word.
const MSIX_CAPABILITY_REGISTER_MASK: u32 = 0xc000_0000;
const NUM_BAR_REGS: usize = 6;
// Byte offset of the capabilities-list head pointer in the configuration header.
const CAPABILITY_LIST_HEAD_OFFSET: usize = 0x34;
// Capabilities are laid out starting right after the standard 64-byte header...
const FIRST_CAPABILITY_OFFSET: usize = 0x40;
// ...and must end at or before this byte offset (see `add_capability`).
const CAPABILITY_MAX_OFFSET: usize = 192;
/// A PCI capability list. Devices can optionally specify capabilities in their configuration space.
pub trait PciCapability {
    /// Bytes of the PCI capability
    ///
    /// Must not include the two-byte capability header (id, next pointer);
    /// that header is generated by `PciConfiguration::add_capability`.
    fn bytes(&self) -> &[u8];
    /// Id of the PCI capability
    fn id(&self) -> PciCapabilityId;
}
// This encodes the BAR size as expected by the software running inside the guest.
// It assumes that bar_size is not 0
fn encode_64_bits_bar_size(bar_size: u64) -> (u32, u32) {
    assert_ne!(bar_size, 0);
    // The guest-visible encoding is !(size - 1), which for a non-zero size is
    // exactly the two's-complement negation of the size.
    let encoded = bar_size.wrapping_neg();
    let high = (encoded >> 32) as u32;
    // Truncating cast keeps the low 32 bits, intentionally.
    let low = encoded as u32;
    (high, low)
}
// This decodes the BAR size from the value stored in the BAR registers.
fn decode_64_bits_bar_size(bar_size_hi: u32, bar_size_lo: u32) -> u64 {
    // Reassemble the 64-bit encoded value and undo the !(size - 1) encoding
    // applied by `encode_64_bits_bar_size`.
    let encoded = (u64::from(bar_size_hi) << 32) | u64::from(bar_size_lo);
    let size = !encoded + 1;
    assert_ne!(size, 0);
    size
}
// Cached state of a single 32-bit BAR register.
#[derive(Debug, Default, Clone, Copy, Serialize, Deserialize)]
struct PciBar {
    // Last programmed register value (address bits plus flag bits).
    addr: u32,
    // This half of the size-probe encoding (see `encode_64_bits_bar_size`).
    size: u32,
    // Whether this BAR register was claimed via `add_pci_bar`.
    used: bool,
}
/// PCI configuration space state for (de)serialization
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PciConfigurationState {
    // Snapshot of all configuration registers.
    registers: Vec<u32>,
    // Writable-bit mask for each register.
    writable_bits: Vec<u32>,
    // Cached per-BAR-register state (address/size/used).
    bars: Vec<PciBar>,
    // Byte offset and size of the last added capability, if any.
    last_capability: Option<(usize, usize)>,
    // Register index of the MSI-X capability header, if present.
    msix_cap_reg_idx: Option<usize>,
}
#[derive(Debug)]
/// Contains the configuration space of a PCI node.
///
/// See the [specification](https://en.wikipedia.org/wiki/PCI_configuration_space).
/// The configuration space is accessed with DWORD reads and writes from the guest.
pub struct PciConfiguration {
    registers: [u32; NUM_CONFIGURATION_REGISTERS],
    writable_bits: [u32; NUM_CONFIGURATION_REGISTERS], // writable bits for each register.
    // Cached state of the six BAR registers.
    bars: [PciBar; NUM_BAR_REGS],
    // Contains the byte offset and size of the last capability.
    last_capability: Option<(usize, usize)>,
    // Register index of the MSI-X capability header, if one was added.
    msix_cap_reg_idx: Option<usize>,
    // Live MSI-X state; writes to the MSI-X message control word are forwarded here.
    msix_config: Option<Arc<Mutex<MsixConfig>>>,
}
impl PciConfiguration {
    #[allow(clippy::too_many_arguments)]
    /// Create a new type 0 PCI configuration
    ///
    /// Type 0 headers describe regular PCI devices (as opposed to bridges).
    /// Identification registers are filled from the arguments; BARs and
    /// capabilities start out empty.
    pub fn new_type0(
        vendor_id: u16,
        device_id: u16,
        revision_id: u8,
        class_code: PciClassCode,
        subclass: &dyn PciSubclass,
        subsystem_vendor_id: u16,
        subsystem_id: u16,
        msix_config: Option<Arc<Mutex<MsixConfig>>>,
    ) -> Self {
        let mut registers = [0u32; NUM_CONFIGURATION_REGISTERS];
        let mut writable_bits = [0u32; NUM_CONFIGURATION_REGISTERS];
        // Register 0: device ID (high word) | vendor ID (low word).
        registers[0] = (u32::from(device_id) << 16) | u32::from(vendor_id);
        // TODO(dverkamp): Status should be write-1-to-clear
        writable_bits[1] = 0x0000_ffff; // Status (r/o), command (r/w)
        // Register 2: class | subclass | prog IF (0) | revision ID.
        registers[2] = (u32::from(class_code.get_register_value()) << 24)
            | (u32::from(subclass.get_register_value()) << 16)
            | u32::from(revision_id);
        writable_bits[3] = 0x0000_00ff; // Cacheline size (r/w)
        registers[3] = 0x0000_0000; // Header type 0 (device)
        writable_bits[15] = 0x0000_00ff; // IRQ line (r/w)
        // Register 11: subsystem ID (high word) | subsystem vendor ID (low word).
        registers[11] = (u32::from(subsystem_id) << 16) | u32::from(subsystem_vendor_id);
        PciConfiguration {
            registers,
            writable_bits,
            bars: [PciBar::default(); NUM_BAR_REGS],
            last_capability: None,
            msix_cap_reg_idx: None,
            msix_config,
        }
    }
    /// Create a type 0 PCI configuration from snapshot state
    ///
    /// # Panics
    ///
    /// Panics if the vectors in `state` do not have the lengths the fixed-size
    /// arrays expect (`NUM_CONFIGURATION_REGISTERS` registers/writable bits,
    /// `NUM_BAR_REGS` BARs), i.e. if the snapshot state is malformed.
    pub fn type0_from_state(
        state: PciConfigurationState,
        msix_config: Option<Arc<Mutex<MsixConfig>>>,
    ) -> Self {
        PciConfiguration {
            registers: state.registers.try_into().unwrap(),
            writable_bits: state.writable_bits.try_into().unwrap(),
            bars: state.bars.try_into().unwrap(),
            last_capability: state.last_capability,
            msix_cap_reg_idx: state.msix_cap_reg_idx,
            msix_config,
        }
    }
    /// Create PCI configuration space state
    ///
    /// Clones the serializable parts of the configuration — everything except
    /// the live MSI-X handle, which is restored separately.
    pub fn state(&self) -> PciConfigurationState {
        PciConfigurationState {
            registers: self.registers.to_vec(),
            writable_bits: self.writable_bits.to_vec(),
            bars: self.bars.to_vec(),
            last_capability: self.last_capability,
            msix_cap_reg_idx: self.msix_cap_reg_idx,
        }
    }
/// Reads a 32bit register from `reg_idx` in the register map.
pub fn read_reg(&self, reg_idx: usize) -> u32 {
*(self.registers.get(reg_idx).unwrap_or(&0xffff_ffff))
}
/// Writes a 32bit register to `reg_idx` in the register map.
pub fn write_reg(&mut self, reg_idx: usize, value: u32) {
let mut mask = self.writable_bits[reg_idx];
if (BAR0_REG..BAR0_REG + NUM_BAR_REGS).contains(®_idx) {
// Handle very specific case where the BAR is being written with
// all 1's to retrieve the BAR size during next BAR reading.
if value == 0xffff_ffff {
mask &= self.bars[reg_idx - 4].size;
}
} else if reg_idx == ROM_BAR_REG {
// Handle very specific case where the BAR is being written with
// all 1's on bits 31-11 to retrieve the BAR size during next BAR
// reading.
if value & ROM_BAR_ADDR_MASK == ROM_BAR_ADDR_MASK {
mask = 0;
}
}
if let Some(r) = self.registers.get_mut(reg_idx) {
*r = (*r & !self.writable_bits[reg_idx]) | (value & mask);
} else {
warn!("bad PCI register write {}", reg_idx);
}
}
/// Writes a 16bit word to `offset`. `offset` must be 16bit aligned.
pub fn write_word(&mut self, offset: usize, value: u16) {
let shift = match offset % 4 {
0 => 0,
2 => 16,
_ => {
warn!("bad PCI config write offset {}", offset);
return;
}
};
let reg_idx = offset / 4;
if let Some(r) = self.registers.get_mut(reg_idx) {
let writable_mask = self.writable_bits[reg_idx];
let mask = (0xffffu32 << shift) & writable_mask;
let shifted_value = (u32::from(value) << shift) & writable_mask;
*r = *r & !mask | shifted_value;
} else {
warn!("bad PCI config write offset {}", offset);
}
}
    /// Writes a byte to `offset`.
    ///
    /// Bits marked read-only in `writable_bits` are preserved.
    pub fn write_byte(&mut self, offset: usize, value: u8) {
        self.write_byte_internal(offset, value, true);
    }
/// Writes a byte to `offset`, optionally enforcing read-only bits.
fn write_byte_internal(&mut self, offset: usize, value: u8, apply_writable_mask: bool) {
let shift = (offset % 4) * 8;
let reg_idx = offset / 4;
if let Some(r) = self.registers.get_mut(reg_idx) {
let writable_mask = if apply_writable_mask {
self.writable_bits[reg_idx]
} else {
0xffff_ffff
};
let mask = (0xffu32 << shift) & writable_mask;
let shifted_value = (u32::from(value) << shift) & writable_mask;
*r = *r & !mask | shifted_value;
} else {
warn!("bad PCI config write offset {}", offset);
}
}
    /// Add the [addr, addr + size) BAR region.
    ///
    /// Configures the specified BAR to report this region and size to the guest kernel.
    /// Enforces a few constraints (i.e, region size must be power of two, register not already
    /// used).
    ///
    /// # Panics
    ///
    /// Panics if `bar_idx` is not 0, if BAR registers 0/1 are already in use,
    /// if `size` is zero or not a power of two, or if `addr + size` overflows.
    pub fn add_pci_bar(&mut self, bar_idx: usize, addr: u64, size: u64) {
        let reg_idx = BAR0_REG + bar_idx;
        // These are a few constraints that are imposed due to the fact
        // that only VirtIO devices are actually allocating a BAR. Moreover, this is
        // a single 64-bit BAR. Not conforming to these requirements is an internal
        // Firecracker bug.
        // We are only using BAR 0
        assert_eq!(bar_idx, 0);
        // We shouldn't be trying to use the same BAR twice
        assert!(!self.bars[0].used);
        assert!(!self.bars[1].used);
        // We can't have a size of 0
        assert_ne!(size, 0);
        // BAR size needs to be a power of two
        assert!(size.is_power_of_two());
        // We should not be overflowing the address space
        addr.checked_add(size - 1).unwrap();
        // Encode the BAR size as expected by the software running in
        // the guest.
        let (bar_size_hi, bar_size_lo) = encode_64_bits_bar_size(size);
        // Upper half of the 64-bit BAR: full 32 address bits, all writable.
        self.registers[reg_idx + 1] = (addr >> 32) as u32;
        self.writable_bits[reg_idx + 1] = 0xffff_ffff;
        self.bars[bar_idx + 1].addr = self.registers[reg_idx + 1];
        self.bars[bar_idx].size = bar_size_lo;
        self.bars[bar_idx + 1].size = bar_size_hi;
        self.bars[bar_idx + 1].used = true;
        // Addresses of memory BARs are 16-byte aligned so the lower 4 bits are always 0. Within
        // the register we use this 4 bits to encode extra information about the BAR. The meaning
        // of these bits is:
        //
        // | Bit 3        | Bits 2-1 | Bit 0    |
        // | Prefetchable | type     | Always 0 |
        //
        // Non-prefetchable, 64 bits BAR region
        self.registers[reg_idx] = (((addr & 0xffff_ffff) as u32) & BAR_MEM_ADDR_MASK) | 4u32;
        self.writable_bits[reg_idx] = BAR_MEM_ADDR_MASK;
        self.bars[bar_idx].addr = self.registers[reg_idx];
        self.bars[bar_idx].used = true;
    }
/// Returns the address of the given BAR region.
///
/// This assumes that `bar_idx` is a valid BAR register.
pub fn get_bar_addr(&self, bar_idx: usize) -> u64 {
assert!(bar_idx < NUM_BAR_REGS);
let reg_idx = BAR0_REG + bar_idx;
(u64::from(self.bars[bar_idx].addr & self.writable_bits[reg_idx]))
| (u64::from(self.bars[bar_idx + 1].addr) << 32)
}
    /// Adds the capability `cap_data` to the list of capabilities.
    ///
    /// `cap_data` should not include the two-byte PCI capability header (type, next).
    /// Correct values will be generated automatically based on `cap_data.id()` and
    /// `cap_data.len()`.
    ///
    /// Returns the byte offset of the new capability in configuration space.
    ///
    /// # Panics
    ///
    /// Panics if the capability would end past `CAPABILITY_MAX_OFFSET`.
    pub fn add_capability(&mut self, cap_data: &dyn PciCapability) -> usize {
        // Two extra bytes for the generated (id, next) header.
        let total_len = cap_data.bytes().len() + 2;
        // New capability starts at the next DWORD after the previous one; the
        // previous capability's `next` pointer (or the list head) is patched
        // to point at it.
        let (cap_offset, tail_offset) = match self.last_capability {
            Some((offset, len)) => (Self::next_dword(offset, len), offset + 1),
            None => (FIRST_CAPABILITY_OFFSET, CAPABILITY_LIST_HEAD_OFFSET),
        };
        // We know that the capabilities we are using have a valid size (doesn't overflow) and that
        // we add capabilities that fit in the available space. If any of these requirements don't
        // hold, this is due to a Firecracker bug.
        let end_offset = cap_offset.checked_add(total_len).unwrap();
        assert!(end_offset <= CAPABILITY_MAX_OFFSET);
        self.registers[STATUS_REG] |= STATUS_REG_CAPABILITIES_USED_MASK;
        self.write_byte_internal(tail_offset, cap_offset.try_into().unwrap(), false);
        self.write_byte_internal(cap_offset, cap_data.id() as u8, false);
        self.write_byte_internal(cap_offset + 1, 0, false); // Next pointer.
        for (i, byte) in cap_data.bytes().iter().enumerate() {
            self.write_byte_internal(cap_offset + i + 2, *byte, false);
        }
        self.last_capability = Some((cap_offset, total_len));
        // Restrict guest-writable bits of the capability's first register for
        // interrupt capabilities (only parts of the message control word may
        // be written by the guest).
        match cap_data.id() {
            PciCapabilityId::MessageSignalledInterrupts => {
                self.writable_bits[cap_offset / 4] = MSI_CAPABILITY_REGISTER_MASK;
            }
            PciCapabilityId::MsiX => {
                self.msix_cap_reg_idx = Some(cap_offset / 4);
                self.writable_bits[self.msix_cap_reg_idx.unwrap()] = MSIX_CAPABILITY_REGISTER_MASK;
            }
            _ => {}
        }
        cap_offset
    }
// Find the next aligned offset after the one given.
fn next_dword(offset: usize, len: usize) -> usize {
let next = offset + len;
(next + 3) & !3
}
    /// Write a PCI configuration register
    ///
    /// `reg_idx` selects the 32-bit register and `offset` the byte offset
    /// inside it. `data` must be 1, 2 or 4 bytes that fit within the register;
    /// invalid accesses are silently ignored. Writes touching the MSI-X
    /// message control word are also forwarded to the MSI-X state.
    pub fn write_config_register(&mut self, reg_idx: usize, offset: u64, data: &[u8]) {
        if reg_idx >= NUM_CONFIGURATION_REGISTERS {
            return;
        }
        // Reject writes that would spill past the end of the 4-byte register.
        if u64_to_usize(offset) + data.len() > 4 {
            return;
        }
        // Handle potential write to MSI-X message control register
        if let Some(msix_cap_reg_idx) = self.msix_cap_reg_idx
            && let Some(msix_config) = &self.msix_config
        {
            if msix_cap_reg_idx == reg_idx && offset == 2 && data.len() == 2 {
                // 2-bytes write in the Message Control field
                msix_config
                    .lock()
                    .unwrap()
                    .set_msg_ctl(LittleEndian::read_u16(data));
            } else if msix_cap_reg_idx == reg_idx && offset == 0 && data.len() == 4 {
                // 4 bytes write at the beginning. Ignore the first 2 bytes which are the
                // capability id and next capability pointer
                msix_config
                    .lock()
                    .unwrap()
                    .set_msg_ctl((LittleEndian::read_u32(data) >> 16) as u16);
            }
        }
        match data.len() {
            1 => self.write_byte(reg_idx * 4 + u64_to_usize(offset), data[0]),
            2 => self.write_word(
                reg_idx * 4 + u64_to_usize(offset),
                u16::from(data[0]) | (u16::from(data[1]) << 8),
            ),
            4 => self.write_reg(reg_idx, LittleEndian::read_u32(data)),
            _ => (),
        }
    }
    /// Detect whether the guest wants to reprogram the address of a BAR
    ///
    /// Must be called with the raw 4-byte value the guest writes to a BAR
    /// register, *before* the write is applied to the register file. On
    /// detection, updates the cached BAR addresses and returns the old/new
    /// base and the BAR length; otherwise returns `None`.
    pub fn detect_bar_reprogramming(
        &mut self,
        reg_idx: usize,
        data: &[u8],
    ) -> Option<BarReprogrammingParams> {
        if data.len() != 4 {
            return None;
        }
        let value = LittleEndian::read_u32(data);
        let mask = self.writable_bits[reg_idx];
        if !(BAR0_REG..BAR0_REG + NUM_BAR_REGS).contains(&reg_idx) {
            return None;
        }
        // Ignore the case where the BAR size is being asked for.
        if value == 0xffff_ffff {
            return None;
        }
        let bar_idx = reg_idx - 4;
        // Do not reprogram BARs we are not using
        if !self.bars[bar_idx].used {
            return None;
        }
        // We are always using 64bit BARs, so two BAR registers. We don't do anything until
        // the upper BAR is modified, otherwise we would be moving the BAR to a wrong
        // location in memory.
        if bar_idx == 0 {
            return None;
        }
        // The lower BAR (of this 64bit BAR) has been reprogrammed to a different value
        // than it used to be
        if (self.registers[reg_idx - 1] & self.writable_bits[reg_idx - 1])
            != (self.bars[bar_idx - 1].addr & self.writable_bits[reg_idx - 1]) ||
            // Or the lower BAR hasn't been changed but the upper one is being reprogrammed
            // now to a different value
            (value & mask) != (self.bars[bar_idx].addr & mask)
        {
            info!(
                "Detected BAR reprogramming: (BAR {}) 0x{:x}->0x{:x}",
                reg_idx, self.registers[reg_idx], value
            );
            // Old base comes from the cached addresses; new base combines the
            // incoming upper half with the already-written lower register.
            let old_base = (u64::from(self.bars[bar_idx].addr & mask) << 32)
                | u64::from(self.bars[bar_idx - 1].addr & self.writable_bits[reg_idx - 1]);
            let new_base = (u64::from(value & mask) << 32)
                | u64::from(self.registers[reg_idx - 1] & self.writable_bits[reg_idx - 1]);
            let len = decode_64_bits_bar_size(self.bars[bar_idx].size, self.bars[bar_idx - 1].size);
            self.bars[bar_idx].addr = value;
            self.bars[bar_idx - 1].addr = self.registers[reg_idx - 1];
            return Some(BarReprogrammingParams {
                old_base,
                new_base,
                len,
            });
        }
        None
    }
}
#[cfg(test)]
mod tests {
use pci::PciMultimediaSubclass;
use vm_memory::ByteValued;
use super::*;
use crate::pci::msix::MsixCap;
    // Minimal fixed-size vendor-specific capability used to exercise
    // `add_capability`.
    #[repr(C, packed)]
    #[derive(Clone, Copy, Default)]
    #[allow(dead_code)]
    struct TestCap {
        len: u8,
        foo: u8,
    }
    // SAFETY: All members are simple numbers and any value is valid.
    unsafe impl ByteValued for TestCap {}
    impl PciCapability for TestCap {
        fn bytes(&self) -> &[u8] {
            self.as_slice()
        }
        fn id(&self) -> PciCapabilityId {
            PciCapabilityId::VendorSpecific
        }
    }
    // Variable-length capability used to overflow the capability space.
    struct BadCap {
        data: Vec<u8>,
    }
    impl BadCap {
        // Builds a capability whose payload is `len` bytes (0, 1, ..., len - 1).
        fn new(len: u8) -> Self {
            Self {
                data: (0..len).collect(),
            }
        }
    }
    impl PciCapability for BadCap {
        fn bytes(&self) -> &[u8] {
            &self.data
        }
        fn id(&self) -> PciCapabilityId {
            PciCapabilityId::VendorSpecific
        }
    }
#[test]
#[should_panic]
fn test_too_big_capability() {
let mut cfg = default_pci_config();
cfg.add_capability(&BadCap::new(127));
}
#[test]
#[should_panic]
fn test_capability_space_overflow() {
let mut cfg = default_pci_config();
cfg.add_capability(&BadCap::new(62));
cfg.add_capability(&BadCap::new(62));
cfg.add_capability(&BadCap::new(0));
}
#[test]
fn test_add_capability() {
let mut cfg = default_pci_config();
// Reset capabilities
cfg.last_capability = None;
// Add two capabilities with different contents.
let cap1 = TestCap { len: 4, foo: 0xAA };
let cap1_offset = cfg.add_capability(&cap1);
assert_eq!(cap1_offset % 4, 0);
let cap2 = TestCap {
len: 0x04,
foo: 0x55,
};
let cap2_offset = cfg.add_capability(&cap2);
assert_eq!(cap2_offset % 4, 0);
// The capability list head should be pointing to cap1.
let cap_ptr = cfg.read_reg(CAPABILITY_LIST_HEAD_OFFSET / 4) & 0xFF;
assert_eq!(cap1_offset, cap_ptr as usize);
// Verify the contents of the capabilities.
let cap1_data = cfg.read_reg(cap1_offset / 4);
assert_eq!(cap1_data & 0xFF, 0x09); // capability ID
assert_eq!((cap1_data >> 8) & 0xFF, u32::try_from(cap2_offset).unwrap()); // next capability pointer
assert_eq!((cap1_data >> 16) & 0xFF, 0x04); // cap1.len
assert_eq!((cap1_data >> 24) & 0xFF, 0xAA); // cap1.foo
let cap2_data = cfg.read_reg(cap2_offset / 4);
assert_eq!(cap2_data & 0xFF, 0x09); // capability ID
assert_eq!((cap2_data >> 8) & 0xFF, 0x00); // next capability pointer
assert_eq!((cap2_data >> 16) & 0xFF, 0x04); // cap2.len
assert_eq!((cap2_data >> 24) & 0xFF, 0x55); // cap2.foo
}
#[test]
fn test_msix_capability() {
let mut cfg = default_pci_config();
// Information about the MSI-X capability layout: https://wiki.osdev.org/PCI#Enabling_MSI-X
let msix_cap = MsixCap::new(
3, // Using BAR3 for message control table
1024, // 1024 MSI-X vectors
0x4000, // Offset of message control table inside the BAR
4, // BAR4 used for pending control bit
0x420, // Offset of pending bit array (PBA) inside BAR
);
cfg.add_capability(&msix_cap);
let cap_reg = FIRST_CAPABILITY_OFFSET / 4;
let reg = cfg.read_reg(cap_reg);
// Capability ID is MSI-X
assert_eq!(
PciCapabilityId::from((reg & 0xff) as u8),
PciCapabilityId::MsiX
);
// We only have one capability, so `next` should be 0
assert_eq!(((reg >> 8) & 0xff) as u8, 0);
let msg_ctl = (reg >> 16) as u16;
// MSI-X is enabled
assert_eq!(msg_ctl & 0x8000, 0x8000);
// Vectors are not masked
assert_eq!(msg_ctl & 0x4000, 0x0);
// Reserved bits are 0
assert_eq!(msg_ctl & 0x3800, 0x0);
// We've got 1024 vectors (Table size is N-1 encoded)
assert_eq!((msg_ctl & 0x7ff) + 1, 1024);
let reg = cfg.read_reg(cap_reg + 1);
// We are using BAR3
assert_eq!(reg & 0x7, 3);
// Message Control Table is located in offset 0x4000 inside the BAR
// We don't need to shift. Offset needs to be 8-byte aligned - so BIR
// is stored in its last 3 bits (which we need to mask out).
assert_eq!(reg & 0xffff_fff8, 0x4000);
let reg = cfg.read_reg(cap_reg + 2);
// PBA is 0x420 bytes inside BAR4
assert_eq!(reg & 0x7, 4);
assert_eq!(reg & 0xffff_fff8, 0x420);
// Check read/write mask
// Capability Id of MSI-X is 0x11
cfg.write_config_register(cap_reg, 0, &[0x0]);
assert_eq!(
PciCapabilityId::from((cfg.read_reg(cap_reg) & 0xff) as u8),
PciCapabilityId::MsiX
);
// Cannot override next capability pointer
cfg.write_config_register(cap_reg, 1, &[0x42]);
assert_eq!((cfg.read_reg(cap_reg) >> 8) & 0xff, 0);
// We are writing this:
//
// meaning: | MSI enabled | Vectors Masked | Reserved | Table size |
// bit: | 15 | 14 | 13 - 11 | 0 - 10 |
// R/W: | R/W | R/W | R | R |
let msg_ctl = (cfg.read_reg(cap_reg) >> 16) as u16;
// Try to flip all bits
cfg.write_config_register(cap_reg, 2, &u16::to_le_bytes(!msg_ctl));
let msg_ctl = (cfg.read_reg(cap_reg) >> 16) as u16;
// MSI enabled and Vectors masked should be flipped (MSI disabled and vectors masked)
assert_eq!(msg_ctl & 0xc000, 0x4000);
// Reserved bits should still be 0
assert_eq!(msg_ctl & 0x3800, 0);
// Table size should not have changed
assert_eq!((msg_ctl & 0x07ff) + 1, 1024);
// Table offset is read only
let table_offset = cfg.read_reg(cap_reg + 1);
// Try to flip all bits
cfg.write_config_register(cap_reg + 1, 0, &u32::to_le_bytes(!table_offset));
// None should be flipped
assert_eq!(cfg.read_reg(cap_reg + 1), table_offset);
// PBA offset also
let pba_offset = cfg.read_reg(cap_reg + 2);
// Try to flip all bits
cfg.write_config_register(cap_reg + 2, 0, &u32::to_le_bytes(!pba_offset));
// None should be flipped
assert_eq!(cfg.read_reg(cap_reg + 2), pba_offset);
}
    // Builds the multimedia/audio test device used by most tests in this module.
    fn default_pci_config() -> PciConfiguration {
        PciConfiguration::new_type0(
            0x1234, // vendor id
            0x5678, // device id
            0x1,    // revision
            PciClassCode::MultimediaController,
            &PciMultimediaSubclass::AudioController,
            0xABCD, // subsystem vendor id
            0x2468, // subsystem id
            None,   // no MSI-X
        )
    }
#[test]
fn class_code() {
let cfg = default_pci_config();
let class_reg = cfg.read_reg(2);
let class_code = (class_reg >> 24) & 0xFF;
let subclass = (class_reg >> 16) & 0xFF;
let prog_if = (class_reg >> 8) & 0xFF;
assert_eq!(class_code, 0x04);
assert_eq!(subclass, 0x01);
assert_eq!(prog_if, 0x0);
}
#[test]
#[should_panic]
fn test_encode_zero_sized_bar() {
encode_64_bits_bar_size(0);
}
#[test]
#[should_panic]
fn test_decode_zero_sized_bar() {
decode_64_bits_bar_size(0, 0);
}
#[test]
fn test_bar_size_encoding() {
// According to OSDev wiki (https://wiki.osdev.org/PCI#Address_and_size_of_the_BAR):
//
// > To determine the amount of address space needed by a PCI device, you must save the
// > original value of the BAR, write a value of all 1's to the register, then read it back.
// > The amount of memory can then be determined by masking the information bits, performing
// > a bitwise NOT ('~' in C), and incrementing the value by 1. The original value of the
// BAR > should then be restored. The BAR register is naturally aligned and as such you can
// only > modify the bits that are set. For example, if a device utilizes 16 MB it will
// have BAR0 > filled with 0xFF000000 (0x1000000 after decoding) and you can only modify
// the upper > 8-bits.
//
// So, we encode a 64 bits size and then store it as a 2 32bit addresses (we use
// two BARs).
let (hi, lo) = encode_64_bits_bar_size(0xffff_ffff_ffff_fff0);
assert_eq!(hi, 0);
assert_eq!(lo, 0x0000_0010);
assert_eq!(decode_64_bits_bar_size(hi, lo), 0xffff_ffff_ffff_fff0);
}
#[test]
#[should_panic]
fn test_bar_size_no_power_of_two() {
let mut pci_config = default_pci_config();
pci_config.add_pci_bar(0, 0x1000, 0x1001);
}
#[test]
#[should_panic]
fn test_bad_bar_index() {
let mut pci_config = default_pci_config();
pci_config.add_pci_bar(NUM_BAR_REGS, 0x1000, 0x1000);
}
#[test]
#[should_panic]
fn test_bad_64bit_bar_index() {
let mut pci_config = default_pci_config();
pci_config.add_pci_bar(NUM_BAR_REGS - 1, 0x1000, 0x1000);
}
#[test]
#[should_panic]
fn test_bar_size_overflows() {
let mut pci_config = default_pci_config();
pci_config.add_pci_bar(0, u64::MAX, 0x2);
}
#[test]
#[should_panic]
fn test_lower_bar_free_upper_used() {
let mut pci_config = default_pci_config();
pci_config.add_pci_bar(1, 0x1000, 0x1000);
pci_config.add_pci_bar(0, 0x1000, 0x1000);
}
#[test]
#[should_panic]
fn test_lower_bar_used() {
let mut pci_config = default_pci_config();
pci_config.add_pci_bar(0, 0x1000, 0x1000);
pci_config.add_pci_bar(0, 0x1000, 0x1000);
}
#[test]
#[should_panic]
fn test_upper_bar_used() {
let mut pci_config = default_pci_config();
pci_config.add_pci_bar(0, 0x1000, 0x1000);
pci_config.add_pci_bar(1, 0x1000, 0x1000);
}
#[test]
fn test_add_pci_bar() {
let mut pci_config = default_pci_config();
pci_config.add_pci_bar(0, 0x1_0000_0000, 0x1000);
assert_eq!(pci_config.get_bar_addr(0), 0x1_0000_0000);
assert_eq!(pci_config.read_reg(BAR0_REG) & 0xffff_fff0, 0x0);
assert!(pci_config.bars[0].used);
assert_eq!(pci_config.read_reg(BAR0_REG + 1), 1);
assert!(pci_config.bars[0].used);
}
#[test]
fn test_access_invalid_reg() {
let mut pci_config = default_pci_config();
// Can't read past the end of the configuration space
assert_eq!(
pci_config.read_reg(NUM_CONFIGURATION_REGISTERS),
0xffff_ffff
);
// Read out all of configuration space
let config_space: Vec<u32> = (0..NUM_CONFIGURATION_REGISTERS)
.map(|reg_idx| pci_config.read_reg(reg_idx))
.collect();
// Various invalid write accesses
// Past the end of config space
pci_config.write_config_register(NUM_CONFIGURATION_REGISTERS, 0, &[0x42]);
pci_config.write_config_register(NUM_CONFIGURATION_REGISTERS, 0, &[0x42, 0x42]);
pci_config.write_config_register(NUM_CONFIGURATION_REGISTERS, 0, &[0x42, 0x42, 0x42, 0x42]);
// Past register boundaries
pci_config.write_config_register(NUM_CONFIGURATION_REGISTERS, 1, &[0x42, 0x42, 0x42, 0x42]);
pci_config.write_config_register(NUM_CONFIGURATION_REGISTERS, 2, &[0x42, 0x42, 0x42]);
pci_config.write_config_register(NUM_CONFIGURATION_REGISTERS, 3, &[0x42, 0x42]);
pci_config.write_config_register(NUM_CONFIGURATION_REGISTERS, 4, &[0x42]);
pci_config.write_config_register(NUM_CONFIGURATION_REGISTERS, 5, &[]);
for (reg_idx, reg) in config_space.iter().enumerate() {
assert_eq!(*reg, pci_config.read_reg(reg_idx));
}
}
#[test]
fn test_detect_bar_reprogramming() {
let mut pci_config = default_pci_config();
// Trying to reprogram with something less than 4 bytes (length of the address) should fail
assert!(
pci_config
.detect_bar_reprogramming(BAR0_REG, &[0x13])
.is_none()
);
assert!(
pci_config
.detect_bar_reprogramming(BAR0_REG, &[0x13, 0x12])
.is_none()
);
assert!(
pci_config
.detect_bar_reprogramming(BAR0_REG, &[0x13, 0x12])
.is_none()
);
assert!(
pci_config
.detect_bar_reprogramming(BAR0_REG, &[0x13, 0x12, 0x16])
.is_none()
);
// Writing all 1s is a special case where we're actually asking for the size of the BAR
assert!(
pci_config
.detect_bar_reprogramming(BAR0_REG, &u32::to_le_bytes(0xffff_ffff))
.is_none()
);
// Trying to reprogram a BAR that hasn't be initialized does nothing
for reg_idx in BAR0_REG..BAR0_REG + NUM_BAR_REGS {
assert!(
pci_config
.detect_bar_reprogramming(reg_idx, &u32::to_le_bytes(0x1312_4243))
.is_none()
);
}
// Reprogramming of a 64bit BAR
pci_config.add_pci_bar(0, 0x13_1200_0000, 0x8000);
// First we write the lower 32 bits and this shouldn't cause any reprogramming
assert!(
pci_config
.detect_bar_reprogramming(BAR0_REG, &u32::to_le_bytes(0x4200_0000))
.is_none()
);
pci_config.write_config_register(BAR0_REG, 0, &u32::to_le_bytes(0x4200_0000));
// Writing the upper 32 bits should trigger the reprogramming
assert_eq!(
pci_config.detect_bar_reprogramming(BAR0_REG + 1, &u32::to_le_bytes(0x84)),
Some(BarReprogrammingParams {
old_base: 0x13_1200_0000,
new_base: 0x84_4200_0000,
len: 0x8000,
})
);
pci_config.write_config_register(BAR0_REG + 1, 0, &u32::to_le_bytes(0x84));
// Trying to reprogram the upper bits directly (without first touching the lower bits)
// should trigger a reprogramming
assert_eq!(
pci_config.detect_bar_reprogramming(BAR0_REG + 1, &u32::to_le_bytes(0x1312)),
Some(BarReprogrammingParams {
old_base: 0x84_4200_0000,
new_base: 0x1312_4200_0000,
len: 0x8000,
})
);
pci_config.write_config_register(BAR0_REG + 1, 0, &u32::to_le_bytes(0x1312));
// Attempting to reprogram the BAR with the same address should not have any effect
assert!(
pci_config
.detect_bar_reprogramming(BAR0_REG, &u32::to_le_bytes(0x4200_0000))
.is_none()
);
assert!(
pci_config
.detect_bar_reprogramming(BAR0_REG + 1, &u32::to_le_bytes(0x1312))
.is_none()
);
}
#[test]
fn test_rom_bar() {
let mut pci_config = default_pci_config();
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/pci/mod.rs | src/vmm/src/pci/mod.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
/// PCI bus logic
pub mod bus;
/// PCI configuration space handling
pub mod configuration;
/// MSI-X logic
pub mod msix;
use std::fmt::Debug;
use std::sync::{Arc, Barrier};
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
/// Parameters for performing a BAR reprogramming operation
pub struct BarReprogrammingParams {
    /// Previous address of the BAR
    pub old_base: u64,
    /// New address of the BAR
    pub new_base: u64,
    /// Size of the BAR, in bytes
    pub len: u64,
}
/// Common logic of all PCI devices
pub trait PciDevice: Send {
    /// Sets a register in the configuration space.
    /// * `reg_idx` - The index of the config register to modify.
    /// * `offset` - Offset into the register.
    /// * `data` - The bytes to write at `offset` within the register.
    ///
    /// Returns an optional barrier the caller must wait on before resuming
    /// vCPUs, when the write requires synchronization.
    fn write_config_register(
        &mut self,
        reg_idx: usize,
        offset: u64,
        data: &[u8],
    ) -> Option<Arc<Barrier>>;
    /// Gets a register from the configuration space.
    /// * `reg_idx` - The index of the config register to read.
    fn read_config_register(&mut self, reg_idx: usize) -> u32;
    /// Detects if a BAR is being reprogrammed.
    ///
    /// Returns the old/new base and length when the config-space write at
    /// `_reg_idx` would relocate a BAR; `None` (the default) otherwise.
    fn detect_bar_reprogramming(
        &mut self,
        _reg_idx: usize,
        _data: &[u8],
    ) -> Option<BarReprogrammingParams> {
        None
    }
    /// Reads from a BAR region mapped into the device.
    /// * `_base` - Base guest address of the BAR.
    /// * `_offset` - Offset of the access within the BAR.
    /// * `_data` - Filled with the data read; untouched by the default no-op.
    fn read_bar(&mut self, _base: u64, _offset: u64, _data: &mut [u8]) {}
    /// Writes to a BAR region mapped into the device.
    /// * `_base` - Base guest address of the BAR.
    /// * `_offset` - Offset of the access within the BAR.
    /// * `_data` - The data to write.
    fn write_bar(&mut self, _base: u64, _offset: u64, _data: &[u8]) -> Option<Arc<Barrier>> {
        None
    }
    /// Relocates the BAR to a different address in guest address space.
    /// The default implementation succeeds without doing anything.
    fn move_bar(&mut self, _old_base: u64, _new_base: u64) -> Result<(), DeviceRelocationError> {
        Ok(())
    }
}
/// Errors that can occur while relocating a device BAR.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum DeviceRelocationError {
    /// Device relocation not supported.
    NotSupported,
}
/// This trait defines a set of functions which can be triggered whenever a
/// PCI device is modified in any way.
pub trait DeviceRelocation: Send + Sync {
    /// The BAR needs to be moved to a different location in the guest address
    /// space. This follows a decision from the software running in the guest.
    ///
    /// * `old_base`/`new_base` - Previous and new guest base address of the BAR.
    /// * `len` - Size of the BAR, in bytes.
    /// * `pci_dev` - The device whose BAR is being moved.
    fn move_bar(
        &self,
        old_base: u64,
        new_base: u64,
        len: u64,
        pci_dev: &mut dyn PciDevice,
    ) -> Result<(), DeviceRelocationError>;
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/pci/msix.rs | src/vmm/src/pci/msix.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
//
use std::sync::Arc;
use byteorder::{ByteOrder, LittleEndian};
use pci::PciCapabilityId;
use serde::{Deserialize, Serialize};
use vm_memory::ByteValued;
use crate::Vm;
use crate::logger::{debug, error, warn};
use crate::pci::configuration::PciCapability;
use crate::snapshot::Persist;
use crate::vstate::interrupts::{InterruptError, MsixVectorConfig, MsixVectorGroup};
/// Upper bound on MSI-X vectors a single device may expose.
const MAX_MSIX_VECTORS_PER_DEVICE: u16 = 2048;
/// Size in bytes of one MSI-X table entry (4 dwords).
const MSIX_TABLE_ENTRIES_MODULO: u64 = 16;
/// Size in bytes of one Pending Bit Array entry (one u64).
const MSIX_PBA_ENTRIES_MODULO: u64 = 8;
/// Number of pending bits packed into a single PBA entry.
const BITS_PER_PBA_ENTRY: usize = 64;
/// Bit position of the Function Mask bit in the Message Control register.
const FUNCTION_MASK_BIT: u8 = 14;
/// Bit position of the MSI-X Enable bit in the Message Control register.
const MSIX_ENABLE_BIT: u8 = 15;
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
/// MSI-X table entries
pub struct MsixTableEntry {
    /// Lower 32 bits of the vector address
    pub msg_addr_lo: u32,
    /// Upper 32 bits of the vector address
    pub msg_addr_hi: u32,
    /// Vector data
    pub msg_data: u32,
    /// Enable/Disable and (un)masking control; bit 0 is the per-vector Mask bit
    pub vector_ctl: u32,
}
impl MsixTableEntry {
    /// Returns `true` if this vector is masked (bit 0 of the Vector Control
    /// word is the per-vector Mask bit).
    pub fn masked(&self) -> bool {
        (self.vector_ctl & 0x1) != 0
    }
}
impl Default for MsixTableEntry {
    // Per the MSI-X spec, table entries come out of reset with the
    // per-vector Mask bit (bit 0 of Vector Control) set.
    fn default() -> Self {
        MsixTableEntry {
            msg_addr_lo: 0,
            msg_addr_hi: 0,
            msg_data: 0,
            vector_ctl: 0x1,
        }
    }
}
#[derive(Debug, Clone, Serialize, Deserialize)]
/// State for (de)serializing MSI-X configuration
pub struct MsixConfigState {
    // Snapshot of the vector table.
    table_entries: Vec<MsixTableEntry>,
    // Snapshot of the Pending Bit Array.
    pba_entries: Vec<u64>,
    // Function Mask bit of the Message Control register.
    masked: bool,
    // MSI-X Enable bit of the Message Control register.
    enabled: bool,
    // Serialized form of the interrupt vector group (see `MsixVectorGroup::save`).
    vectors: Vec<u32>,
}
/// MSI-X configuration
pub struct MsixConfig {
    /// Vector table entries
    pub table_entries: Vec<MsixTableEntry>,
    /// Pending bit array
    pub pba_entries: Vec<u64>,
    /// Id of the device using this set of vectors
    pub devid: u32,
    /// Interrupts vectors used
    pub vectors: Arc<MsixVectorGroup>,
    /// Whether vectors are masked (Function Mask bit of Message Control)
    pub masked: bool,
    /// Whether vectors are enabled (MSI-X Enable bit of Message Control)
    pub enabled: bool,
}
// Hand-written impl: `vectors` is deliberately left out of the debug output.
impl std::fmt::Debug for MsixConfig {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("MsixConfig")
            .field("table_entries", &self.table_entries)
            .field("pba_entries", &self.pba_entries)
            .field("devid", &self.devid)
            .field("masked", &self.masked)
            .field("enabled", &self.enabled)
            .finish()
    }
}
impl MsixConfig {
    /// Create a new MSI-X configuration
    ///
    /// All vectors start out function-masked and disabled, with every table
    /// entry individually masked (see [`MsixTableEntry::default`]).
    ///
    /// # Panics
    ///
    /// Panics if `vectors` holds more than [`MAX_MSIX_VECTORS_PER_DEVICE`] vectors.
    pub fn new(vectors: Arc<MsixVectorGroup>, devid: u32) -> Self {
        assert!(vectors.num_vectors() <= MAX_MSIX_VECTORS_PER_DEVICE);
        let mut table_entries: Vec<MsixTableEntry> = Vec::new();
        table_entries.resize_with(vectors.num_vectors() as usize, Default::default);
        let mut pba_entries: Vec<u64> = Vec::new();
        // One pending bit per vector, BITS_PER_PBA_ENTRY bits per PBA entry.
        let num_pba_entries: usize = (vectors.num_vectors() as usize).div_ceil(BITS_PER_PBA_ENTRY);
        pba_entries.resize_with(num_pba_entries, Default::default);
        MsixConfig {
            table_entries,
            pba_entries,
            devid,
            vectors,
            masked: true,
            enabled: false,
        }
    }
    /// Create an MSI-X configuration from snapshot state
    ///
    /// Restores the vector group and, when MSI-X was enabled and not
    /// function-masked at snapshot time, re-establishes the interrupt routes
    /// of all individually unmasked entries before enabling the group.
    pub fn from_state(
        state: MsixConfigState,
        vm: Arc<Vm>,
        devid: u32,
    ) -> Result<Self, InterruptError> {
        let vectors = Arc::new(MsixVectorGroup::restore(vm, &state.vectors)?);
        if state.enabled && !state.masked {
            for (idx, table_entry) in state.table_entries.iter().enumerate() {
                if table_entry.masked() {
                    continue;
                }
                let config = MsixVectorConfig {
                    high_addr: table_entry.msg_addr_hi,
                    low_addr: table_entry.msg_addr_lo,
                    data: table_entry.msg_data,
                    devid,
                };
                vectors.update(idx, config, state.masked, true)?;
            }
            // Enable the vector group exactly once, after all routes have been
            // configured. This mirrors `set_msg_ctl`, which enables the group
            // whenever MSI-X is enabled and not function-masked, independently
            // of per-vector Mask bits. (Previously `enable()` was called once
            // per unmasked entry inside the loop above, and never at all if
            // every entry happened to be individually masked.)
            vectors.enable()?;
        }
        Ok(MsixConfig {
            table_entries: state.table_entries,
            pba_entries: state.pba_entries,
            devid,
            vectors,
            masked: state.masked,
            enabled: state.enabled,
        })
    }
    /// Create the state object for serializing MSI-X vectors
    pub fn state(&self) -> MsixConfigState {
        MsixConfigState {
            table_entries: self.table_entries.clone(),
            pba_entries: self.pba_entries.clone(),
            masked: self.masked,
            enabled: self.enabled,
            vectors: self.vectors.save(),
        }
    }
    /// Set the MSI-X control message (enable/disable, (un)mask)
    ///
    /// `reg` is the raw Message Control register value: bit 15 is the MSI-X
    /// Enable bit and bit 14 the Function Mask bit.
    pub fn set_msg_ctl(&mut self, reg: u16) {
        let old_masked = self.masked;
        let old_enabled = self.enabled;
        self.masked = ((reg >> FUNCTION_MASK_BIT) & 1u16) == 1u16;
        self.enabled = ((reg >> MSIX_ENABLE_BIT) & 1u16) == 1u16;
        // Update interrupt routing
        if old_masked != self.masked || old_enabled != self.enabled {
            if self.enabled && !self.masked {
                debug!("MSI-X enabled for device 0x{:x}", self.devid);
                for (idx, table_entry) in self.table_entries.iter().enumerate() {
                    let config = MsixVectorConfig {
                        high_addr: table_entry.msg_addr_hi,
                        low_addr: table_entry.msg_addr_lo,
                        data: table_entry.msg_data,
                        devid: self.devid,
                    };
                    if let Err(e) = self.vectors.update(idx, config, table_entry.masked(), true) {
                        error!("Failed updating vector: {:?}", e);
                    }
                }
            } else if old_enabled || !old_masked {
                debug!("MSI-X disabled for device 0x{:x}", self.devid);
                if let Err(e) = self.vectors.disable() {
                    error!("Failed disabling irq_fd: {:?}", e);
                }
            }
        }
        // If the Function Mask bit was set, and has just been cleared, it's
        // important to go through the entire PBA to check if there was any
        // pending MSI-X message to inject, given that the vector is not
        // masked.
        if old_masked && !self.masked {
            for (index, entry) in self.table_entries.clone().iter().enumerate() {
                if !entry.masked() && self.get_pba_bit(index.try_into().unwrap()) == 1 {
                    self.inject_msix_and_clear_pba(index);
                }
            }
        }
    }
    /// Read an MSI-X table entry
    ///
    /// Only naturally-aligned 4- and 8-byte accesses are supported; invalid
    /// offsets, lengths or out-of-range entry indices read back as all 1s.
    ///
    /// # Panics
    ///
    /// Panics if `data` is longer than 8 bytes.
    pub fn read_table(&self, offset: u64, data: &mut [u8]) {
        assert!(data.len() <= 8);
        let index: usize = (offset / MSIX_TABLE_ENTRIES_MODULO) as usize;
        let modulo_offset = offset % MSIX_TABLE_ENTRIES_MODULO;
        if index >= self.table_entries.len() {
            warn!("Invalid MSI-X table entry index {index}");
            data.fill(0xff);
            return;
        }
        match data.len() {
            4 => {
                let value = match modulo_offset {
                    0x0 => self.table_entries[index].msg_addr_lo,
                    0x4 => self.table_entries[index].msg_addr_hi,
                    0x8 => self.table_entries[index].msg_data,
                    0xc => self.table_entries[index].vector_ctl,
                    off => {
                        warn!("msi-x: invalid offset in table entry read: {off}");
                        0xffff_ffff
                    }
                };
                LittleEndian::write_u32(data, value);
            }
            8 => {
                let value = match modulo_offset {
                    0x0 => {
                        (u64::from(self.table_entries[index].msg_addr_hi) << 32)
                            | u64::from(self.table_entries[index].msg_addr_lo)
                    }
                    0x8 => {
                        (u64::from(self.table_entries[index].vector_ctl) << 32)
                            | u64::from(self.table_entries[index].msg_data)
                    }
                    off => {
                        warn!("msi-x: invalid offset in table entry read: {off}");
                        0xffff_ffff_ffff_ffff
                    }
                };
                LittleEndian::write_u64(data, value);
            }
            len => {
                warn!("msi-x: invalid length in table entry read: {len}");
                data.fill(0xff);
            }
        }
    }
    /// Write an MSI-X table entry
    ///
    /// Only naturally-aligned 4- and 8-byte accesses are supported; invalid
    /// accesses are logged and ignored. Unmasking an entry with a pending
    /// PBA bit injects the interrupt and clears the bit.
    ///
    /// # Panics
    ///
    /// Panics if `data` is longer than 8 bytes.
    pub fn write_table(&mut self, offset: u64, data: &[u8]) {
        assert!(data.len() <= 8);
        let index: usize = (offset / MSIX_TABLE_ENTRIES_MODULO) as usize;
        let modulo_offset = offset % MSIX_TABLE_ENTRIES_MODULO;
        if index >= self.table_entries.len() {
            warn!("msi-x: invalid table entry index {index}");
            return;
        }
        // Store the value of the entry before modification
        let old_entry = self.table_entries[index].clone();
        match data.len() {
            4 => {
                let value = LittleEndian::read_u32(data);
                match modulo_offset {
                    0x0 => self.table_entries[index].msg_addr_lo = value,
                    0x4 => self.table_entries[index].msg_addr_hi = value,
                    0x8 => self.table_entries[index].msg_data = value,
                    0xc => {
                        self.table_entries[index].vector_ctl = value;
                    }
                    off => warn!("msi-x: invalid offset in table entry write: {off}"),
                };
            }
            8 => {
                let value = LittleEndian::read_u64(data);
                match modulo_offset {
                    0x0 => {
                        self.table_entries[index].msg_addr_lo = (value & 0xffff_ffffu64) as u32;
                        self.table_entries[index].msg_addr_hi = (value >> 32) as u32;
                    }
                    0x8 => {
                        self.table_entries[index].msg_data = (value & 0xffff_ffffu64) as u32;
                        self.table_entries[index].vector_ctl = (value >> 32) as u32;
                    }
                    off => warn!("msi-x: invalid offset in table entry write: {off}"),
                };
            }
            len => warn!("msi-x: invalid length in table entry write: {len}"),
        };
        let table_entry = &self.table_entries[index];
        // Optimisation to avoid excessive updates
        if &old_entry == table_entry {
            return;
        }
        // Update interrupt routes
        // Optimisation: only update routes if the entry is not masked;
        // this is safe because if the entry is masked (starts masked as per spec)
        // in the table then it won't be triggered.
        if self.enabled && !self.masked && !table_entry.masked() {
            let config = MsixVectorConfig {
                high_addr: table_entry.msg_addr_hi,
                low_addr: table_entry.msg_addr_lo,
                data: table_entry.msg_data,
                devid: self.devid,
            };
            if let Err(e) = self
                .vectors
                .update(index, config, table_entry.masked(), true)
            {
                error!("Failed updating vector: {:?}", e);
            }
        }
        // After the MSI-X table entry has been updated, it is necessary to
        // check if the vector control masking bit has changed. In case the
        // bit has been flipped from 1 to 0, we need to inject a MSI message
        // if the corresponding pending bit from the PBA is set. Once the MSI
        // has been injected, the pending bit in the PBA needs to be cleared.
        // All of this is valid only if MSI-X has not been masked for the whole
        // device.
        // Check if bit has been flipped
        if !self.masked
            && self.enabled
            && old_entry.masked()
            && !table_entry.masked()
            && self.get_pba_bit(index.try_into().unwrap()) == 1
        {
            self.inject_msix_and_clear_pba(index);
        }
    }
    /// Read a pending bit array entry
    ///
    /// Only naturally-aligned 4- and 8-byte accesses are supported; invalid
    /// offsets, lengths or out-of-range indices read back as all 1s.
    pub fn read_pba(&self, offset: u64, data: &mut [u8]) {
        let index: usize = (offset / MSIX_PBA_ENTRIES_MODULO) as usize;
        let modulo_offset = offset % MSIX_PBA_ENTRIES_MODULO;
        if index >= self.pba_entries.len() {
            warn!("msi-x: invalid PBA entry index {index}");
            data.fill(0xff);
            return;
        }
        match data.len() {
            4 => {
                let value: u32 = match modulo_offset {
                    0x0 => (self.pba_entries[index] & 0xffff_ffffu64) as u32,
                    0x4 => (self.pba_entries[index] >> 32) as u32,
                    off => {
                        warn!("msi-x: invalid offset in pba entry read: {off}");
                        0xffff_ffff
                    }
                };
                LittleEndian::write_u32(data, value);
            }
            8 => {
                let value: u64 = match modulo_offset {
                    0x0 => self.pba_entries[index],
                    off => {
                        warn!("msi-x: invalid offset in pba entry read: {off}");
                        0xffff_ffff_ffff_ffff
                    }
                };
                LittleEndian::write_u64(data, value);
            }
            len => {
                warn!("msi-x: invalid length in table entry read: {len}");
                data.fill(0xff);
            }
        }
    }
    /// Write a pending bit array entry. The PBA is read-only; writes are
    /// logged and dropped.
    pub fn write_pba(&mut self, _offset: u64, _data: &[u8]) {
        error!("Pending Bit Array is read only");
    }
    /// Set (`reset == false`) or clear (`reset == true`) the PBA bit of a
    /// vector. Out-of-range (but < 2048) vectors are silently ignored.
    ///
    /// # Panics
    ///
    /// Panics if `vector` is not below [`MAX_MSIX_VECTORS_PER_DEVICE`].
    pub fn set_pba_bit(&mut self, vector: u16, reset: bool) {
        assert!(vector < MAX_MSIX_VECTORS_PER_DEVICE);
        if (vector as usize) >= self.table_entries.len() {
            return;
        }
        let index: usize = (vector as usize) / BITS_PER_PBA_ENTRY;
        let shift: usize = (vector as usize) % BITS_PER_PBA_ENTRY;
        let mut mask: u64 = 1u64 << shift;
        if reset {
            mask = !mask;
            self.pba_entries[index] &= mask;
        } else {
            self.pba_entries[index] |= mask;
        }
    }
    /// Get the PBA bit for a vector; returns 0xff for a vector index past the
    /// end of the table.
    ///
    /// # Panics
    ///
    /// Panics if `vector` is not below [`MAX_MSIX_VECTORS_PER_DEVICE`].
    fn get_pba_bit(&self, vector: u16) -> u8 {
        assert!(vector < MAX_MSIX_VECTORS_PER_DEVICE);
        if (vector as usize) >= self.table_entries.len() {
            return 0xff;
        }
        let index: usize = (vector as usize) / BITS_PER_PBA_ENTRY;
        let shift: usize = (vector as usize) % BITS_PER_PBA_ENTRY;
        ((self.pba_entries[index] >> shift) & 0x0000_0001u64) as u8
    }
    /// Inject an MSI-X interrupt and clear the PBA bit for a vector
    fn inject_msix_and_clear_pba(&mut self, vector: usize) {
        // Inject the MSI message
        match self.vectors.trigger(vector) {
            Ok(_) => debug!("MSI-X injected on vector control flip"),
            Err(e) => error!("failed to inject MSI-X: {}", e),
        }
        // Clear the bit from PBA
        self.set_pba_bit(vector.try_into().unwrap(), true);
    }
}
// Packed C layout: this struct is written byte-for-byte into PCI config
// space via `ByteValued::as_slice`, so field order and packing matter.
#[repr(C, packed)]
#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)]
/// MSI-X PCI capability
pub struct MsixCap {
    /// Message Control Register
    /// 10-0: MSI-X Table size
    /// 13-11: Reserved
    /// 14: Mask. Mask all MSI-X when set.
    /// 15: Enable. Enable all MSI-X when set.
    pub msg_ctl: u16,
    /// Table. Contains the offset and the BAR indicator (BIR)
    /// 2-0: Table BAR indicator (BIR). Can be 0 to 5.
    /// 31-3: Table offset in the BAR pointed by the BIR.
    pub table: u32,
    /// Pending Bit Array. Contains the offset and the BAR indicator (BIR)
    /// 2-0: PBA BAR indicator (BIR). Can be 0 to 5.
    /// 31-3: PBA offset in the BAR pointed by the BIR.
    pub pba: u32,
}
// SAFETY: All members are simple numbers and any value is valid; the struct
// is #[repr(C, packed)], so it has no padding bytes either.
unsafe impl ByteValued for MsixCap {}
impl PciCapability for MsixCap {
    // Raw bytes of the capability as they appear in config space.
    fn bytes(&self) -> &[u8] {
        self.as_slice()
    }
    fn id(&self) -> PciCapabilityId {
        PciCapabilityId::MsiX
    }
}
impl MsixCap {
    /// Create a new MSI-X capability object
    ///
    /// * `table_pci_bar`/`table_off` - BAR index (0..=5) and offset of the vector table.
    /// * `table_size` - Number of vectors (1..=2048).
    /// * `pba_pci_bar`/`pba_off` - BAR index (0..=5) and offset of the Pending Bit Array.
    ///
    /// # Panics
    ///
    /// Panics unless `1 <= table_size <= MAX_MSIX_VECTORS_PER_DEVICE`.
    pub fn new(
        table_pci_bar: u8,
        table_size: u16,
        table_off: u32,
        pba_pci_bar: u8,
        pba_off: u32,
    ) -> Self {
        // The table-size field of Message Control holds N - 1 in its 11 low
        // bits, so up to MAX_MSIX_VECTORS_PER_DEVICE (2048) entries are
        // encodable; keep the bound consistent with `MsixConfig::new`, which
        // accepts up to and including that many vectors. A size of 0 is
        // invalid and would underflow the N - 1 encoding below.
        assert!(table_size >= 1 && table_size <= MAX_MSIX_VECTORS_PER_DEVICE);
        // Set the table size and enable MSI-X.
        let msg_ctl: u16 = 0x8000u16 + table_size - 1;
        MsixCap {
            msg_ctl,
            // Low 3 bits carry the BIR, the rest the 8-byte-aligned offset.
            table: (table_off & 0xffff_fff8u32) | u32::from(table_pci_bar & 0x7u8),
            pba: (pba_off & 0xffff_fff8u32) | u32::from(pba_pci_bar & 0x7u8),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::builder::tests::default_vmm;
    use crate::logger::{IncMetric, METRICS};
    use crate::{Vm, check_metric_after_block};
    // Helper: builds an `nr_vectors`-wide MSI-X vector group backed by a
    // freshly created test VMM.
    fn msix_vector_group(nr_vectors: u16) -> Arc<MsixVectorGroup> {
        let vmm = default_vmm();
        Arc::new(Vm::create_msix_group(vmm.vm.clone(), nr_vectors).unwrap())
    }
    #[test]
    #[should_panic]
    fn test_too_many_vectors() {
        // 2049 exceeds MAX_MSIX_VECTORS_PER_DEVICE (2048).
        MsixConfig::new(msix_vector_group(2049), 0x42);
    }
    #[test]
    fn test_new_msix_config() {
        let config = MsixConfig::new(msix_vector_group(2), 0x42);
        assert_eq!(config.devid, 0x42);
        assert!(config.masked);
        assert!(!config.enabled);
        assert_eq!(config.table_entries.len(), 2);
        assert_eq!(config.pba_entries.len(), 1);
    }
    #[test]
    fn test_enable_msix_vectors() {
        let mut config = MsixConfig::new(msix_vector_group(2), 0x42);
        assert!(!config.enabled);
        assert!(config.masked);
        // Bit 15 marks whether MSI-X is enabled
        // Bit 14 marks whether vectors are masked
        config.set_msg_ctl(0x8000);
        assert!(config.enabled);
        assert!(!config.masked);
        config.set_msg_ctl(0x4000);
        assert!(!config.enabled);
        assert!(config.masked);
        config.set_msg_ctl(0xC000);
        assert!(config.enabled);
        assert!(config.masked);
        config.set_msg_ctl(0x0);
        assert!(!config.enabled);
        assert!(!config.masked);
    }
    #[test]
    #[should_panic]
    fn test_table_access_read_too_big() {
        // read_table asserts data.len() <= 8.
        let config = MsixConfig::new(msix_vector_group(2), 0x42);
        let mut buffer = [0u8; 16];
        config.read_table(0, &mut buffer);
    }
    #[test]
    fn test_read_table_past_end() {
        let config = MsixConfig::new(msix_vector_group(2), 0x42);
        let mut buffer = [0u8; 8];
        // We have 2 vectors (16 bytes each), so we should be able to read up to 32 bytes.
        // Past that the device should respond with all 1s
        config.read_table(32, &mut buffer);
        assert_eq!(buffer, [0xff; 8]);
    }
    #[test]
    fn test_read_table_bad_length() {
        let config = MsixConfig::new(msix_vector_group(2), 0x42);
        let mut buffer = [0u8; 8];
        // We can either read 4 or 8 bytes
        config.read_table(0, &mut buffer[..0]);
        assert_eq!(buffer, [0x0; 8]);
        config.read_table(0, &mut buffer[..1]);
        assert_eq!(buffer[..1], [0xff; 1]);
        config.read_table(0, &mut buffer[..2]);
        assert_eq!(buffer[..2], [0xff; 2]);
        config.read_table(0, &mut buffer[..3]);
        assert_eq!(buffer[..3], [0xff; 3]);
        config.read_table(0, &mut buffer[..5]);
        assert_eq!(buffer[..5], [0xff; 5]);
        config.read_table(0, &mut buffer[..6]);
        assert_eq!(buffer[..6], [0xff; 6]);
        config.read_table(0, &mut buffer[..7]);
        assert_eq!(buffer[..7], [0xff; 7]);
        config.read_table(0, &mut buffer[..4]);
        assert_eq!(buffer, u64::to_le_bytes(0x00ff_ffff_0000_0000));
        config.read_table(0, &mut buffer);
        assert_eq!(buffer, u64::to_le_bytes(0));
    }
    #[test]
    fn test_access_table() {
        let mut config = MsixConfig::new(msix_vector_group(2), 0x42);
        // enabled and not masked
        check_metric_after_block!(
            METRICS.interrupts.config_updates,
            2,
            config.set_msg_ctl(0x8000)
        );
        let mut buffer = [0u8; 8];
        // Write first vector's address with a single 8-byte write
        // It's still masked so shouldn't be updated
        check_metric_after_block!(
            METRICS.interrupts.config_updates,
            0,
            config.write_table(0, &u64::to_le_bytes(0x0000_1312_0000_1110))
        );
        // Same for control and message data
        // Now, we enabled it, so we should see an update
        check_metric_after_block!(
            METRICS.interrupts.config_updates,
            1,
            config.write_table(8, &u64::to_le_bytes(0x0_0000_0020))
        );
        // Write second vector's fields with 4-byte writes
        // low 32 bits of the address (still masked)
        check_metric_after_block!(
            METRICS.interrupts.config_updates,
            0,
            config.write_table(16, &u32::to_le_bytes(0x4241))
        );
        // high 32 bits of the address (still masked)
        check_metric_after_block!(
            METRICS.interrupts.config_updates,
            0,
            config.write_table(20, &u32::to_le_bytes(0x4443))
        );
        // message data (still masked)
        check_metric_after_block!(
            METRICS.interrupts.config_updates,
            0,
            config.write_table(24, &u32::to_le_bytes(0x21))
        );
        // vector control (now unmasked)
        check_metric_after_block!(
            METRICS.interrupts.config_updates,
            1,
            config.write_table(28, &u32::to_le_bytes(0x0))
        );
        assert_eq!(config.table_entries[0].msg_addr_hi, 0x1312);
        assert_eq!(config.table_entries[0].msg_addr_lo, 0x1110);
        assert_eq!(config.table_entries[0].msg_data, 0x20);
        assert_eq!(config.table_entries[0].vector_ctl, 0);
        assert_eq!(config.table_entries[1].msg_addr_hi, 0x4443);
        assert_eq!(config.table_entries[1].msg_addr_lo, 0x4241);
        assert_eq!(config.table_entries[1].msg_data, 0x21);
        assert_eq!(config.table_entries[1].vector_ctl, 0);
        assert_eq!(config.table_entries.len(), 2);
        assert_eq!(config.pba_entries.len(), 1);
        // reading at a bad offset should return all 1s
        config.read_table(1, &mut buffer[..4]);
        assert_eq!(buffer[..4], [0xff; 4]);
        // read low address for first vector
        config.read_table(0, &mut buffer[..4]);
        assert_eq!(
            buffer[..4],
            u32::to_le_bytes(config.table_entries[0].msg_addr_lo)
        );
        // read the high address for first vector
        config.read_table(4, &mut buffer[4..]);
        assert_eq!(0x0000_1312_0000_1110, u64::from_le_bytes(buffer));
        // read msg_data from second vector
        config.read_table(24, &mut buffer[..4]);
        assert_eq!(u32::to_le_bytes(0x21), &buffer[..4]);
        // read vector control for second vector
        config.read_table(28, &mut buffer[..4]);
        assert_eq!(u32::to_le_bytes(0x0), &buffer[..4]);
        // reading with 8 bytes at bad offset should also return all 1s
        config.read_table(19, &mut buffer);
        assert_eq!(buffer, [0xff; 8]);
        // Read the second vector's address using an 8 byte read
        config.read_table(16, &mut buffer);
        assert_eq!(0x0000_4443_0000_4241, u64::from_le_bytes(buffer));
        // Read the first vector's ctrl and data with a single 8 byte read
        config.read_table(8, &mut buffer);
        assert_eq!(0x0_0000_0020, u64::from_le_bytes(buffer));
        // If we mask the interrupts we shouldn't see any update
        check_metric_after_block!(METRICS.interrupts.config_updates, 0, {
            config.write_table(12, &u32::to_le_bytes(0x1));
            config.write_table(28, &u32::to_le_bytes(0x1));
        });
        // Un-masking them should update them
        check_metric_after_block!(METRICS.interrupts.config_updates, 2, {
            config.write_table(12, &u32::to_le_bytes(0x0));
            config.write_table(28, &u32::to_le_bytes(0x0));
        });
        // Setting up the same config should have no effect
        check_metric_after_block!(METRICS.interrupts.config_updates, 0, {
            config.write_table(12, &u32::to_le_bytes(0x0));
            config.write_table(28, &u32::to_le_bytes(0x0));
        });
    }
    #[test]
    #[should_panic]
    fn test_table_access_write_too_big() {
        // write_table asserts data.len() <= 8.
        let mut config = MsixConfig::new(msix_vector_group(2), 0x42);
        let buffer = [0u8; 16];
        config.write_table(0, &buffer);
    }
    #[test]
    fn test_pba_read_too_big() {
        let config = MsixConfig::new(msix_vector_group(2), 0x42);
        let mut buffer = [0u8; 16];
        config.read_pba(0, &mut buffer);
        assert_eq!(buffer, [0xff; 16]);
    }
    #[test]
    fn test_pba_invalid_offset() {
        let config = MsixConfig::new(msix_vector_group(2), 0x42);
        let mut buffer = [0u8; 8];
        // Past the end of the PBA array
        config.read_pba(128, &mut buffer);
        assert_eq!(buffer, [0xffu8; 8]);
        // Invalid offset within a valid entry
        let mut buffer = [0u8; 8];
        config.read_pba(3, &mut buffer[..4]);
        assert_eq!(buffer[..4], [0xffu8; 4]);
        config.read_pba(3, &mut buffer);
        assert_eq!(buffer, [0xffu8; 8]);
    }
    #[test]
    #[should_panic]
    fn test_set_pba_bit_vector_too_big() {
        // 2048 is not a valid vector index (max is 2047).
        let mut config = MsixConfig::new(msix_vector_group(2), 0x42);
        config.set_pba_bit(2048, false);
    }
    #[test]
    #[should_panic]
    fn test_get_pba_bit_vector_too_big() {
        let config = MsixConfig::new(msix_vector_group(2), 0x42);
        config.get_pba_bit(2048);
    }
    #[test]
    fn test_pba_bit_invalid_vector() {
        let mut config = MsixConfig::new(msix_vector_group(2), 0x42);
        // We have two vectors, so setting the pending bit for the third one
        // should be ignored
        config.set_pba_bit(2, false);
        assert_eq!(config.pba_entries[0], 0);
        // Same for getting the bit
        assert_eq!(config.get_pba_bit(2), 0xff);
    }
    #[test]
    fn test_pba_read() {
        let mut config = MsixConfig::new(msix_vector_group(128), 0x42);
        let mut buffer = [0u8; 8];
        config.set_pba_bit(1, false);
        assert_eq!(config.pba_entries[0], 2);
        assert_eq!(config.pba_entries[1], 0);
        config.read_pba(0, &mut buffer);
        assert_eq!(0x2, u64::from_le_bytes(buffer));
        let mut buffer = [0u8; 4];
        config.set_pba_bit(96, false);
        assert_eq!(config.pba_entries[0], 2);
        assert_eq!(config.pba_entries[1], 0x1_0000_0000);
        config.read_pba(8, &mut buffer);
        assert_eq!(0x0, u32::from_le_bytes(buffer));
        config.read_pba(12, &mut buffer);
        assert_eq!(0x1, u32::from_le_bytes(buffer));
    }
    #[test]
    fn test_pending_interrupt() {
        let mut config = MsixConfig::new(msix_vector_group(2), 0x42);
        config.set_pba_bit(1, false);
        assert_eq!(config.get_pba_bit(1), 1);
        // Enable MSI-X vector and unmask interrupts
        // Individual vectors are still masked, so no change
        check_metric_after_block!(METRICS.interrupts.triggers, 0, config.set_msg_ctl(0x8000));
        // Enable all vectors
        // Vector one had a pending bit, so we must have triggered an interrupt for it
        // and cleared the pending bit
        check_metric_after_block!(METRICS.interrupts.triggers, 1, {
            config.write_table(8, &u64::to_le_bytes(0x0_0000_0020));
            config.write_table(24, &u64::to_le_bytes(0x0_0000_0020));
        });
        assert_eq!(config.get_pba_bit(1), 0);
        // Check that interrupt is sent as well for enabled vectors once we unmask from
        // Message Control
        // Mask vectors and set pending bit for vector 0
        check_metric_after_block!(METRICS.interrupts.triggers, 0, {
            config.set_msg_ctl(0xc000);
            config.set_pba_bit(0, false);
        });
        // Unmask them
        check_metric_after_block!(METRICS.interrupts.triggers, 1, config.set_msg_ctl(0x8000));
        assert_eq!(config.get_pba_bit(0), 0);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/pci/bus.rs | src/vmm/src/pci/bus.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
use std::collections::HashMap;
use std::fmt::Debug;
use std::ops::DerefMut;
use std::sync::{Arc, Barrier, Mutex};
use byteorder::{ByteOrder, LittleEndian};
use pci::{PciBridgeSubclass, PciClassCode};
use crate::logger::error;
use crate::pci::configuration::PciConfiguration;
use crate::pci::{DeviceRelocation, PciDevice};
use crate::utils::u64_to_usize;
use crate::vstate::bus::BusDevice;
/// Errors returned when managing devices on the PCI root bus.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum PciRootError {
    /// Could not find an available device slot on the PCI bus.
    NoPciDeviceSlotAvailable,
}
// Vendor/device IDs advertised by the emulated host bridge.
const VENDOR_ID_INTEL: u16 = 0x8086;
const DEVICE_ID_INTEL_VIRT_PCIE_HOST: u16 = 0x0d57;
// Number of device slots on the (single) bus; slot 0 is the host bridge.
const NUM_DEVICE_IDS: usize = 32;
#[derive(Debug)]
/// Emulates the PCI Root bridge device.
pub struct PciRoot {
    /// Configuration space.
    config: PciConfiguration,
}
impl PciRoot {
    /// Create an empty PCI root bridge.
    ///
    /// When `config` is `None`, a default type-0 configuration space for an
    /// Intel virtual PCIe host bridge is built instead.
    pub fn new(config: Option<PciConfiguration>) -> Self {
        let config = config.unwrap_or_else(|| {
            PciConfiguration::new_type0(
                VENDOR_ID_INTEL,
                DEVICE_ID_INTEL_VIRT_PCIE_HOST,
                0,
                PciClassCode::BridgeDevice,
                &PciBridgeSubclass::HostBridge,
                0,
                0,
                None,
            )
        });
        PciRoot { config }
    }
}
impl BusDevice for PciRoot {}
impl PciDevice for PciRoot {
    // Config-space accesses are forwarded straight to the stored
    // `PciConfiguration`; the root bridge needs no synchronization barrier.
    fn write_config_register(
        &mut self,
        reg_idx: usize,
        offset: u64,
        data: &[u8],
    ) -> Option<Arc<Barrier>> {
        self.config.write_config_register(reg_idx, offset, data);
        None
    }
    fn read_config_register(&mut self, reg_idx: usize) -> u32 {
        self.config.read_reg(reg_idx)
    }
}
/// A PCI bus definition
pub struct PciBus {
    /// Devices attached to this bus.
    /// Device 0 is host bridge.
    pub devices: HashMap<u32, Arc<Mutex<dyn PciDevice>>>,
    // Handler invoked when a guest reprograms a device BAR.
    vm: Arc<dyn DeviceRelocation>,
    // Slot allocation bitmap: `true` means the slot index is taken.
    device_ids: Vec<bool>,
}
// Hand-written impl: `devices` (trait objects) and `vm` are not Debug, so
// only the slot bitmap is printed.
impl Debug for PciBus {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Root Firecracker PCI Bus")
            .field("device_ids", &self.device_ids)
            .finish()
    }
}
impl PciBus {
    /// Create a new PCI bus
    ///
    /// Slot 0 is pre-allocated for the host bridge (`pci_root`).
    pub fn new(pci_root: PciRoot, vm: Arc<dyn DeviceRelocation>) -> Self {
        let mut device_ids: Vec<bool> = vec![false; NUM_DEVICE_IDS];
        device_ids[0] = true;
        let mut devices: HashMap<u32, Arc<Mutex<dyn PciDevice>>> = HashMap::new();
        devices.insert(0, Arc::new(Mutex::new(pci_root)));
        PciBus {
            devices,
            vm,
            device_ids,
        }
    }
    /// Insert a device in the bus
    pub fn add_device(&mut self, device_id: u32, device: Arc<Mutex<dyn PciDevice>>) {
        self.devices.insert(device_id, device);
    }
    /// Get a new device ID
    ///
    /// Claims and returns the lowest free slot index, or an error when the
    /// bus is full.
    pub fn next_device_id(&mut self) -> Result<u32, PciRootError> {
        match self.device_ids.iter().position(|taken| !*taken) {
            Some(idx) => {
                self.device_ids[idx] = true;
                Ok(idx.try_into().unwrap())
            }
            None => Err(PciRootError::NoPciDeviceSlotAvailable),
        }
    }
}
#[cfg(target_arch = "x86_64")]
/// IO port used for configuring PCI over the legacy bus
pub const PCI_CONFIG_IO_PORT: u64 = 0xcf8;
#[cfg(target_arch = "x86_64")]
/// Size of IO ports we are using to configure PCI over the legacy bus. We have two ports, 0xcf8
/// and 0xcfc 32bits long.
pub const PCI_CONFIG_IO_PORT_SIZE: u64 = 0x8;
/// Wrapper that allows handling PCI configuration over the legacy Bus
#[derive(Debug)]
pub struct PciConfigIo {
    /// Config space register.
    /// Latched CONFIG_ADDRESS value (port 0xcf8); bit 31 is the enable bit.
    config_address: u32,
    pci_bus: Arc<Mutex<PciBus>>,
}
impl PciConfigIo {
    /// New Port IO configuration handler
    pub fn new(pci_bus: Arc<Mutex<PciBus>>) -> Self {
        PciConfigIo {
            config_address: 0,
            pci_bus,
        }
    }
    /// Handle a configuration space read over Port IO
    ///
    /// Returns all 1s (master-abort value) when the latched CONFIG_ADDRESS
    /// is disabled, targets a bus other than 0, a function other than 0, or
    /// a slot with no device.
    pub fn config_space_read(&self) -> u32 {
        // Bit 31 of CONFIG_ADDRESS enables configuration cycles.
        let enabled = (self.config_address & 0x8000_0000) != 0;
        if !enabled {
            return 0xffff_ffff;
        }
        let (bus, device, function, register) =
            parse_io_config_address(self.config_address & !0x8000_0000);
        // Only support one bus.
        if bus != 0 {
            return 0xffff_ffff;
        }
        // Don't support multi-function devices.
        if function > 0 {
            return 0xffff_ffff;
        }
        // NOTE: Potential contention among vCPU threads on this lock. This should not
        // be a problem currently, since we mainly access this when we are setting up devices.
        // We might want to do some profiling to ensure this does not become a bottleneck.
        self.pci_bus
            .as_ref()
            .lock()
            .unwrap()
            .devices
            .get(&(device.try_into().unwrap()))
            .map_or(0xffff_ffff, |d| {
                d.lock().unwrap().read_config_register(register)
            })
    }
    /// Handle a configuration space write over Port IO
    ///
    /// Detects BAR reprogramming and performs the relocation *before* the
    /// register write is committed. Returns an optional barrier from the
    /// device that the caller must wait on.
    pub fn config_space_write(&mut self, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
        // Reject accesses that would cross the 4-byte CONFIG_DATA register.
        if u64_to_usize(offset) + data.len() > 4 {
            return None;
        }
        let enabled = (self.config_address & 0x8000_0000) != 0;
        if !enabled {
            return None;
        }
        let (bus, device, function, register) =
            parse_io_config_address(self.config_address & !0x8000_0000);
        // Only support one bus.
        if bus != 0 {
            return None;
        }
        // Don't support multi-function devices.
        if function > 0 {
            return None;
        }
        // NOTE: Potential contention among vCPU threads on this lock. This should not
        // be a problem currently, since we mainly access this when we are setting up devices.
        // We might want to do some profiling to ensure this does not become a bottleneck.
        let pci_bus = self.pci_bus.as_ref().lock().unwrap();
        if let Some(d) = pci_bus.devices.get(&(device.try_into().unwrap())) {
            let mut device = d.lock().unwrap();
            // Find out if one of the device's BAR is being reprogrammed, and
            // reprogram it if needed.
            if let Some(params) = device.detect_bar_reprogramming(register, data)
                && let Err(e) = pci_bus.vm.move_bar(
                    params.old_base,
                    params.new_base,
                    params.len,
                    device.deref_mut(),
                )
            {
                error!(
                    "Failed moving device BAR: {}: 0x{:x}->0x{:x}(0x{:x})",
                    e, params.old_base, params.new_base, params.len
                );
            }
            // Update the register value
            device.write_config_register(register, offset, data)
        } else {
            None
        }
    }
    // Merge `data` (1, 2 or 4 bytes at `offset`) into the latched
    // CONFIG_ADDRESS register; other access sizes are ignored.
    fn set_config_address(&mut self, offset: u64, data: &[u8]) {
        if u64_to_usize(offset) + data.len() > 4 {
            return;
        }
        let (mask, value): (u32, u32) = match data.len() {
            1 => (
                0x0000_00ff << (offset * 8),
                u32::from(data[0]) << (offset * 8),
            ),
            2 => (
                0x0000_ffff << (offset * 8),
                ((u32::from(data[1]) << 8) | u32::from(data[0])) << (offset * 8),
            ),
            4 => (0xffff_ffff, LittleEndian::read_u32(data)),
            _ => return,
        };
        self.config_address = (self.config_address & !mask) | value;
    }
}
impl BusDevice for PciConfigIo {
    // `offset` is relative to port 0xcf8: offsets 0..=3 address the
    // CONFIG_ADDRESS register, 4..=7 the CONFIG_DATA register.
    fn read(&mut self, _base: u64, offset: u64, data: &mut [u8]) {
        // Only allow reads to the register boundary.
        let start = u64_to_usize(offset) % 4;
        let end = start + data.len();
        if end > 4 {
            // Access crosses a register boundary: respond with all 1s.
            for d in data.iter_mut() {
                *d = 0xff;
            }
            return;
        }
        // `offset` is relative to 0xcf8
        let value = match offset {
            0..=3 => self.config_address,
            4..=7 => self.config_space_read(),
            _ => 0xffff_ffff,
        };
        // Copy the requested byte range of the 32-bit register value.
        for i in start..end {
            data[i - start] = ((value >> (i * 8)) & 0xff) as u8;
        }
    }
    fn write(&mut self, _base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
        // `offset` is relative to 0xcf8
        match offset {
            o @ 0..=3 => {
                self.set_config_address(o, data);
                None
            }
            // CONFIG_DATA writes are forwarded with an offset relative to 0xcfc.
            o @ 4..=7 => self.config_space_write(o - 4, data),
            _ => None,
        }
    }
}
#[derive(Debug)]
/// Emulates PCI memory-mapped configuration access mechanism.
pub struct PciConfigMmio {
    // The bus whose devices are exposed through this ECAM window.
    pci_bus: Arc<Mutex<PciBus>>,
}
impl PciConfigMmio {
    /// New MMIO configuration handler object
    pub fn new(pci_bus: Arc<Mutex<PciBus>>) -> Self {
        PciConfigMmio { pci_bus }
    }

    /// Reads the 32-bit configuration register addressed by the ECAM-encoded
    /// `config_address`. Unsupported bus/function numbers and absent devices
    /// read as all 1s.
    fn config_space_read(&self, config_address: u32) -> u32 {
        let (bus, device, function, register) = parse_mmio_config_address(config_address);
        // Only support one bus.
        if bus != 0 {
            return 0xffff_ffff;
        }
        // Don't support multi-function devices.
        if function > 0 {
            return 0xffff_ffff;
        }
        self.pci_bus
            .lock()
            .unwrap()
            .devices
            .get(&(device.try_into().unwrap()))
            .map_or(0xffff_ffff, |d| {
                d.lock().unwrap().read_config_register(register)
            })
    }

    /// Writes `data` at byte `offset` inside the configuration register
    /// addressed by `config_address`, relocating a BAR first if the write
    /// reprograms one.
    fn config_space_write(&mut self, config_address: u32, offset: u64, data: &[u8]) {
        // Ignore writes that cross the 4-byte register boundary.
        if u64_to_usize(offset) + data.len() > 4 {
            return;
        }
        let (bus, device, function, register) = parse_mmio_config_address(config_address);
        // Only support one bus.
        if bus != 0 {
            return;
        }
        // Don't support multi-function devices.
        if function > 0 {
            return;
        }
        let pci_bus = self.pci_bus.lock().unwrap();
        if let Some(d) = pci_bus.devices.get(&(device.try_into().unwrap())) {
            let mut device = d.lock().unwrap();
            // Find out if one of the device's BAR is being reprogrammed, and
            // reprogram it if needed.
            if let Some(params) = device.detect_bar_reprogramming(register, data)
                && let Err(e) = pci_bus.vm.move_bar(
                    params.old_base,
                    params.new_base,
                    params.len,
                    device.deref_mut(),
                )
            {
                // Relocation failure is logged but not fatal: the register
                // update below still goes through.
                error!(
                    "Failed moving device BAR: {}: 0x{:x}->0x{:x}(0x{:x})",
                    e, params.old_base, params.new_base, params.len
                );
            }
            // Update the register value
            device.write_config_register(register, offset, data);
        }
    }
}
impl BusDevice for PciConfigMmio {
    /// Reads up to 4 bytes from the configuration register selected by the
    /// ECAM `offset`.
    fn read(&mut self, _base: u64, offset: u64, data: &mut [u8]) {
        // Only allow reads to the register boundary.
        let start = u64_to_usize(offset) % 4;
        let end = start + data.len();
        if end > 4 || offset > u64::from(u32::MAX) {
            // Invalid access: answer with all 1s, like absent hardware would.
            for d in data {
                *d = 0xff;
            }
            return;
        }
        // The full ECAM offset doubles as the config address; the parser
        // extracts bus/device/function/register from it.
        let value = self.config_space_read(offset.try_into().unwrap());
        for i in start..end {
            data[i - start] = ((value >> (i * 8)) & 0xff) as u8;
        }
    }

    /// Writes `data` to the configuration register selected by the ECAM `offset`.
    fn write(&mut self, _base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
        // Offsets beyond 32 bits cannot encode a valid ECAM access.
        if offset > u64::from(u32::MAX) {
            return None;
        }
        // `offset % 4` is the byte offset inside the 32-bit register.
        self.config_space_write(offset.try_into().unwrap(), offset % 4, data);
        None
    }
}
/// Extracts a bit field from `value`: shifts right by `offset` bits, applies
/// `mask`, and widens the result to `usize`.
fn shift_and_mask(value: u32, offset: usize, mask: u32) -> usize {
    let field = (value >> offset) & mask;
    field as usize
}
// Parse the MMIO address offset to a (bus, device, function, register) tuple.
// See section 7.2.2 PCI Express Enhanced Configuration Access Mechanism (ECAM)
// from the Pci Express Base Specification Revision 5.0 Version 1.0.
fn parse_mmio_config_address(config_address: u32) -> (usize, usize, usize, usize) {
    // Field layout: | bus 27:20 | device 19:15 | function 14:12 | register 11:2 | byte 1:0 |
    let field = |shift: u32, mask: u32| ((config_address >> shift) & mask) as usize;
    (
        field(20, 0x00ff), // bus number
        field(15, 0x1f),   // device number
        field(12, 0x07),   // function number
        field(2, 0x3ff),   // register number
    )
}
// Parse the CONFIG_ADDRESS register to a (bus, device, function, register) tuple.
// Field layout: | enable 31 | bus 23:16 | device 15:11 | function 10:8 | register 7:2 | byte 1:0 |
fn parse_io_config_address(config_address: u32) -> (usize, usize, usize, usize) {
    let bus = shift_and_mask(config_address, 16, 0x00ff);
    let device = shift_and_mask(config_address, 11, 0x1f);
    let function = shift_and_mask(config_address, 8, 0x07);
    let register = shift_and_mask(config_address, 2, 0x3f);
    (bus, device, function, register)
}
#[cfg(test)]
mod tests {
    //! Tests for the PIO (0xcf8/0xcfc) and MMIO (ECAM) PCI configuration
    //! access mechanisms, using a mock device at slot 1 with one 4KiB BAR.
    use std::sync::atomic::AtomicUsize;
    use std::sync::{Arc, Mutex};

    use pci::{PciClassCode, PciMassStorageSubclass};

    use super::{PciBus, PciConfigIo, PciConfigMmio, PciRoot};
    use crate::pci::bus::{DEVICE_ID_INTEL_VIRT_PCIE_HOST, VENDOR_ID_INTEL};
    use crate::pci::configuration::PciConfiguration;
    use crate::pci::{BarReprogrammingParams, DeviceRelocation, DeviceRelocationError, PciDevice};
    use crate::vstate::bus::BusDevice;

    // Mock `DeviceRelocation` that only counts BAR move requests, so tests
    // can assert exactly when the reprogramming logic fired.
    #[derive(Debug, Default)]
    struct RelocationMock {
        reloc_cnt: AtomicUsize,
    }

    impl RelocationMock {
        fn cnt(&self) -> usize {
            self.reloc_cnt.load(std::sync::atomic::Ordering::SeqCst)
        }
    }

    impl DeviceRelocation for RelocationMock {
        fn move_bar(
            &self,
            _old_base: u64,
            _new_base: u64,
            _len: u64,
            _pci_dev: &mut dyn PciDevice,
        ) -> Result<(), DeviceRelocationError> {
            self.reloc_cnt
                .fetch_add(1, std::sync::atomic::Ordering::SeqCst);
            Ok(())
        }
    }

    // Minimal PCI device wrapping a bare configuration space with a single
    // BAR0 of size 0x1000 at address 0x1000.
    struct PciDevMock(PciConfiguration);

    impl PciDevMock {
        fn new() -> Self {
            let mut config = PciConfiguration::new_type0(
                0x42,
                0x0,
                0x0,
                PciClassCode::MassStorage,
                &PciMassStorageSubclass::SerialScsiController,
                0x13,
                0x12,
                None,
            );
            config.add_pci_bar(0, 0x1000, 0x1000);
            PciDevMock(config)
        }
    }

    impl PciDevice for PciDevMock {
        fn write_config_register(
            &mut self,
            reg_idx: usize,
            offset: u64,
            data: &[u8],
        ) -> Option<Arc<std::sync::Barrier>> {
            self.0.write_config_register(reg_idx, offset, data);
            None
        }

        fn read_config_register(&mut self, reg_idx: usize) -> u32 {
            self.0.read_reg(reg_idx)
        }

        fn detect_bar_reprogramming(
            &mut self,
            reg_idx: usize,
            data: &[u8],
        ) -> Option<BarReprogrammingParams> {
            self.0.detect_bar_reprogramming(reg_idx, data)
        }
    }

    #[test]
    fn test_writing_io_config_address() {
        let mock = Arc::new(RelocationMock::default());
        let root = PciRoot::new(None);
        let mut bus = PciConfigIo::new(Arc::new(Mutex::new(PciBus::new(root, mock))));
        assert_eq!(bus.config_address, 0);
        // Writing more than 32 bits should fail
        bus.write(0, 0, &[0x42; 8]);
        assert_eq!(bus.config_address, 0);
        // Write all the address at once
        bus.write(0, 0, &[0x13, 0x12, 0x11, 0x10]);
        assert_eq!(bus.config_address, 0x10111213);
        // A 32-bit write that isn't at offset 0 overflows the register, so it
        // should have no effect
        bus.write(0, 1, &[0x0; 4]);
        assert_eq!(bus.config_address, 0x10111213);
        // Write two bytes at a time
        bus.write(0, 0, &[0x42, 0x42]);
        assert_eq!(bus.config_address, 0x10114242);
        bus.write(0, 1, &[0x43, 0x43]);
        assert_eq!(bus.config_address, 0x10434342);
        bus.write(0, 2, &[0x44, 0x44]);
        assert_eq!(bus.config_address, 0x44444342);
        // Writing two bytes at offset 3 should overflow, so it shouldn't have any effect
        bus.write(0, 3, &[0x45, 0x45]);
        assert_eq!(bus.config_address, 0x44444342);
        // Write one byte at a time
        bus.write(0, 0, &[0x0]);
        assert_eq!(bus.config_address, 0x44444300);
        bus.write(0, 1, &[0x0]);
        assert_eq!(bus.config_address, 0x44440000);
        bus.write(0, 2, &[0x0]);
        assert_eq!(bus.config_address, 0x44000000);
        bus.write(0, 3, &[0x0]);
        assert_eq!(bus.config_address, 0x00000000);
        // Writing past 4 bytes should have no effect
        bus.write(0, 4, &[0x13]);
        assert_eq!(bus.config_address, 0x0);
    }

    #[test]
    fn test_reading_io_config_address() {
        let mock = Arc::new(RelocationMock::default());
        let root = PciRoot::new(None);
        let mut bus = PciConfigIo::new(Arc::new(Mutex::new(PciBus::new(root, mock))));
        let mut buffer = [0u8; 4];
        bus.config_address = 0x13121110;
        // First 4 bytes are the config address
        // Next 4 bytes are the values read from the configuration space.
        //
        // Reading past offset 7 should return nothing (all 1s)
        bus.read(0, 8, &mut buffer);
        assert_eq!(buffer, [0xff; 4]);
        // offset + buffer.len() needs to be smaller than or equal to 4
        bus.read(0, 1, &mut buffer);
        assert_eq!(buffer, [0xff; 4]);
        bus.read(0, 2, &mut buffer[..3]);
        assert_eq!(buffer, [0xff; 4]);
        bus.read(0, 3, &mut buffer[..2]);
        assert_eq!(buffer, [0xff; 4]);
        // reading one byte at a time
        bus.read(0, 0, &mut buffer[0..1]);
        assert_eq!(buffer, [0x10, 0xff, 0xff, 0xff]);
        bus.read(0, 1, &mut buffer[1..2]);
        assert_eq!(buffer, [0x10, 0x11, 0xff, 0xff]);
        bus.read(0, 2, &mut buffer[2..3]);
        assert_eq!(buffer, [0x10, 0x11, 0x12, 0xff]);
        bus.read(0, 3, &mut buffer[3..4]);
        assert_eq!(buffer, [0x10, 0x11, 0x12, 0x13]);
        // reading two bytes at a time
        bus.config_address = 0x42434445;
        bus.read(0, 0, &mut buffer[..2]);
        assert_eq!(buffer, [0x45, 0x44, 0x12, 0x13]);
        bus.read(0, 1, &mut buffer[..2]);
        assert_eq!(buffer, [0x44, 0x43, 0x12, 0x13]);
        bus.read(0, 2, &mut buffer[..2]);
        assert_eq!(buffer, [0x43, 0x42, 0x12, 0x13]);
        // reading all of it at once
        bus.read(0, 0, &mut buffer);
        assert_eq!(buffer, [0x45, 0x44, 0x43, 0x42]);
    }

    // Builds a bus with a `PciDevMock` at slot 1 and returns both access
    // mechanisms plus the relocation counter.
    fn initialize_bus() -> (PciConfigMmio, PciConfigIo, Arc<RelocationMock>) {
        let mock = Arc::new(RelocationMock::default());
        let root = PciRoot::new(None);
        let mut bus = PciBus::new(root, mock.clone());
        bus.add_device(1, Arc::new(Mutex::new(PciDevMock::new())));
        let bus = Arc::new(Mutex::new(bus));
        (PciConfigMmio::new(bus.clone()), PciConfigIo::new(bus), mock)
    }

    #[test]
    fn test_invalid_register_boundary_reads() {
        let (mut mmio_config, mut io_config, _) = initialize_bus();
        // Read crossing register boundaries
        let mut buffer = [0u8; 4];
        mmio_config.read(0, 1, &mut buffer);
        assert_eq!(0xffff_ffff, u32::from_le_bytes(buffer));
        let mut buffer = [0u8; 4];
        io_config.read(0, 1, &mut buffer);
        assert_eq!(0xffff_ffff, u32::from_le_bytes(buffer));
        // As well in the config space
        let mut buffer = [0u8; 4];
        io_config.read(0, 5, &mut buffer);
        assert_eq!(0xffff_ffff, u32::from_le_bytes(buffer));
    }

    // MMIO config addresses are of the form
    //
    // | Base address upper bits | Bus Number | Device Number | Function Number | Register number | Byte offset |
    // | 31-28                   | 27-20      | 19-15         | 14-12           | 11-2            | 0-1         |
    //
    // Meaning that the offset is built using:
    //
    // `bus << 20 | device << 15 | function << 12 | register << 2 | byte`
    fn mmio_offset(bus: u8, device: u8, function: u8, register: u16, byte: u8) -> u32 {
        assert!(device < 32);
        assert!(function < 8);
        assert!(register < 1024);
        assert!(byte < 4);
        (bus as u32) << 20
            | (device as u32) << 15
            | (function as u32) << 12
            | (register as u32) << 2
            | (byte as u32)
    }

    fn read_mmio_config(
        config: &mut PciConfigMmio,
        bus: u8,
        device: u8,
        function: u8,
        register: u16,
        byte: u8,
        data: &mut [u8],
    ) {
        config.read(
            0,
            mmio_offset(bus, device, function, register, byte) as u64,
            data,
        );
    }

    fn write_mmio_config(
        config: &mut PciConfigMmio,
        bus: u8,
        device: u8,
        function: u8,
        register: u16,
        byte: u8,
        data: &[u8],
    ) {
        config.write(
            0,
            mmio_offset(bus, device, function, register, byte) as u64,
            data,
        );
    }

    // Similarly, when using the IO mechanism the config addresses have the following format
    //
    // | Enabled | zeros | Bus Number | Device Number | Function Number | Register number | zeros |
    // | 31      | 30-24 | 23-16      | 15-11         | 10-8            | 7-2             | 1-0   |
    //
    //
    // Meaning that the address is built using:
    //
    // 0x8000_0000 | bus << 16 | device << 11 | function << 8 | register << 2;
    //
    // Only 32-bit aligned accesses are allowed here.
    fn pio_offset(enabled: bool, bus: u8, device: u8, function: u8, register: u8) -> u32 {
        assert!(device < 32);
        assert!(function < 8);
        assert!(register < 64);
        let offset = if enabled { 0x8000_0000 } else { 0u32 };
        offset
            | (bus as u32) << 16
            | (device as u32) << 11
            | (function as u32) << 8
            | (register as u32) << 2
    }

    fn set_io_address(
        config: &mut PciConfigIo,
        enabled: bool,
        bus: u8,
        device: u8,
        function: u8,
        register: u8,
    ) {
        let address = u32::to_le_bytes(pio_offset(enabled, bus, device, function, register));
        config.write(0, 0, &address);
    }

    fn read_io_config(
        config: &mut PciConfigIo,
        enabled: bool,
        bus: u8,
        device: u8,
        function: u8,
        register: u8,
        data: &mut [u8],
    ) {
        set_io_address(config, enabled, bus, device, function, register);
        config.read(0, 4, data);
    }

    fn write_io_config(
        config: &mut PciConfigIo,
        enabled: bool,
        bus: u8,
        device: u8,
        function: u8,
        register: u8,
        data: &[u8],
    ) {
        set_io_address(config, enabled, bus, device, function, register);
        config.write(0, 4, data);
    }

    #[test]
    fn test_mmio_invalid_bus_number() {
        let (mut mmio_config, _, _) = initialize_bus();
        let mut buffer = [0u8; 4];
        // Asking for Bus 1 should return all 1s
        read_mmio_config(&mut mmio_config, 1, 0, 0, 0, 0, &mut buffer);
        assert_eq!(buffer, u32::to_le_bytes(0xffff_ffff));
        // Writing the same
        buffer[0] = 0x42;
        write_mmio_config(&mut mmio_config, 1, 0, 0, 15, 0, &buffer);
        read_mmio_config(&mut mmio_config, 1, 0, 0, 15, 0, &mut buffer);
        assert_eq!(buffer, u32::to_le_bytes(0xffff_ffff));
        read_mmio_config(&mut mmio_config, 0, 0, 0, 15, 0, &mut buffer);
        assert_eq!(buffer, u32::to_le_bytes(0x0));
        // Asking for Bus 0 should work
        read_mmio_config(&mut mmio_config, 0, 0, 0, 0, 0, &mut buffer);
        assert_eq!(&buffer[..2], &u16::to_le_bytes(VENDOR_ID_INTEL));
        assert_eq!(
            &buffer[2..],
            &u16::to_le_bytes(DEVICE_ID_INTEL_VIRT_PCIE_HOST)
        );
    }

    #[test]
    fn test_io_invalid_bus_number() {
        let (_, mut pio_config, _) = initialize_bus();
        let mut buffer = [0u8; 4];
        // Asking for Bus 1 should return all 1s
        read_io_config(&mut pio_config, true, 1, 0, 0, 0, &mut buffer);
        assert_eq!(buffer, u32::to_le_bytes(0xffff_ffff));
        // Asking for Bus 0 should work
        read_io_config(&mut pio_config, true, 0, 0, 0, 0, &mut buffer);
        assert_eq!(&buffer[..2], &u16::to_le_bytes(VENDOR_ID_INTEL));
        assert_eq!(
            &buffer[2..],
            &u16::to_le_bytes(DEVICE_ID_INTEL_VIRT_PCIE_HOST)
        );
    }

    #[test]
    fn test_mmio_invalid_function() {
        let (mut mmio_config, _, _) = initialize_bus();
        let mut buffer = [0u8; 4];
        // Asking for function 1 should return all 1s
        read_mmio_config(&mut mmio_config, 0, 0, 1, 0, 0, &mut buffer);
        assert_eq!(buffer, u32::to_le_bytes(0xffff_ffff));
        // Writing the same
        buffer[0] = 0x42;
        write_mmio_config(&mut mmio_config, 0, 0, 1, 15, 0, &buffer);
        read_mmio_config(&mut mmio_config, 0, 0, 1, 15, 0, &mut buffer);
        assert_eq!(buffer, u32::to_le_bytes(0xffff_ffff));
        read_mmio_config(&mut mmio_config, 0, 0, 0, 15, 0, &mut buffer);
        assert_eq!(buffer, u32::to_le_bytes(0x0));
        // Asking for function 0 should work
        read_mmio_config(&mut mmio_config, 0, 0, 0, 0, 0, &mut buffer);
        assert_eq!(&buffer[..2], &u16::to_le_bytes(VENDOR_ID_INTEL));
        assert_eq!(
            &buffer[2..],
            &u16::to_le_bytes(DEVICE_ID_INTEL_VIRT_PCIE_HOST)
        );
    }

    #[test]
    fn test_io_invalid_function() {
        let (_, mut pio_config, _) = initialize_bus();
        let mut buffer = [0u8; 4];
        // Asking for function 1 should return all 1s
        read_io_config(&mut pio_config, true, 0, 0, 1, 0, &mut buffer);
        assert_eq!(buffer, u32::to_le_bytes(0xffff_ffff));
        // Asking for function 0 should work
        read_io_config(&mut pio_config, true, 0, 0, 0, 0, &mut buffer);
        assert_eq!(&buffer[..2], &u16::to_le_bytes(VENDOR_ID_INTEL));
        assert_eq!(
            &buffer[2..],
            &u16::to_le_bytes(DEVICE_ID_INTEL_VIRT_PCIE_HOST)
        );
    }

    #[test]
    fn test_io_disabled_reads() {
        let (_, mut pio_config, _) = initialize_bus();
        let mut buffer = [0u8; 4];
        // Trying to read without enabling should return all 1s
        read_io_config(&mut pio_config, false, 0, 0, 0, 0, &mut buffer);
        assert_eq!(buffer, u32::to_le_bytes(0xffff_ffff));
        // Asking for Bus 0 should work
        read_io_config(&mut pio_config, true, 0, 0, 0, 0, &mut buffer);
        assert_eq!(&buffer[..2], &u16::to_le_bytes(VENDOR_ID_INTEL));
        assert_eq!(
            &buffer[2..],
            &u16::to_le_bytes(DEVICE_ID_INTEL_VIRT_PCIE_HOST)
        );
    }

    #[test]
    fn test_io_disabled_writes() {
        let (_, mut pio_config, _) = initialize_bus();
        // Try to write the IRQ line used for the root port.
        let mut buffer = [0u8; 4];
        // First read the current value (use `enabled` bit)
        read_io_config(&mut pio_config, true, 0, 0, 0, 15, &mut buffer);
        let irq_line = buffer[0];
        // Write without setting the `enabled` bit.
        buffer[0] = 0x42;
        write_io_config(&mut pio_config, false, 0, 0, 0, 15, &buffer);
        // IRQ line shouldn't have changed
        read_io_config(&mut pio_config, true, 0, 0, 0, 15, &mut buffer);
        assert_eq!(buffer[0], irq_line);
        // Write with `enabled` bit set.
        buffer[0] = 0x42;
        write_io_config(&mut pio_config, true, 0, 0, 0, 15, &buffer);
        // IRQ line should change
        read_io_config(&mut pio_config, true, 0, 0, 0, 15, &mut buffer);
        assert_eq!(buffer[0], 0x42);
    }

    #[test]
    fn test_mmio_writes() {
        let (mut mmio_config, _, _) = initialize_bus();
        let mut buffer = [0u8; 4];
        read_mmio_config(&mut mmio_config, 0, 0, 0, 15, 0, &mut buffer);
        assert_eq!(buffer[0], 0x0);
        write_mmio_config(&mut mmio_config, 0, 0, 0, 15, 0, &[0x42]);
        read_mmio_config(&mut mmio_config, 0, 0, 0, 15, 0, &mut buffer);
        assert_eq!(buffer[0], 0x42);
    }

    #[test]
    fn test_bar_reprogramming() {
        let (mut mmio_config, _, mock) = initialize_bus();
        let mut buffer = [0u8; 4];
        assert_eq!(mock.cnt(), 0);
        read_mmio_config(&mut mmio_config, 0, 1, 0, 0x4, 0, &mut buffer);
        let old_addr = u32::from_le_bytes(buffer) & 0xffff_fff0;
        assert_eq!(old_addr, 0x1000);
        // Writing the lower 32bits first should not trigger any reprogramming
        write_mmio_config(
            &mut mmio_config,
            0,
            1,
            0,
            0x4,
            0,
            &u32::to_le_bytes(0x1312_0000),
        );
        read_mmio_config(&mut mmio_config, 0, 1, 0, 0x4, 0, &mut buffer);
        let new_addr = u32::from_le_bytes(buffer) & 0xffff_fff0;
        assert_eq!(new_addr, 0x1312_0000);
        assert_eq!(mock.cnt(), 0);
        // Writing the upper 32bits should now trigger the reprogramming logic
        write_mmio_config(&mut mmio_config, 0, 1, 0, 0x5, 0, &u32::to_le_bytes(0x1110));
        read_mmio_config(&mut mmio_config, 0, 1, 0, 0x5, 0, &mut buffer);
        let new_addr = u32::from_le_bytes(buffer);
        assert_eq!(new_addr, 0x1110);
        assert_eq!(mock.cnt(), 1);
        // BAR2 should not be used, so reading its address should return all 0s
        read_mmio_config(&mut mmio_config, 0, 1, 0, 0x6, 0, &mut buffer);
        assert_eq!(buffer, [0x0, 0x0, 0x0, 0x0]);
        // and reprogramming shouldn't have any effect
        write_mmio_config(
            &mut mmio_config,
            0,
            1,
            0,
            0x5,
            0,
            &u32::to_le_bytes(0x1312_1110),
        );
        read_mmio_config(&mut mmio_config, 0, 1, 0, 0x6, 0, &mut buffer);
        assert_eq!(buffer, [0x0, 0x0, 0x0, 0x0]);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vmm_config/vsock.rs | src/vmm/src/vmm_config/vsock.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::convert::TryFrom;
use std::sync::{Arc, Mutex};
use serde::{Deserialize, Serialize};
use crate::devices::virtio::vsock::{Vsock, VsockError, VsockUnixBackend, VsockUnixBackendError};
/// Shared, mutex-guarded handle to a vsock device with a Unix backend.
type MutexVsockUnix = Arc<Mutex<Vsock<VsockUnixBackend>>>;
/// Errors associated with `VsockDeviceConfig`.
// NOTE: the variant `///` docs below are the `Display` strings generated by
// `displaydoc`; do not reword them casually.
#[derive(Debug, derive_more::From, thiserror::Error, displaydoc::Display)]
pub enum VsockConfigError {
    /// Cannot create backend for vsock device: {0}
    CreateVsockBackend(VsockUnixBackendError),
    /// Cannot create vsock device: {0}
    CreateVsockDevice(VsockError),
}
/// This struct represents the strongly typed equivalent of the json body
/// from vsock related requests.
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct VsockDeviceConfig {
    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    /// ID of the vsock device.
    // NOTE(review): the builder never reads this field and the
    // `From<&VsockAndUnixPath>` conversion always reports `None` —
    // presumably it is only accepted for backward compatibility.
    pub vsock_id: Option<String>,
    /// A 32-bit Context Identifier (CID) used to identify the guest.
    pub guest_cid: u32,
    /// Path to local unix socket.
    pub uds_path: String,
}
#[derive(Debug)]
/// Pairs the live vsock device with the host UDS path its backend uses.
struct VsockAndUnixPath {
    // Shared handle to the device.
    vsock: MutexVsockUnix,
    // Cached host socket path, so `config()` can report it and `insert()` can
    // unlink it when replacing the device.
    uds_path: String,
}
impl From<&VsockAndUnixPath> for VsockDeviceConfig {
    /// Reconstructs the API-level configuration from the live device.
    fn from(vsock: &VsockAndUnixPath) -> Self {
        let vsock_lock = vsock.vsock.lock().unwrap();
        VsockDeviceConfig {
            // The id is never reported back through the configuration.
            vsock_id: None,
            // NOTE(review): the unwrap assumes the device CID fits in u32 —
            // consistent with `guest_cid: u32` being its only source here.
            guest_cid: u32::try_from(vsock_lock.cid()).unwrap(),
            uds_path: vsock.uds_path.clone(),
        }
    }
}
/// A builder of Vsock with Unix backend from 'VsockDeviceConfig'.
#[derive(Debug, Default)]
pub struct VsockBuilder {
    // At most one vsock device exists per microVM.
    inner: Option<VsockAndUnixPath>,
}
impl VsockBuilder {
/// Creates an empty Vsock with Unix backend Store.
pub fn new() -> Self {
Self { inner: None }
}
/// Inserts an existing vsock device.
pub fn set_device(&mut self, device: Arc<Mutex<Vsock<VsockUnixBackend>>>) {
self.inner = Some(VsockAndUnixPath {
uds_path: device
.lock()
.expect("Poisoned lock")
.backend()
.host_sock_path()
.to_owned(),
vsock: device.clone(),
});
}
/// Inserts a Unix backend Vsock in the store.
/// If an entry already exists, it will overwrite it.
pub fn insert(&mut self, cfg: VsockDeviceConfig) -> Result<(), VsockConfigError> {
// Make sure to drop the old one and remove the socket before creating a new one.
if let Some(existing) = self.inner.take() {
std::fs::remove_file(existing.uds_path).map_err(VsockUnixBackendError::UnixBind)?;
}
self.inner = Some(VsockAndUnixPath {
uds_path: cfg.uds_path.clone(),
vsock: Arc::new(Mutex::new(Self::create_unixsock_vsock(cfg)?)),
});
Ok(())
}
/// Provides a reference to the Vsock if present.
pub fn get(&self) -> Option<&MutexVsockUnix> {
self.inner.as_ref().map(|pair| &pair.vsock)
}
/// Creates a Vsock device from a VsockDeviceConfig.
pub fn create_unixsock_vsock(
cfg: VsockDeviceConfig,
) -> Result<Vsock<VsockUnixBackend>, VsockConfigError> {
let backend = VsockUnixBackend::new(u64::from(cfg.guest_cid), cfg.uds_path)?;
Vsock::new(u64::from(cfg.guest_cid), backend).map_err(VsockConfigError::CreateVsockDevice)
}
/// Returns the structure used to configure the vsock device.
pub fn config(&self) -> Option<VsockDeviceConfig> {
self.inner.as_ref().map(VsockDeviceConfig::from)
}
}
#[cfg(test)]
pub(crate) mod tests {
    use vmm_sys_util::tempfile::TempFile;

    use super::*;
    use crate::devices::virtio::vsock::VSOCK_DEV_ID;

    // Default test configuration: guest CID 3, UDS path borrowed from a
    // (removed) temp file so the backend can bind a fresh socket there.
    pub(crate) fn default_config(tmp_sock_file: &TempFile) -> VsockDeviceConfig {
        VsockDeviceConfig {
            vsock_id: None,
            guest_cid: 3,
            uds_path: tmp_sock_file.as_path().to_str().unwrap().to_string(),
        }
    }

    #[test]
    fn test_vsock_create() {
        let mut tmp_sock_file = TempFile::new().unwrap();
        // Remove the file so the backend can bind the Unix socket at that path.
        tmp_sock_file.remove().unwrap();
        let vsock_config = default_config(&tmp_sock_file);
        VsockBuilder::create_unixsock_vsock(vsock_config).unwrap();
    }

    #[test]
    fn test_vsock_insert() {
        let mut store = VsockBuilder::new();
        let mut tmp_sock_file = TempFile::new().unwrap();
        tmp_sock_file.remove().unwrap();
        let mut vsock_config = default_config(&tmp_sock_file);
        store.insert(vsock_config.clone()).unwrap();
        let vsock = store.get().unwrap();
        assert_eq!(vsock.lock().unwrap().id(), VSOCK_DEV_ID);
        // Re-inserting with a different CID must replace the stored device.
        let new_cid = vsock_config.guest_cid + 1;
        vsock_config.guest_cid = new_cid;
        store.insert(vsock_config).unwrap();
        let vsock = store.get().unwrap();
        assert_eq!(vsock.lock().unwrap().cid(), u64::from(new_cid));
    }

    #[test]
    fn test_vsock_config() {
        let mut vsock_builder = VsockBuilder::new();
        let mut tmp_sock_file = TempFile::new().unwrap();
        tmp_sock_file.remove().unwrap();
        let vsock_config = default_config(&tmp_sock_file);
        vsock_builder.insert(vsock_config.clone()).unwrap();
        // The reported configuration must round-trip the inserted one.
        let config = vsock_builder.config();
        assert!(config.is_some());
        assert_eq!(config.unwrap(), vsock_config);
    }

    #[test]
    fn test_set_device() {
        let mut vsock_builder = VsockBuilder::new();
        let mut tmp_sock_file = TempFile::new().unwrap();
        tmp_sock_file.remove().unwrap();
        let vsock = Vsock::new(
            0,
            VsockUnixBackend::new(1, tmp_sock_file.as_path().to_str().unwrap().to_string())
                .unwrap(),
        )
        .unwrap();
        vsock_builder.set_device(Arc::new(Mutex::new(vsock)));
        assert!(vsock_builder.inner.is_some());
        // The builder must cache the backend's socket path.
        assert_eq!(
            vsock_builder.inner.unwrap().uds_path,
            tmp_sock_file.as_path().to_str().unwrap().to_string()
        )
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vmm_config/entropy.rs | src/vmm/src/vmm_config/entropy.rs | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::ops::Deref;
use std::sync::{Arc, Mutex};
use serde::{Deserialize, Serialize};
use super::RateLimiterConfig;
use crate::devices::virtio::rng::{Entropy, EntropyError};
/// This struct represents the strongly typed equivalent of the json body from entropy device
/// related requests.
#[derive(Debug, Default, Clone, PartialEq, Eq, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct EntropyDeviceConfig {
    /// Configuration for RateLimiter of Entropy device
    // `None` means the device runs without rate limiting configured.
    pub rate_limiter: Option<RateLimiterConfig>,
}
impl From<&Entropy> for EntropyDeviceConfig {
    /// Reconstructs the API-level configuration from the live device.
    fn from(dev: &Entropy) -> Self {
        let rate_limiter: RateLimiterConfig = dev.rate_limiter().into();
        EntropyDeviceConfig {
            // NOTE(review): `into_option` presumably collapses an
            // unconfigured limiter into `None` — confirm against
            // `RateLimiterConfig::into_option`.
            rate_limiter: rate_limiter.into_option(),
        }
    }
}
/// Errors that can occur while handling configuration for
/// an entropy device
// NOTE: the variant `///` docs double as `Display` strings via `displaydoc`.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum EntropyDeviceError {
    /// Could not create Entropy device: {0}
    CreateDevice(#[from] EntropyError),
    /// Could not create RateLimiter from configuration: {0}
    CreateRateLimiter(#[from] std::io::Error),
}
/// A builder type used to construct an Entropy device
// Holds at most one device; `None` until `build`/`insert`/`set_device` runs.
#[derive(Debug, Default)]
pub struct EntropyDeviceBuilder(Option<Arc<Mutex<Entropy>>>);
impl EntropyDeviceBuilder {
/// Create a new instance for the builder
pub fn new() -> Self {
Self(None)
}
/// Build an entropy device and return a (counted) reference to it protected by a mutex
pub fn build(
&mut self,
config: EntropyDeviceConfig,
) -> Result<Arc<Mutex<Entropy>>, EntropyDeviceError> {
let rate_limiter = config
.rate_limiter
.map(RateLimiterConfig::try_into)
.transpose()?;
let dev = Arc::new(Mutex::new(Entropy::new(rate_limiter.unwrap_or_default())?));
self.0 = Some(dev.clone());
Ok(dev)
}
/// Insert a new entropy device from a configuration object
pub fn insert(&mut self, config: EntropyDeviceConfig) -> Result<(), EntropyDeviceError> {
let _ = self.build(config)?;
Ok(())
}
/// Get a reference to the entropy device, if present
pub fn get(&self) -> Option<&Arc<Mutex<Entropy>>> {
self.0.as_ref()
}
/// Get the configuration of the entropy device (if any)
pub fn config(&self) -> Option<EntropyDeviceConfig> {
self.0
.as_ref()
.map(|dev| EntropyDeviceConfig::from(dev.lock().unwrap().deref()))
}
/// Set the entropy device from an already created object
pub fn set_device(&mut self, device: Arc<Mutex<Entropy>>) {
self.0 = Some(device);
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::rate_limiter::RateLimiter;

    #[test]
    fn test_entropy_device_create() {
        let config = EntropyDeviceConfig::default();
        let mut builder = EntropyDeviceBuilder::new();
        assert!(builder.get().is_none());
        builder.insert(config.clone()).unwrap();
        assert!(builder.get().is_some());
        // The reported configuration must round-trip the inserted one.
        assert_eq!(builder.config().unwrap(), config);
    }

    #[test]
    fn test_set_device() {
        let mut builder = EntropyDeviceBuilder::new();
        let device = Entropy::new(RateLimiter::default()).unwrap();
        assert!(builder.0.is_none());
        builder.set_device(Arc::new(Mutex::new(device)));
        assert!(builder.0.is_some());
    }
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vmm_config/machine_config.rs | src/vmm/src/vmm_config/machine_config.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fmt::Debug;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::cpu_config::templates::{CpuTemplateType, CustomCpuTemplate, StaticCpuTemplate};
/// The default memory size of the VM, in MiB.
// Used by `MachineConfig::default()` below.
pub const DEFAULT_MEM_SIZE_MIB: usize = 128;
/// Firecracker aims to support small scale workloads only, so limit the maximum
/// vCPUs supported.
pub const MAX_SUPPORTED_VCPUS: u8 = 32;
/// Errors associated with configuring the microVM.
// NOTE: `displaydoc` turns each variant's `///` line into its user-visible
// `Display` string, so the variant docs below must not be reworded casually.
#[rustfmt::skip]
#[derive(Debug, thiserror::Error, displaydoc::Display, PartialEq, Eq)]
pub enum MachineConfigError {
    /// The memory size (MiB) is smaller than the previously set balloon device target size.
    IncompatibleBalloonSize,
    /// The memory size (MiB) is either 0, or not a multiple of the configured page size.
    InvalidMemorySize,
    /// The number of vCPUs must be greater than 0, less than {MAX_SUPPORTED_VCPUS:} and must be 1 or an even number if SMT is enabled.
    InvalidVcpuCount,
    /// Could not get the configuration of the previously installed balloon device to validate the memory size.
    InvalidVmState,
    /// Enabling simultaneous multithreading is not supported on aarch64.
    #[cfg(target_arch = "aarch64")]
    SmtNotSupported,
    /// Could not determine host kernel version when checking hugetlbfs compatibility
    KernelVersion,
}
/// Describes the possible (huge)page configurations for a microVM's memory.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub enum HugePageConfig {
    /// Do not use hugepages, e.g. back guest memory by 4K
    #[default]
    None,
    /// Back guest memory by 2MB hugetlbfs pages
    // The API spells this variant "2M"; renaming would break the wire format.
    #[serde(rename = "2M")]
    Hugetlbfs2M,
}
impl HugePageConfig {
    /// Checks whether the given memory size (in MiB) is valid for this [`HugePageConfig`],
    /// i.e. whether it is a multiple of the backing page size.
    fn is_valid_mem_size(&self, mem_size_mib: usize) -> bool {
        match self {
            // Every MiB is a whole number of 4KiB pages, so any size works.
            HugePageConfig::None => true,
            // 2MiB pages require an even number of MiB.
            HugePageConfig::Hugetlbfs2M => mem_size_mib % 2 == 0,
        }
    }

    /// Returns the flags required to pass to `mmap`, in addition to `MAP_ANONYMOUS`, to
    /// create a mapping backed by huge pages as described by this [`HugePageConfig`].
    pub fn mmap_flags(&self) -> libc::c_int {
        if self.is_hugetlbfs() {
            libc::MAP_HUGETLB | libc::MAP_HUGE_2MB
        } else {
            0
        }
    }

    /// Returns `true` iff this [`HugePageConfig`] describes a hugetlbfs-based configuration.
    pub fn is_hugetlbfs(&self) -> bool {
        match self {
            HugePageConfig::Hugetlbfs2M => true,
            HugePageConfig::None => false,
        }
    }

    /// Gets the page size in bytes of this [`HugePageConfig`].
    pub fn page_size(&self) -> usize {
        match self {
            HugePageConfig::None => 4096,
            HugePageConfig::Hugetlbfs2M => 2 << 20,
        }
    }
}
impl From<HugePageConfig> for Option<memfd::HugetlbSize> {
    /// Maps the page configuration onto the `memfd` crate's hugetlb size
    /// selector; `None` means no hugetlb flag is requested.
    fn from(value: HugePageConfig) -> Self {
        match value {
            HugePageConfig::None => None,
            HugePageConfig::Hugetlbfs2M => Some(memfd::HugetlbSize::Huge2MB),
        }
    }
}
/// Struct used in PUT `/machine-config` API call.
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct MachineConfig {
    /// Number of vcpu to start.
    pub vcpu_count: u8,
    /// The memory size in MiB.
    pub mem_size_mib: usize,
    /// Enables or disables SMT.
    #[serde(default)]
    pub smt: bool,
    /// A CPU template that is used to filter the CPU features exposed to the guest.
    // FIXME: once support for static CPU templates is removed, this field can be dropped altogether
    // Only static template names cross the API boundary here: custom templates
    // are skipped on serialization and deserialization only accepts
    // `StaticCpuTemplate` (see the helper functions below).
    #[serde(
        default,
        skip_serializing_if = "is_none_or_custom_template",
        deserialize_with = "deserialize_static_template",
        serialize_with = "serialize_static_template"
    )]
    pub cpu_template: Option<CpuTemplateType>,
    /// Enables or disables dirty page tracking. Enabling allows incremental snapshots.
    #[serde(default)]
    pub track_dirty_pages: bool,
    /// Configures what page size Firecracker should use to back guest memory.
    #[serde(default)]
    pub huge_pages: HugePageConfig,
    /// GDB socket address.
    #[cfg(feature = "gdb")]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub gdb_socket_path: Option<String>,
}
/// `skip_serializing_if` predicate for `MachineConfig::cpu_template`: the
/// field is omitted when there is no template or the template is a custom one.
fn is_none_or_custom_template(template: &Option<CpuTemplateType>) -> bool {
    match template {
        None | Some(CpuTemplateType::Custom(_)) => true,
        _ => false,
    }
}
/// Deserializes `MachineConfig::cpu_template` from its API representation,
/// which only ever carries static CPU template names.
fn deserialize_static_template<'de, D>(deserializer: D) -> Result<Option<CpuTemplateType>, D::Error>
where
    D: Deserializer<'de>,
{
    // Wrap the wire-format `StaticCpuTemplate` into the richer internal type.
    Option::<StaticCpuTemplate>::deserialize(deserializer)
        .map(|maybe_template| maybe_template.map(CpuTemplateType::Static))
}
/// Serializes `MachineConfig::cpu_template`; only ever invoked for
/// `Some(CpuTemplateType::Static(_))` because other values are filtered out
/// by `is_none_or_custom_template`.
fn serialize_static_template<S>(
    template: &Option<CpuTemplateType>,
    serializer: S,
) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    let Some(CpuTemplateType::Static(template)) = template else {
        // We have a skip_serializing_if on the field
        unreachable!()
    };
    template.serialize(serializer)
}
impl Default for MachineConfig {
    /// Machine defaults: 1 vcpu, the default memory size, SMT off, no CPU
    /// template, dirty page tracking off, regular 4K pages.
    fn default() -> Self {
        Self {
            vcpu_count: 1,
            mem_size_mib: DEFAULT_MEM_SIZE_MIB,
            smt: false,
            cpu_template: None,
            track_dirty_pages: false,
            huge_pages: HugePageConfig::None,
            #[cfg(feature = "gdb")]
            gdb_socket_path: None,
        }
    }
}
/// Struct used in PATCH `/machine-config` API call.
/// Used to update `MachineConfig` in `VmResources`.
/// This struct mirrors all the fields in `MachineConfig`.
/// All fields are optional, but at least one needs to be specified.
/// If a field is `Some(value)` then we assume an update is requested
/// for that field.
#[derive(Clone, Default, Debug, PartialEq, Eq, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct MachineConfigUpdate {
    /// Number of vcpus to start.
    #[serde(default)]
    pub vcpu_count: Option<u8>,
    /// The memory size in MiB.
    #[serde(default)]
    pub mem_size_mib: Option<usize>,
    /// Enables or disables SMT.
    #[serde(default)]
    pub smt: Option<bool>,
    /// A CPU template that is used to filter the CPU features exposed to the guest.
    /// Unlike `MachineConfig`, only static templates can be requested here.
    #[serde(default)]
    pub cpu_template: Option<StaticCpuTemplate>,
    /// Enables or disables dirty page tracking. Enabling allows incremental snapshots.
    #[serde(default)]
    pub track_dirty_pages: Option<bool>,
    /// Configures what page size Firecracker should use to back guest memory.
    #[serde(default)]
    pub huge_pages: Option<HugePageConfig>,
    /// GDB socket address. Only available when compiled with the `gdb` feature.
    #[cfg(feature = "gdb")]
    #[serde(default)]
    pub gdb_socket_path: Option<String>,
}
impl MachineConfigUpdate {
/// Checks if the update request contains any data.
/// Returns `true` if all fields are set to `None` which means that there is nothing
/// to be updated.
pub fn is_empty(&self) -> bool {
self == &Default::default()
}
}
impl From<MachineConfig> for MachineConfigUpdate {
    /// Converts a full config into an update that sets every field.
    ///
    /// Custom CPU templates cannot be represented in an update, so
    /// `cpu_template` becomes `None` unless the config holds a static template
    /// (see `static_template`).
    fn from(cfg: MachineConfig) -> Self {
        MachineConfigUpdate {
            vcpu_count: Some(cfg.vcpu_count),
            mem_size_mib: Some(cfg.mem_size_mib),
            smt: Some(cfg.smt),
            cpu_template: cfg.static_template(),
            track_dirty_pages: Some(cfg.track_dirty_pages),
            huge_pages: Some(cfg.huge_pages),
            #[cfg(feature = "gdb")]
            gdb_socket_path: cfg.gdb_socket_path,
        }
    }
}
impl MachineConfig {
    /// Sets the cpu template field to `CpuTemplateType::Custom(cpu_template)`.
    ///
    /// Overwrites any previously configured template, static or custom.
    pub fn set_custom_cpu_template(&mut self, cpu_template: CustomCpuTemplate) {
        self.cpu_template = Some(CpuTemplateType::Custom(cpu_template));
    }

    /// Returns the configured static CPU template, or `None` when no template
    /// or a custom template is set.
    fn static_template(&self) -> Option<StaticCpuTemplate> {
        match self.cpu_template {
            Some(CpuTemplateType::Static(template)) => Some(template),
            _ => None,
        }
    }

    /// Updates [`MachineConfig`] with [`MachineConfigUpdate`].
    /// Mapping for cpu template update:
    /// StaticCpuTemplate::None -> None
    /// StaticCpuTemplate::Other -> Some(CustomCpuTemplate::Static(Other)),
    /// Returns the updated `MachineConfig` object; `self` is left untouched.
    ///
    /// # Errors
    /// - `SmtNotSupported` when SMT is requested on aarch64.
    /// - `InvalidVcpuCount` when the vcpu count is 0, exceeds
    ///   `MAX_SUPPORTED_VCPUS`, or is odd (> 1) while SMT is enabled.
    /// - `InvalidMemorySize` when the memory size is 0 or incompatible with
    ///   the selected huge-page configuration.
    pub fn update(
        &self,
        update: &MachineConfigUpdate,
    ) -> Result<MachineConfig, MachineConfigError> {
        let vcpu_count = update.vcpu_count.unwrap_or(self.vcpu_count);
        let smt = update.smt.unwrap_or(self.smt);
        // SMT is not supported on aarch64.
        #[cfg(target_arch = "aarch64")]
        if smt {
            return Err(MachineConfigError::SmtNotSupported);
        }
        if vcpu_count == 0 || vcpu_count > MAX_SUPPORTED_VCPUS {
            return Err(MachineConfigError::InvalidVcpuCount);
        }
        // If SMT is enabled or is to be enabled in this call
        // only allow vcpu count to be 1 or even.
        if smt && vcpu_count > 1 && vcpu_count % 2 == 1 {
            return Err(MachineConfigError::InvalidVcpuCount);
        }
        let mem_size_mib = update.mem_size_mib.unwrap_or(self.mem_size_mib);
        let page_config = update.huge_pages.unwrap_or(self.huge_pages);
        if mem_size_mib == 0 || !page_config.is_valid_mem_size(mem_size_mib) {
            return Err(MachineConfigError::InvalidMemorySize);
        }
        // An explicit `StaticCpuTemplate::None` clears the template; absence
        // keeps the current one (static or custom).
        let cpu_template = match update.cpu_template {
            None => self.cpu_template.clone(),
            Some(StaticCpuTemplate::None) => None,
            Some(other) => Some(CpuTemplateType::Static(other)),
        };
        Ok(MachineConfig {
            vcpu_count,
            mem_size_mib,
            smt,
            cpu_template,
            track_dirty_pages: update.track_dirty_pages.unwrap_or(self.track_dirty_pages),
            huge_pages: page_config,
            // NOTE(review): unlike every other field, this does not fall back
            // to `self.gdb_socket_path` when the update leaves it unset — a
            // PATCH without `gdb_socket_path` clears any existing value.
            // Confirm this is intentional.
            #[cfg(feature = "gdb")]
            gdb_socket_path: update.gdb_socket_path.clone(),
        })
    }
}
#[cfg(test)]
mod tests {
    use crate::cpu_config::templates::{CpuTemplateType, CustomCpuTemplate, StaticCpuTemplate};
    use crate::vmm_config::machine_config::MachineConfig;

    // Ensure the special (de)serialization logic for the cpu_template field works:
    // only static cpu templates can be specified via the machine-config endpoint, but
    // we still cram custom cpu templates into the MachineConfig struct if they're set otherwise.
    // Ensure that during (de)serialization we preserve static templates, but we set custom
    // templates to None.
    #[test]
    fn test_serialize_machine_config() {
        // Pick an arch-appropriate static template for the round-trip checks.
        #[cfg(target_arch = "aarch64")]
        const TEMPLATE: StaticCpuTemplate = StaticCpuTemplate::V1N1;
        #[cfg(target_arch = "x86_64")]
        const TEMPLATE: StaticCpuTemplate = StaticCpuTemplate::T2S;

        // No template round-trips to no template.
        let mconfig = MachineConfig {
            cpu_template: None,
            ..Default::default()
        };
        let serialized = serde_json::to_string(&mconfig).unwrap();
        let deserialized = serde_json::from_str::<MachineConfig>(&serialized).unwrap();
        assert!(deserialized.cpu_template.is_none());

        // A static template survives the round trip unchanged.
        let mconfig = MachineConfig {
            cpu_template: Some(CpuTemplateType::Static(TEMPLATE)),
            ..Default::default()
        };
        let serialized = serde_json::to_string(&mconfig).unwrap();
        let deserialized = serde_json::from_str::<MachineConfig>(&serialized).unwrap();
        assert_eq!(
            deserialized.cpu_template,
            Some(CpuTemplateType::Static(TEMPLATE))
        );

        // A custom template is dropped (serialized as absent).
        let mconfig = MachineConfig {
            cpu_template: Some(CpuTemplateType::Custom(CustomCpuTemplate::default())),
            ..Default::default()
        };
        let serialized = serde_json::to_string(&mconfig).unwrap();
        let deserialized = serde_json::from_str::<MachineConfig>(&serialized).unwrap();
        assert!(deserialized.cpu_template.is_none());
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vmm_config/serial.rs | src/vmm/src/vmm_config/serial.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::path::PathBuf;
use serde::Deserialize;
/// The body of a PUT `/serial` request.
#[derive(Debug, PartialEq, Eq, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct SerialConfig {
    /// Named pipe or file used as output for guest serial console.
    // NOTE(review): the semantics of `None` (disable output vs. keep current)
    // are not visible here — confirm against the request handler.
    pub serial_out_path: Option<PathBuf>,
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vmm_config/pmem.rs | src/vmm/src/vmm_config/pmem.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::sync::{Arc, Mutex};
use serde::{Deserialize, Serialize};
use crate::devices::virtio::pmem::device::{Pmem, PmemError};
/// Errors associated with the operations allowed on a pmem device
// NOTE: the variant doc comments below double as the `Display` format strings
// via `displaydoc`, so editing them changes user-visible error messages.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum PmemConfigError {
    /// Attempt to add pmem as a root device while the root device defined as a block device
    AddingSecondRootDevice,
    /// A root pmem device already exist
    RootPmemDeviceAlreadyExist,
    /// Unable to create the virtio-pmem device: {0}
    CreateDevice(#[from] PmemError),
    /// Error accessing underlying file: {0}
    File(std::io::Error),
}
/// Use this structure to setup a Pmem device before booting the kernel.
#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct PmemConfig {
    /// Unique identifier of the device.
    pub id: String,
    /// Path of the drive (the backing file) on the host.
    pub path_on_host: String,
    /// Use this pmem device for rootfs
    #[serde(default)]
    pub root_device: bool,
    /// Map the file as read only
    #[serde(default)]
    pub read_only: bool,
}
/// Wrapper for the collection that holds all the Pmem devices.
#[derive(Debug, Default)]
pub struct PmemBuilder {
    /// The list of pmem devices, each shared and independently lockable.
    pub devices: Vec<Arc<Mutex<Pmem>>>,
}
impl PmemBuilder {
    /// Specifies whether there is a root pmem device already present in the list.
    pub fn has_root_device(&self) -> bool {
        self.devices
            .iter()
            .any(|d| d.lock().unwrap().config.root_device)
    }

    /// Build a device from the config and store it in the builder.
    ///
    /// If a device with the same id already exists it is replaced, otherwise
    /// the new device is appended. `has_block_root` indicates whether a block
    /// device is already configured as the root device.
    ///
    /// # Errors
    /// - `AddingSecondRootDevice` when `config` requests root while a block
    ///   root device exists.
    /// - `RootPmemDeviceAlreadyExist` when `config` requests root while a
    ///   different pmem device is already root.
    /// - `CreateDevice` when `Pmem::new` fails.
    pub fn build(
        &mut self,
        config: PmemConfig,
        has_block_root: bool,
    ) -> Result<(), PmemConfigError> {
        if config.root_device && has_block_root {
            return Err(PmemConfigError::AddingSecondRootDevice);
        }
        // Look for an existing device with the same id (replacement case).
        let position = self
            .devices
            .iter()
            .position(|d| d.lock().unwrap().config.id == config.id);
        if let Some(index) = position {
            // Replacing: only reject if this update would promote a non-root
            // device to root while another root device already exists.
            if !self.devices[index].lock().unwrap().config.root_device
                && config.root_device
                && self.has_root_device()
            {
                return Err(PmemConfigError::RootPmemDeviceAlreadyExist);
            }
            let pmem = Pmem::new(config)?;
            let pmem = Arc::new(Mutex::new(pmem));
            self.devices[index] = pmem;
        } else {
            // Adding a new device: at most one pmem root device is allowed.
            if config.root_device && self.has_root_device() {
                return Err(PmemConfigError::RootPmemDeviceAlreadyExist);
            }
            let pmem = Pmem::new(config)?;
            let pmem = Arc::new(Mutex::new(pmem));
            self.devices.push(pmem);
        }
        Ok(())
    }

    /// Adds an existing pmem device in the builder. This function should
    /// only be used during snapshot restoration process and should add
    /// devices in the same order as they were in the original VM.
    pub fn add_device(&mut self, device: Arc<Mutex<Pmem>>) {
        self.devices.push(device);
    }

    /// Returns a vec with the structures used to configure the devices.
    pub fn configs(&self) -> Vec<PmemConfig> {
        self.devices
            .iter()
            .map(|b| b.lock().unwrap().config.clone())
            .collect()
    }
}
#[cfg(test)]
mod tests {
    use vmm_sys_util::tempfile::TempFile;

    use super::*;

    // Building a device, then building again with the same id, replaces the
    // existing device in place (including its root flag).
    #[test]
    fn test_pmem_builder_build() {
        let mut builder = PmemBuilder::default();
        let dummy_file = TempFile::new().unwrap();
        dummy_file.as_file().set_len(Pmem::ALIGNMENT).unwrap();
        let dummy_path = dummy_file.as_path().to_str().unwrap().to_string();
        let mut config = PmemConfig {
            id: "1".into(),
            path_on_host: dummy_path,
            root_device: true,
            read_only: false,
        };
        builder.build(config.clone(), false).unwrap();
        assert_eq!(builder.devices.len(), 1);
        assert!(builder.has_root_device());
        // First device got replaced with new one
        config.root_device = false;
        builder.build(config, false).unwrap();
        assert_eq!(builder.devices.len(), 1);
        assert!(!builder.has_root_device());
    }

    // Adding a second pmem device that also claims root must be rejected.
    #[test]
    fn test_pmem_builder_build_seconde_root() {
        let mut builder = PmemBuilder::default();
        let dummy_file = TempFile::new().unwrap();
        dummy_file.as_file().set_len(Pmem::ALIGNMENT).unwrap();
        let dummy_path = dummy_file.as_path().to_str().unwrap().to_string();
        let mut config = PmemConfig {
            id: "1".into(),
            path_on_host: dummy_path,
            root_device: true,
            read_only: false,
        };
        builder.build(config.clone(), false).unwrap();
        config.id = "2".into();
        assert!(matches!(
            builder.build(config.clone(), false).unwrap_err(),
            PmemConfigError::RootPmemDeviceAlreadyExist,
        ));
    }

    // A pmem root device must be rejected when a block device is already root.
    #[test]
    fn test_pmem_builder_build_root_with_block_already_a_root() {
        let mut builder = PmemBuilder::default();
        let dummy_file = TempFile::new().unwrap();
        dummy_file.as_file().set_len(Pmem::ALIGNMENT).unwrap();
        let dummy_path = dummy_file.as_path().to_str().unwrap().to_string();
        let config = PmemConfig {
            id: "1".into(),
            path_on_host: dummy_path,
            root_device: true,
            read_only: false,
        };
        assert!(matches!(
            builder.build(config, true).unwrap_err(),
            PmemConfigError::AddingSecondRootDevice,
        ));
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vmm_config/instance_info.rs | src/vmm/src/vmm_config/instance_info.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fmt::{self, Display, Formatter};
use serde::{Serialize, ser};
/// Enumerates microVM runtime states.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub enum VmState {
    /// Vm not started (yet). This is the default state.
    #[default]
    NotStarted,
    /// Vm is Paused
    Paused,
    /// Vm is running
    Running,
}
impl Display for VmState {
    /// Renders the state as a human-readable label.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        let label = match self {
            VmState::NotStarted => "Not started",
            VmState::Paused => "Paused",
            VmState::Running => "Running",
        };
        f.write_str(label)
    }
}
impl ser::Serialize for VmState {
    /// Serializes the state as its `Display` string (e.g. "Running") rather
    /// than as an enum variant, keeping the API output human-readable.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: ser::Serializer,
    {
        self.to_string().serialize(serializer)
    }
}
/// Serializable struct that contains general information about the microVM.
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize)]
pub struct InstanceInfo {
    /// The ID of the microVM.
    pub id: String,
    /// Whether the microVM is not started/running/paused.
    /// Serialized via `VmState`'s string form (see its `Serialize` impl).
    pub state: VmState,
    /// The version of the VMM that runs the microVM.
    pub vmm_version: String,
    /// The name of the application that runs the microVM.
    pub app_name: String,
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.