repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/acpi/mod.rs | src/vmm/src/acpi/mod.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use acpi_tables::fadt::{FADT_F_HW_REDUCED_ACPI, FADT_F_PWR_BUTTON, FADT_F_SLP_BUTTON};
use acpi_tables::{Aml, Dsdt, Fadt, Madt, Mcfg, Rsdp, Sdt, Xsdt, aml};
use log::{debug, error};
use vm_allocator::AllocPolicy;
use crate::Vcpu;
use crate::acpi::x86_64::{
apic_addr, rsdp_addr, setup_arch_dsdt, setup_arch_fadt, setup_interrupt_controllers,
};
use crate::arch::x86_64::layout;
use crate::device_manager::DeviceManager;
use crate::vstate::memory::{GuestAddress, GuestMemoryMmap};
use crate::vstate::resources::ResourceAllocator;
mod x86_64;
// Our (Original Equipment Manufacturer" (OEM) name. OEM is how ACPI names the manufacturer of the
// hardware that is exposed to the OS, through ACPI tables. The OEM name is passed in every ACPI
// table, to let the OS know that we are the owner of the table.
const OEM_ID: [u8; 6] = *b"FIRECK";
// In reality the OEM revision is per table and it defines the revision of the OEM's implementation
// of the particular ACPI table. For our purpose, we can set it to a fixed value for all the tables
const OEM_REVISION: u32 = 0;
// This is needed for an entry in the FADT table. Populating this entry in FADT is a way to let the
// guest know that it runs within a Firecracker microVM.
const HYPERVISOR_VENDOR_ID: [u8; 8] = *b"FIRECKVM";
#[derive(Debug, thiserror::Error, displaydoc::Display)]
/// Error type for ACPI related operations
pub enum AcpiError {
/// Could not allocate resources: {0}
VmAllocator(#[from] vm_allocator::Error),
/// ACPI tables error: {0}
AcpiTables(#[from] acpi_tables::AcpiError),
/// Error creating AML bytecode: {0}
AmlError(#[from] aml::AmlError),
}
/// Helper type that holds the guest memory in which we write the tables in and a resource
/// allocator for allocating space for the tables
struct AcpiTableWriter<'a> {
mem: &'a GuestMemoryMmap,
}
impl AcpiTableWriter<'_> {
/// Write a table in guest memory
///
/// This will allocate enough space inside guest memory and write the table in the allocated
/// buffer. It returns the address in which it wrote the table.
fn write_acpi_table<S>(
&mut self,
resource_allocator: &mut ResourceAllocator,
table: &mut S,
) -> Result<u64, AcpiError>
where
S: Sdt,
{
let addr = resource_allocator.allocate_system_memory(
table.len().try_into().unwrap(),
1,
AllocPolicy::FirstMatch,
)?;
table
.write_to_guest(self.mem, GuestAddress(addr))
.inspect_err(|err| error!("acpi: Could not write table in guest memory: {err}"))?;
debug!(
"acpi: Wrote table ({} bytes) at address: {:#010x}",
table.len(),
addr
);
Ok(addr)
}
/// Build the DSDT table for the guest
fn build_dsdt(
&mut self,
device_manager: &mut DeviceManager,
resource_allocator: &mut ResourceAllocator,
) -> Result<u64, AcpiError> {
let mut dsdt_data = Vec::new();
// Virtio-devices DSDT data
dsdt_data.extend_from_slice(&device_manager.mmio_devices.dsdt_data);
// Add GED and VMGenID AML data.
device_manager
.acpi_devices
.append_aml_bytes(&mut dsdt_data)?;
if let Some(pci_segment) = &device_manager.pci_devices.pci_segment {
pci_segment.append_aml_bytes(&mut dsdt_data)?;
}
// Architecture specific DSDT data
setup_arch_dsdt(&mut dsdt_data)?;
let mut dsdt = Dsdt::new(OEM_ID, *b"FCVMDSDT", OEM_REVISION, dsdt_data);
self.write_acpi_table(resource_allocator, &mut dsdt)
}
/// Build the FADT table for the guest
///
/// This includes a pointer with the location of the DSDT in guest memory
fn build_fadt(
&mut self,
resource_allocator: &mut ResourceAllocator,
dsdt_addr: u64,
) -> Result<u64, AcpiError> {
let mut fadt = Fadt::new(OEM_ID, *b"FCVMFADT", OEM_REVISION);
fadt.set_hypervisor_vendor_id(HYPERVISOR_VENDOR_ID);
fadt.set_x_dsdt(dsdt_addr);
fadt.set_flags(
(1 << FADT_F_HW_REDUCED_ACPI) | (1 << FADT_F_PWR_BUTTON) | (1 << FADT_F_SLP_BUTTON),
);
setup_arch_fadt(&mut fadt);
self.write_acpi_table(resource_allocator, &mut fadt)
}
/// Build the MADT table for the guest
///
/// This includes information about the interrupt controllers supported in the platform
fn build_madt(
&mut self,
resource_allocator: &mut ResourceAllocator,
nr_vcpus: u8,
) -> Result<u64, AcpiError> {
let mut madt = Madt::new(
OEM_ID,
*b"FCVMMADT",
OEM_REVISION,
apic_addr(),
setup_interrupt_controllers(nr_vcpus),
);
self.write_acpi_table(resource_allocator, &mut madt)
}
/// Build the XSDT table for the guest
///
/// Currently, we pass to the guest just FADT and MADT tables.
fn build_xsdt(
&mut self,
resource_allocator: &mut ResourceAllocator,
fadt_addr: u64,
madt_addr: u64,
mcfg_addr: u64,
) -> Result<u64, AcpiError> {
let mut xsdt = Xsdt::new(
OEM_ID,
*b"FCMVXSDT",
OEM_REVISION,
vec![fadt_addr, madt_addr, mcfg_addr],
);
self.write_acpi_table(resource_allocator, &mut xsdt)
}
/// Build the MCFG table for the guest.
fn build_mcfg(
&mut self,
resource_allocator: &mut ResourceAllocator,
pci_mmio_config_addr: u64,
) -> Result<u64, AcpiError> {
let mut mcfg = Mcfg::new(OEM_ID, *b"FCMVMCFG", OEM_REVISION, pci_mmio_config_addr);
self.write_acpi_table(resource_allocator, &mut mcfg)
}
/// Build the RSDP pointer for the guest.
///
/// This will build the RSDP pointer which points to the XSDT table and write it in guest
/// memory. The address in which we write RSDP is pre-determined for every architecture.
/// We will not allocate arbitrary memory for it
fn build_rsdp(&mut self, xsdt_addr: u64) -> Result<(), AcpiError> {
let mut rsdp = Rsdp::new(OEM_ID, xsdt_addr);
rsdp.write_to_guest(self.mem, rsdp_addr())
.inspect_err(|err| error!("acpi: Could not write RSDP in guest memory: {err}"))?;
debug!(
"acpi: Wrote RSDP ({} bytes) at address: {:#010x}",
rsdp.len(),
rsdp_addr().0
);
Ok(())
}
}
/// Create ACPI tables for the guest
///
/// This will create the ACPI tables needed to describe to the guest OS the available hardware,
/// such as interrupt controllers, vCPUs and VirtIO devices.
pub(crate) fn create_acpi_tables(
mem: &GuestMemoryMmap,
device_manager: &mut DeviceManager,
resource_allocator: &mut ResourceAllocator,
vcpus: &[Vcpu],
) -> Result<(), AcpiError> {
let mut writer = AcpiTableWriter { mem };
let dsdt_addr = writer.build_dsdt(device_manager, resource_allocator)?;
let fadt_addr = writer.build_fadt(resource_allocator, dsdt_addr)?;
let madt_addr = writer.build_madt(resource_allocator, vcpus.len().try_into().unwrap())?;
let mcfg_addr = writer.build_mcfg(resource_allocator, layout::PCI_MMCONFIG_START)?;
let xsdt_addr = writer.build_xsdt(resource_allocator, fadt_addr, madt_addr, mcfg_addr)?;
writer.build_rsdp(xsdt_addr)
}
#[cfg(test)]
mod tests {
use acpi_tables::Sdt;
use vm_memory::Bytes;
use crate::acpi::{AcpiError, AcpiTableWriter};
use crate::arch::x86_64::layout::{SYSTEM_MEM_SIZE, SYSTEM_MEM_START};
use crate::builder::tests::default_vmm;
use crate::utils::u64_to_usize;
use crate::vstate::resources::ResourceAllocator;
use crate::vstate::vm::tests::setup_vm_with_memory;
struct MockSdt(Vec<u8>);
impl Sdt for MockSdt {
fn len(&self) -> usize {
self.0.len()
}
fn write_to_guest<M: vm_memory::GuestMemory>(
&mut self,
mem: &M,
address: vm_memory::GuestAddress,
) -> acpi_tables::Result<()> {
mem.write_slice(&self.0, address)?;
Ok(())
}
}
// Currently we are allocating up to SYSTEM_MEM_SIZE memory for ACPI tables. We are allocating
// using the FirstMatch policy, with an 1 byte alignment. This test checks that we are able to
// allocate up to this size, and get back the expected addresses.
#[test]
fn test_write_acpi_table_memory_allocation() {
// A mocke Vmm object with 128MBs of memory
let vmm = default_vmm();
let mut writer = AcpiTableWriter {
mem: vmm.vm.guest_memory(),
};
let mut resource_allocator = vmm.vm.resource_allocator();
// This should succeed
let mut sdt = MockSdt(vec![0; 4096]);
let addr = writer
.write_acpi_table(&mut resource_allocator, &mut sdt)
.unwrap();
assert_eq!(addr, SYSTEM_MEM_START);
// Let's try to write two 4K pages plus one byte
let mut sdt = MockSdt(vec![0; usize::try_from(SYSTEM_MEM_SIZE + 1).unwrap()]);
let err = writer
.write_acpi_table(&mut resource_allocator, &mut sdt)
.unwrap_err();
assert!(
matches!(
err,
AcpiError::VmAllocator(vm_allocator::Error::ResourceNotAvailable)
),
"{:?}",
err
);
// We are allocating memory for tables with alignment of 1 byte. All of these should
// succeed.
let mut sdt = MockSdt(vec![0; 5]);
let addr = writer
.write_acpi_table(&mut resource_allocator, &mut sdt)
.unwrap();
assert_eq!(addr, SYSTEM_MEM_START + 4096);
let mut sdt = MockSdt(vec![0; 2]);
let addr = writer
.write_acpi_table(&mut resource_allocator, &mut sdt)
.unwrap();
assert_eq!(addr, SYSTEM_MEM_START + 4101);
let mut sdt = MockSdt(vec![0; 4]);
let addr = writer
.write_acpi_table(&mut resource_allocator, &mut sdt)
.unwrap();
assert_eq!(addr, SYSTEM_MEM_START + 4103);
let mut sdt = MockSdt(vec![0; 8]);
let addr = writer
.write_acpi_table(&mut resource_allocator, &mut sdt)
.unwrap();
assert_eq!(addr, SYSTEM_MEM_START + 4107);
let mut sdt = MockSdt(vec![0; 16]);
let addr = writer
.write_acpi_table(&mut resource_allocator, &mut sdt)
.unwrap();
assert_eq!(addr, SYSTEM_MEM_START + 4115);
}
// If, for whatever weird reason, we end up with microVM that has less memory than the maximum
// address we allocate for ACPI tables, we would be able to allocate the tables but we would
// not be able to write them. This is practically impossible in our case. If we get such a
// guest memory, we won't be able to load the guest kernel, but the function does
// return an error on this case, so let's just check that in case any of these assumptions
// change in the future.
#[test]
fn test_write_acpi_table_small_memory() {
let (_, vm) = setup_vm_with_memory(u64_to_usize(SYSTEM_MEM_START + SYSTEM_MEM_SIZE - 4096));
let mut writer = AcpiTableWriter {
mem: vm.guest_memory(),
};
let mut resource_allocator = ResourceAllocator::new();
let mut sdt = MockSdt(vec![0; usize::try_from(SYSTEM_MEM_SIZE).unwrap()]);
let err = writer
.write_acpi_table(&mut resource_allocator, &mut sdt)
.unwrap_err();
assert!(
matches!(
err,
AcpiError::AcpiTables(acpi_tables::AcpiError::GuestMemory(
vm_memory::GuestMemoryError::PartialBuffer {
expected: 263168, // SYSTEM_MEM_SIZE
completed: 259072 // SYSTEM_MEM_SIZE - 4096
},
))
),
"{:?}",
err
);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/utils/sm.rs | src/vmm/src/utils/sm.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fmt::Debug;
/// Simple abstraction of a state machine.
///
/// `StateMachine<T>` is a wrapper over `T` that also encodes state information for `T`.
///
/// Each state for `T` is represented by a `StateFn<T>` which is a function that acts as
/// the state handler for that particular state of `T`.
///
/// `StateFn<T>` returns exactly one other `StateMachine<T>` thus each state gets clearly
/// defined transitions to other states.
pub struct StateMachine<T> {
function: Option<StateFn<T>>,
}
impl<T> Debug for StateMachine<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("StateMachine")
.field("function", &self.function.map(|f| f as usize))
.finish()
}
}
/// Type representing a state handler of a `StateMachine<T>` machine. Each state handler
/// is a function from `T` that handles a specific state of `T`.
type StateFn<T> = fn(&mut T) -> StateMachine<T>;
impl<T: Debug> StateMachine<T> {
/// Creates a new state wrapper.
///
/// # Arguments
///
/// `function` - the state handler for this state.
pub fn new(function: Option<StateFn<T>>) -> StateMachine<T> {
StateMachine { function }
}
/// Creates a new state wrapper that has further possible transitions.
///
/// # Arguments
///
/// `function` - the state handler for this state.
pub fn next(function: StateFn<T>) -> StateMachine<T> {
StateMachine::new(Some(function))
}
/// Creates a new state wrapper that has no further transitions. The state machine
/// will finish after running this handler.
///
/// # Arguments
///
/// `function` - the state handler for this last state.
pub fn finish() -> StateMachine<T> {
StateMachine::new(None)
}
/// Runs a state machine for `T` starting from the provided state.
///
/// # Arguments
///
/// `machine` - a mutable reference to the object running through the various states.
/// `starting_state_fn` - a `fn(&mut T) -> StateMachine<T>` that should be the handler for
/// the initial state.
pub fn run(machine: &mut T, starting_state_fn: StateFn<T>) {
// Start off in the `starting_state` state.
let mut state_machine = StateMachine::new(Some(starting_state_fn));
// While current state is not a final/end state, keep churning.
while let Some(state_fn) = state_machine.function {
// Run the current state handler, and get the next one.
state_machine = state_fn(machine);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
// DummyMachine with states `s1`, `s2` and `s3`.
#[derive(Debug)]
struct DummyMachine {
private_data_s1: bool,
private_data_s2: bool,
private_data_s3: bool,
}
impl DummyMachine {
fn new() -> Self {
DummyMachine {
private_data_s1: false,
private_data_s2: false,
private_data_s3: false,
}
}
// DummyMachine functions here.
// Simple state-machine: start->s1->s2->s3->done.
fn run(&mut self) {
// Verify the machine has not run yet.
assert!(!self.private_data_s1);
assert!(!self.private_data_s2);
assert!(!self.private_data_s3);
// Run the state-machine.
StateMachine::run(self, Self::s1);
// Verify the machine went through all states.
assert!(self.private_data_s1);
assert!(self.private_data_s2);
assert!(self.private_data_s3);
}
fn s1(&mut self) -> StateMachine<Self> {
// Verify private data mutates along with the states.
assert!(!self.private_data_s1);
self.private_data_s1 = true;
StateMachine::next(Self::s2)
}
fn s2(&mut self) -> StateMachine<Self> {
// Verify private data mutates along with the states.
assert!(!self.private_data_s2);
self.private_data_s2 = true;
StateMachine::next(Self::s3)
}
fn s3(&mut self) -> StateMachine<Self> {
// Verify private data mutates along with the states.
assert!(!self.private_data_s3);
self.private_data_s3 = true;
// The machine ends here, adding `s1` as next state to validate this.
StateMachine::finish()
}
}
#[test]
fn test_sm() {
let mut machine = DummyMachine::new();
machine.run();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/utils/signal.rs | src/vmm/src/utils/signal.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use libc::c_int;
pub use vmm_sys_util::signal::*;
// SAFETY: these are valid libc functions
unsafe extern "C" {
// SAFETY: Function has no invariants that can be broken.
safe fn __libc_current_sigrtmin() -> c_int;
// SAFETY: Function has no invariants that can be broken.
safe fn __libc_current_sigrtmax() -> c_int;
}
/// Sigrtmin
pub fn sigrtmin() -> c_int {
__libc_current_sigrtmin()
}
/// Sigrtmax
pub fn sigrtmax() -> c_int {
__libc_current_sigrtmax()
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/utils/mod.rs | src/vmm/src/utils/mod.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/// Module with helpers to read/write bytes into slices
pub mod byte_order;
/// Module with network related helpers
pub mod net;
/// Module with external libc functions
pub mod signal;
/// Module with state machine
pub mod sm;
use std::fs::{File, OpenOptions};
use std::num::Wrapping;
use std::os::unix::fs::OpenOptionsExt;
use std::path::Path;
use std::result::Result;
use libc::O_NONBLOCK;
/// How many bits to left-shift by to convert MiB to bytes
const MIB_TO_BYTES_SHIFT: usize = 20;
/// Return the default page size of the platform, in bytes.
pub fn get_page_size() -> Result<usize, vmm_sys_util::errno::Error> {
// SAFETY: Safe because the parameters are valid.
match unsafe { libc::sysconf(libc::_SC_PAGESIZE) } {
-1 => Err(vmm_sys_util::errno::Error::last()),
ps => Ok(usize::try_from(ps).unwrap()),
}
}
/// Safely converts a u64 value to a usize value.
/// This bypasses the Clippy lint check because we only support 64-bit platforms.
#[cfg(target_pointer_width = "64")]
#[inline]
#[allow(clippy::cast_possible_truncation)]
pub const fn u64_to_usize(num: u64) -> usize {
num as usize
}
/// Safely converts a usize value to a u64 value.
/// This bypasses the Clippy lint check because we only support 64-bit platforms.
#[cfg(target_pointer_width = "64")]
#[inline]
#[allow(clippy::cast_possible_truncation)]
pub const fn usize_to_u64(num: usize) -> u64 {
num as u64
}
/// Converts a usize into a wrapping u32.
#[inline]
pub const fn wrap_usize_to_u32(num: usize) -> Wrapping<u32> {
Wrapping(((num as u64) & 0xFFFFFFFF) as u32)
}
/// Converts MiB to Bytes
pub const fn mib_to_bytes(mib: usize) -> usize {
mib << MIB_TO_BYTES_SHIFT
}
/// Converts Bytes to MiB, truncating any remainder
pub const fn bytes_to_mib(bytes: usize) -> usize {
bytes >> MIB_TO_BYTES_SHIFT
}
/// Align address up to the aligment.
pub const fn align_up(addr: u64, align: u64) -> u64 {
debug_assert!(align != 0);
(addr + align - 1) & !(align - 1)
}
/// Align address down to the aligment.
pub const fn align_down(addr: u64, align: u64) -> u64 {
debug_assert!(align != 0);
addr & !(align - 1)
}
/// Create and open a File for writing to it.
/// In case we open a FIFO, in order to not block the instance if nobody is consuming the message
/// that is flushed to it, we are opening it with `O_NONBLOCK` flag.
/// In this case, writing to a pipe will start failing when reaching 64K of unconsumed content.
pub fn open_file_write_nonblock(path: &Path) -> Result<File, std::io::Error> {
OpenOptions::new()
.custom_flags(O_NONBLOCK)
.create(true)
.write(true)
.open(path)
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/utils/byte_order.rs | src/vmm/src/utils/byte_order.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
macro_rules! generate_read_fn {
($fn_name: ident, $data_type: ty, $byte_type: ty, $type_size: expr, $endian_type: ident) => {
/// Read bytes from the slice
pub fn $fn_name(input: &[$byte_type]) -> $data_type {
let mut array = [0u8; std::mem::size_of::<$data_type>()];
let how_many = input.len().min(std::mem::size_of::<$data_type>());
array[..how_many].copy_from_slice(&input[..how_many]);
<$data_type>::$endian_type(array)
}
};
}
macro_rules! generate_write_fn {
($fn_name: ident, $data_type: ty, $byte_type: ty, $endian_type: ident) => {
/// Write bytes to the slice
pub fn $fn_name(buf: &mut [$byte_type], n: $data_type) {
let bytes = n.$endian_type();
let how_much = buf.len().min(bytes.len());
buf[..how_much].copy_from_slice(&bytes[..how_much]);
}
};
}
generate_read_fn!(read_le_u32, u32, u8, 4, from_le_bytes);
generate_read_fn!(read_le_u64, u64, u8, 8, from_le_bytes);
generate_read_fn!(read_be_u16, u16, u8, 2, from_be_bytes);
generate_read_fn!(read_be_u32, u32, u8, 4, from_be_bytes);
generate_write_fn!(write_le_u32, u32, u8, to_le_bytes);
generate_write_fn!(write_le_u64, u64, u8, to_le_bytes);
generate_write_fn!(write_be_u16, u16, u8, to_be_bytes);
generate_write_fn!(write_be_u32, u32, u8, to_be_bytes);
#[cfg(test)]
mod tests {
use super::*;
macro_rules! byte_order_test_read_write {
($test_name: ident, $write_fn_name: ident, $read_fn_name: ident, $is_be: expr, $data_type: ty) => {
#[test]
fn $test_name() {
#[allow(overflowing_literals)]
let test_cases = [
(
0x0123_4567_89AB_CDEF as u64,
[0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef],
),
(
0x0000_0000_0000_0000 as u64,
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],
),
(
0x1923_2345_ABF3_CCD4 as u64,
[0x19, 0x23, 0x23, 0x45, 0xAB, 0xF3, 0xCC, 0xD4],
),
(
0x0FF0_0FF0_0FF0_0FF0 as u64,
[0x0F, 0xF0, 0x0F, 0xF0, 0x0F, 0xF0, 0x0F, 0xF0],
),
(
0xFFFF_FFFF_FFFF_FFFF as u64,
[0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF],
),
(
0x89AB_12D4_C2D2_09BB as u64,
[0x89, 0xAB, 0x12, 0xD4, 0xC2, 0xD2, 0x09, 0xBB],
),
];
let type_size = std::mem::size_of::<$data_type>();
#[allow(clippy::cast_possible_truncation)]
#[allow(clippy::cast_sign_loss)]
for (test_val, v_arr) in &test_cases {
let v = *test_val as $data_type;
let cmp_iter: Box<dyn Iterator<Item = _>> = if $is_be {
Box::new(v_arr[(8 - type_size)..].iter())
} else {
Box::new(v_arr.iter().rev())
};
// test write
let mut write_arr = vec![Default::default(); type_size];
$write_fn_name(&mut write_arr, v);
for (cmp, cur) in cmp_iter.zip(write_arr.iter()) {
assert_eq!(*cmp, *cur as u8)
}
// test read
let read_val = $read_fn_name(&write_arr);
assert_eq!(v, read_val);
}
}
};
}
byte_order_test_read_write!(test_le_u32, write_le_u32, read_le_u32, false, u32);
byte_order_test_read_write!(test_le_u64, write_le_u64, read_le_u64, false, u64);
byte_order_test_read_write!(test_be_u16, write_be_u16, read_be_u16, true, u16);
byte_order_test_read_write!(test_be_u32, write_be_u32, read_be_u32, true, u32);
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/utils/net/mod.rs | src/vmm/src/utils/net/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
#![warn(missing_docs)]
//! # Network-related utilities
//!
//! Provides tools for representing and handling network related concepts like MAC addresses and
//! network interfaces.
/// Provides IPv4 address utility methods.
pub mod ipv4addr;
pub mod mac;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/utils/net/mac.rs | src/vmm/src/utils/net/mac.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! Contains support for parsing and constructing MAC addresses
//! More information about MAC addresses can be found [here]
//!
//! [here]: https://en.wikipedia.org/wiki/MAC_address
use std::fmt;
use std::str::FromStr;
use serde::de::{Deserialize, Deserializer, Error};
use serde::ser::{Serialize, Serializer};
/// The number of tuples (the ones separated by ":") contained in a MAC address.
pub const MAC_ADDR_LEN: u8 = 6;
/// Represents a MAC address
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
#[repr(transparent)]
/// Representation of a MAC address.
pub struct MacAddr {
bytes: [u8; MAC_ADDR_LEN as usize],
}
impl fmt::Display for MacAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let b = &self.bytes;
write!(
f,
"{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}",
b[0], b[1], b[2], b[3], b[4], b[5]
)
}
}
impl From<[u8; 6]> for MacAddr {
fn from(bytes: [u8; 6]) -> Self {
Self { bytes }
}
}
impl From<MacAddr> for [u8; 6] {
fn from(mac: MacAddr) -> Self {
mac.bytes
}
}
impl FromStr for MacAddr {
type Err = String;
/// Try to turn a `&str` into a `MacAddr` object. The method will return the `str` that failed
/// to be parsed.
/// # Arguments
///
/// * `s` - reference that can be converted to &str.
fn from_str(s: &str) -> Result<Self, Self::Err> {
let v: Vec<&str> = s.split(':').collect();
let mut bytes = [0u8; MAC_ADDR_LEN as usize];
if v.len() != MAC_ADDR_LEN as usize {
return Err(String::from(s));
}
for i in 0..MAC_ADDR_LEN as usize {
if v[i].len() != 2 {
return Err(String::from(s));
}
bytes[i] = u8::from_str_radix(v[i], 16).map_err(|_| String::from(s))?;
}
Ok(MacAddr { bytes })
}
}
impl MacAddr {
/// Create a `MacAddr` from a slice.
/// Does not check whether `src.len()` == `MAC_ADDR_LEN`.
/// # Arguments
///
/// * `src` - slice from which to copy MAC address content.
#[inline]
pub fn from_bytes_unchecked(src: &[u8]) -> MacAddr {
// TODO: using something like std::mem::uninitialized could avoid the extra initialization,
// if this ever becomes a performance bottleneck.
let mut bytes = [0u8; MAC_ADDR_LEN as usize];
bytes[..].copy_from_slice(src);
MacAddr { bytes }
}
/// Return the underlying content of this `MacAddr` in bytes.
#[inline]
pub fn get_bytes(&self) -> &[u8] {
&self.bytes
}
}
impl Serialize for MacAddr {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
Serialize::serialize(&self.to_string(), serializer)
}
}
impl<'de> Deserialize<'de> for MacAddr {
fn deserialize<D>(deserializer: D) -> Result<MacAddr, D::Error>
where
D: Deserializer<'de>,
{
let s = <std::string::String as Deserialize>::deserialize(deserializer)?;
MacAddr::from_str(&s).map_err(|_| D::Error::custom("The provided MAC address is invalid."))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mac_addr() {
// too long
MacAddr::from_str("aa:aa:aa:aa:aa:aa:aa").unwrap_err();
// invalid hex
MacAddr::from_str("aa:aa:aa:aa:aa:ax").unwrap_err();
// single digit mac address component should be invalid
MacAddr::from_str("aa:aa:aa:aa:aa:b").unwrap_err();
// components with more than two digits should also be invalid
MacAddr::from_str("aa:aa:aa:aa:aa:bbb").unwrap_err();
let mac = MacAddr::from_str("12:34:56:78:9a:BC").unwrap();
println!("parsed MAC address: {}", mac);
let bytes = mac.get_bytes();
assert_eq!(bytes, [0x12u8, 0x34, 0x56, 0x78, 0x9a, 0xbc]);
}
#[test]
fn test_mac_addr_serialization_and_deserialization() {
let mac: MacAddr =
serde_json::from_str("\"12:34:56:78:9a:bc\"").expect("MacAddr deserialization failed.");
let bytes = mac.get_bytes();
assert_eq!(bytes, [0x12u8, 0x34, 0x56, 0x78, 0x9a, 0xbc]);
let s = serde_json::to_string(&mac).expect("MacAddr serialization failed.");
assert_eq!(s, "\"12:34:56:78:9a:bc\"");
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/utils/net/ipv4addr.rs | src/vmm/src/utils/net/ipv4addr.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::net::Ipv4Addr;
/// Checks if an IPv4 address is RFC 3927 compliant.
pub fn is_link_local_valid(ipv4_addr: Ipv4Addr) -> bool {
match ipv4_addr.octets() {
[169, 254, 0, _] => false,
[169, 254, 255, _] => false,
[169, 254, _, _] => true,
_ => false,
}
}
#[cfg(test)]
mod tests {
use std::net::Ipv4Addr;
use super::*;
#[test]
fn test_is_link_local_valid() {
// Outside link-local IPv4 address range (169.254.0.0/16 - 169.254.255.255/16).
let mut ipv4_addr = Ipv4Addr::new(1, 1, 1, 1);
assert!(!is_link_local_valid(ipv4_addr));
// First 256 addresses can not be used, per RFC 3927.
ipv4_addr = Ipv4Addr::new(169, 254, 0, 0);
assert!(!is_link_local_valid(ipv4_addr));
ipv4_addr = Ipv4Addr::new(169, 254, 0, 10);
assert!(!is_link_local_valid(ipv4_addr));
ipv4_addr = Ipv4Addr::new(169, 254, 0, 255);
assert!(!is_link_local_valid(ipv4_addr));
// Last 256 addresses can not be used, per RFC 3927.
ipv4_addr = Ipv4Addr::new(169, 254, 255, 0);
assert!(!is_link_local_valid(ipv4_addr));
ipv4_addr = Ipv4Addr::new(169, 254, 255, 194);
assert!(!is_link_local_valid(ipv4_addr));
ipv4_addr = Ipv4Addr::new(169, 254, 255, 255);
assert!(!is_link_local_valid(ipv4_addr));
// First valid IPv4 link-local address.
ipv4_addr = Ipv4Addr::new(169, 254, 1, 0);
assert!(is_link_local_valid(ipv4_addr));
// Last valid IPv4 link-local address.
ipv4_addr = Ipv4Addr::new(169, 254, 254, 255);
assert!(is_link_local_valid(ipv4_addr));
// In between valid IPv4 link-local address.
ipv4_addr = Ipv4Addr::new(169, 254, 170, 2);
assert!(is_link_local_valid(ipv4_addr));
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/mmds/persist.rs | src/vmm/src/mmds/persist.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the structures needed for saving/restoring MmdsNetworkStack.
use std::net::Ipv4Addr;
use std::sync::{Arc, Mutex};
use serde::{Deserialize, Serialize};
use super::ns::MmdsNetworkStack;
use crate::mmds::data_store::Mmds;
use crate::snapshot::Persist;
use crate::utils::net::mac::{MAC_ADDR_LEN, MacAddr};
/// State of a MmdsNetworkStack.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MmdsNetworkStackState {
mac_addr: [u8; MAC_ADDR_LEN as usize],
ipv4_addr: u32,
tcp_port: u16,
}
impl Persist<'_> for MmdsNetworkStack {
type State = MmdsNetworkStackState;
type ConstructorArgs = Arc<Mutex<Mmds>>;
type Error = ();
fn save(&self) -> Self::State {
let mut mac_addr = [0; MAC_ADDR_LEN as usize];
mac_addr.copy_from_slice(self.mac_addr.get_bytes());
MmdsNetworkStackState {
mac_addr,
ipv4_addr: self.ipv4_addr.into(),
tcp_port: self.tcp_handler.local_port(),
}
}
fn restore(mmds: Self::ConstructorArgs, state: &Self::State) -> Result<Self, Self::Error> {
Ok(MmdsNetworkStack::new(
MacAddr::from_bytes_unchecked(&state.mac_addr),
Ipv4Addr::from(state.ipv4_addr),
state.tcp_port,
mmds,
))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::snapshot::Snapshot;
#[test]
fn test_persistence() {
let ns = MmdsNetworkStack::new_with_defaults(None, Arc::new(Mutex::new(Mmds::default())));
let mut mem = vec![0; 4096];
Snapshot::new(ns.save())
.save(&mut mem.as_mut_slice())
.unwrap();
let restored_ns = MmdsNetworkStack::restore(
Arc::new(Mutex::new(Mmds::default())),
&Snapshot::load_without_crc_check(mem.as_slice())
.unwrap()
.data,
)
.unwrap();
assert_eq!(restored_ns.mac_addr, ns.mac_addr);
assert_eq!(restored_ns.ipv4_addr, ns.ipv4_addr);
assert_eq!(
restored_ns.tcp_handler.local_port(),
ns.tcp_handler.local_port()
);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/mmds/ns.rs | src/vmm/src/mmds/ns.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// TODO: get rid of this when splitting dumbo into public and internal parts.
#![allow(missing_docs)]
use std::convert::From;
use std::net::Ipv4Addr;
use std::num::NonZeroUsize;
use std::str::FromStr;
use std::sync::{Arc, Mutex};
use utils::time::timestamp_cycles;
use crate::dumbo::pdu::Incomplete;
use crate::dumbo::pdu::arp::{
ArpError as ArpFrameError, ETH_IPV4_FRAME_LEN, EthIPv4ArpFrame, test_speculative_tpa,
};
use crate::dumbo::pdu::ethernet::{
ETHERTYPE_ARP, ETHERTYPE_IPV4, EthernetError as EthernetFrameError, EthernetFrame,
};
use crate::dumbo::pdu::ipv4::{
IPv4Packet, Ipv4Error as IPv4PacketError, PROTOCOL_TCP, test_speculative_dst_addr,
};
use crate::dumbo::pdu::tcp::TcpError as TcpSegmentError;
use crate::dumbo::tcp::NextSegmentStatus;
use crate::dumbo::tcp::handler::{RecvEvent, TcpIPv4Handler, WriteEvent, WriteNextError};
use crate::logger::{IncMetric, METRICS};
use crate::mmds::data_store::Mmds;
use crate::utils::net::mac::MacAddr;
// Default MAC address advertised by the MMDS interface.
const DEFAULT_MAC_ADDR: &str = "06:01:23:45:67:01";
// 169.254.169.254 — the conventional link-local metadata-service address.
const DEFAULT_IPV4_ADDR: [u8; 4] = [169, 254, 169, 254];
// The MMDS HTTP endpoint listens on the standard HTTP port by default.
const DEFAULT_TCP_PORT: u16 = 80;
// Maximum number of concurrently tracked TCP connections.
const DEFAULT_MAX_CONNECTIONS: usize = 30;
// Maximum number of queued RST replies.
const DEFAULT_MAX_PENDING_RESETS: usize = 100;
// Errors that can occur while emitting an ARP reply frame.
// NOTE: the `///` variant docs below double as the `Display` strings generated
// by `displaydoc`; do not edit them casually.
#[derive(Debug, PartialEq, thiserror::Error, displaydoc::Display)]
enum WriteArpFrameError {
    /// NoPendingArpReply
    NoPendingArpReply,
    /// ARP error: {0}
    Arp(#[from] ArpFrameError),
    /// Ethernet error: {0}
    Ethernet(#[from] EthernetFrameError),
}
// Errors that can occur while emitting an outgoing TCP/IPv4 packet.
// NOTE: the `///` variant docs below double as the `Display` strings generated
// by `displaydoc`; do not edit them casually.
#[derive(Debug, PartialEq, thiserror::Error, displaydoc::Display)]
enum WritePacketError {
    /// IPv4Packet error: {0}
    IPv4Packet(#[from] IPv4PacketError),
    /// Ethernet error: {0}
    Ethernet(#[from] EthernetFrameError),
    /// TcpSegment error: {0}
    TcpSegment(#[from] TcpSegmentError),
    /// WriteNext error: {0}
    WriteNext(#[from] WriteNextError),
}
/// Minimal network stack backing the MMDS endpoint.
///
/// It answers ARP requests for the MMDS IPv4 address and terminates TCP
/// connections carrying HTTP requests to the metadata store.
#[derive(Debug)]
pub struct MmdsNetworkStack {
    // Network interface MAC address used by frames/packets heading to MMDS server.
    remote_mac_addr: MacAddr,
    // The Ethernet MAC address of the MMDS server.
    pub(crate) mac_addr: MacAddr,
    // MMDS server IPv4 address.
    pub ipv4_addr: Ipv4Addr,
    // ARP reply destination IPv4 address (requester of address resolution reply).
    // It is the Ipv4Addr of the network interface for which the MmdsNetworkStack
    // routes the packets. `None` means no ARP reply is currently pending.
    pending_arp_reply_dest: Option<Ipv4Addr>,
    // This handles MMDS<->guest interaction at the TCP level.
    pub(crate) tcp_handler: TcpIPv4Handler,
    // Data store reference shared across all MmdsNetworkStack instances.
    pub mmds: Arc<Mutex<Mmds>>,
}
impl MmdsNetworkStack {
    /// Creates an MMDS network stack serving `ipv4_addr:tcp_port` with the given MAC address.
    pub fn new(
        mac_addr: MacAddr,
        ipv4_addr: Ipv4Addr,
        tcp_port: u16,
        mmds: Arc<Mutex<Mmds>>,
    ) -> Self {
        MmdsNetworkStack {
            // Placeholder until we learn the peer's MAC from incoming traffic.
            remote_mac_addr: mac_addr,
            mac_addr,
            ipv4_addr,
            pending_arp_reply_dest: None,
            tcp_handler: TcpIPv4Handler::new(
                ipv4_addr,
                tcp_port,
                // The unwrap()s are safe because the given literals are greater than 0.
                NonZeroUsize::new(DEFAULT_MAX_CONNECTIONS).unwrap(),
                NonZeroUsize::new(DEFAULT_MAX_PENDING_RESETS).unwrap(),
            ),
            mmds,
        }
    }

    /// Creates a stack bound to `mmds_ipv4_addr` (or 169.254.169.254 when `None`),
    /// using the default MAC address and TCP port.
    pub fn new_with_defaults(mmds_ipv4_addr: Option<Ipv4Addr>, mmds: Arc<Mutex<Mmds>>) -> Self {
        // The unwrap() is safe because DEFAULT_MAC_ADDR is a well-formed MAC literal.
        let mac_addr = MacAddr::from_str(DEFAULT_MAC_ADDR).unwrap();
        let ipv4_addr = mmds_ipv4_addr.unwrap_or_else(|| Ipv4Addr::from(DEFAULT_IPV4_ADDR));
        Self::new(mac_addr, ipv4_addr, DEFAULT_TCP_PORT, mmds)
    }

    /// Updates the IPv4 address the stack (and its TCP handler) listens on.
    pub fn set_ipv4_addr(&mut self, ipv4_addr: Ipv4Addr) {
        self.ipv4_addr = ipv4_addr;
        self.tcp_handler.set_local_ipv4_addr(ipv4_addr);
    }

    /// Returns the IPv4 address the MMDS server listens on.
    pub fn ipv4_addr(&self) -> Ipv4Addr {
        self.ipv4_addr
    }

    /// Returns the default MMDS IPv4 address (169.254.169.254).
    pub fn default_ipv4_addr() -> Ipv4Addr {
        Ipv4Addr::from(DEFAULT_IPV4_ADDR)
    }

    /// Check if a frame is destined for `mmds`
    ///
    /// This returns `true` if the frame is an ARP or IPv4 frame destined for
    /// the `mmds` service, or `false` otherwise. It does not consume the frame.
    pub fn is_mmds_frame(&self, src: &[u8]) -> bool {
        if let Ok(eth) = EthernetFrame::from_bytes(src) {
            match eth.ethertype() {
                // Speculative checks only inspect the target/destination address
                // fields; full parsing happens later in `detour_frame`.
                ETHERTYPE_ARP => test_speculative_tpa(src, self.ipv4_addr),
                ETHERTYPE_IPV4 => test_speculative_dst_addr(src, self.ipv4_addr),
                _ => false,
            }
        } else {
            false
        }
    }

    /// Handles a frame destined for `mmds`
    ///
    /// It assumes that the frame is indeed destined for `mmds`, so the caller
    /// must make a call to `is_mmds_frame` to ensure that.
    ///
    /// # Returns
    ///
    /// `true` if the frame was consumed by `mmds` or `false` if an error occured
    pub fn detour_frame(&mut self, src: &[u8]) -> bool {
        if let Ok(eth) = EthernetFrame::from_bytes(src) {
            match eth.ethertype() {
                ETHERTYPE_ARP => return self.detour_arp(eth),
                ETHERTYPE_IPV4 => return self.detour_ipv4(eth),
                _ => (),
            }
        } else {
            METRICS.mmds.rx_bad_eth.inc();
        }
        false
    }

    // Consumes an ARP request, recording the requester so a reply can be sent later.
    fn detour_arp(&mut self, eth: EthernetFrame<&[u8]>) -> bool {
        if let Ok(arp) = EthIPv4ArpFrame::request_from_bytes(eth.payload()) {
            self.remote_mac_addr = arp.sha();
            self.pending_arp_reply_dest = Some(arp.spa());
            return true;
        }
        false
    }

    // Consumes an IPv4 packet heading to the MMDS, feeding TCP segments to the handler.
    fn detour_ipv4(&mut self, eth: EthernetFrame<&[u8]>) -> bool {
        // TODO: We skip verifying the checksum, just in case the device model relies on offloading
        // checksum computation from the guest driver to some other entity. Clear up this entire
        // context at some point!
        if let Ok(ip) = IPv4Packet::from_bytes(eth.payload(), false) {
            if ip.protocol() == PROTOCOL_TCP {
                // Note-1: `remote_mac_address` is actually the network device mac address, where
                // this TCP segment came from.
                // Note-2: For every routed packet we will have a single source MAC address, because
                // each MmdsNetworkStack routes packets for only one network device.
                self.remote_mac_addr = eth.src_mac();
                let mmds_instance = self.mmds.clone();

                match &mut self.tcp_handler.receive_packet(&ip, move |request| {
                    super::convert_to_response(mmds_instance, request)
                }) {
                    Ok(event) => {
                        METRICS.mmds.rx_count.inc();
                        match event {
                            RecvEvent::NewConnectionSuccessful => {
                                METRICS.mmds.connections_created.inc()
                            }
                            RecvEvent::NewConnectionReplacing => {
                                // An existing connection was evicted to admit the new one.
                                METRICS.mmds.connections_created.inc();
                                METRICS.mmds.connections_destroyed.inc();
                            }
                            RecvEvent::EndpointDone => {
                                METRICS.mmds.connections_destroyed.inc();
                            }
                            _ => (),
                        }
                    }
                    Err(_) => METRICS.mmds.rx_accepted_err.inc(),
                }
            } else {
                // A non-TCP IPv4 packet heading towards the MMDS; we consider it unusual.
                METRICS.mmds.rx_accepted_unusual.inc();
            }
            return true;
        }
        false
    }

    // Allows the MMDS network stack to write a frame to the specified buffer. Will return:
    // - None, if the MMDS network stack has no frame to send at this point. The buffer can be
    //   used for something else by the device model.
    // - Some(len), if a frame of the given length has been written to the specified buffer.
    pub fn write_next_frame(&mut self, buf: &mut [u8]) -> Option<NonZeroUsize> {
        // We try to send ARP replies first.
        if self.pending_arp_reply_dest.is_some() {
            return match self.write_arp_reply(buf) {
                Ok(something) => {
                    METRICS.mmds.tx_count.inc();
                    // Clear the pending reply only after a successful write.
                    self.pending_arp_reply_dest = None;
                    something
                }
                Err(_) => {
                    METRICS.mmds.tx_errors.inc();
                    None
                }
            };
        } else {
            let call_write = match self.tcp_handler.next_segment_status() {
                NextSegmentStatus::Available => true,
                // Retransmission deadline: only write once the timestamp has passed.
                NextSegmentStatus::Timeout(value) => timestamp_cycles() >= value,
                NextSegmentStatus::Nothing => false,
            };
            if call_write {
                return match self.write_packet(buf) {
                    Ok(something) => {
                        METRICS.mmds.tx_count.inc();
                        something
                    }
                    Err(_) => {
                        METRICS.mmds.tx_errors.inc();
                        None
                    }
                };
            }
        }
        None
    }

    // Writes an Ethernet header (payload length still unknown) addressed to the
    // current remote MAC into `buf`.
    fn prepare_eth_unsized<'a>(
        &self,
        buf: &'a mut [u8],
        ethertype: u16,
    ) -> Result<Incomplete<EthernetFrame<'a, &'a mut [u8]>>, EthernetFrameError> {
        EthernetFrame::write_incomplete(buf, self.remote_mac_addr, self.mac_addr, ethertype)
    }

    // Writes the pending ARP reply into `buf`, returning the full frame length.
    fn write_arp_reply(&self, buf: &mut [u8]) -> Result<Option<NonZeroUsize>, WriteArpFrameError> {
        let arp_reply_dest = self
            .pending_arp_reply_dest
            .ok_or(WriteArpFrameError::NoPendingArpReply)?;

        let mut eth_unsized = self.prepare_eth_unsized(buf, ETHERTYPE_ARP)?;

        let arp_len = EthIPv4ArpFrame::write_reply(
            eth_unsized
                .inner_mut()
                .payload_mut()
                .split_at_mut(ETH_IPV4_FRAME_LEN)
                .0,
            self.mac_addr,
            self.ipv4_addr,
            self.remote_mac_addr,
            arp_reply_dest,
        )?
        .len();

        Ok(Some(
            // The unwrap() is safe because arp_len > 0.
            NonZeroUsize::new(eth_unsized.with_payload_len_unchecked(arp_len).len()).unwrap(),
        ))
    }

    // Writes the next pending TCP segment (wrapped in IPv4/Ethernet) into `buf`.
    fn write_packet(&mut self, buf: &mut [u8]) -> Result<Option<NonZeroUsize>, WritePacketError> {
        let mut eth_unsized = self.prepare_eth_unsized(buf, ETHERTYPE_IPV4)?;

        let (maybe_len, event) = self
            .tcp_handler
            .write_next_packet(eth_unsized.inner_mut().payload_mut())?;

        if let WriteEvent::EndpointDone = event {
            METRICS.mmds.connections_destroyed.inc()
        }

        if let Some(packet_len) = maybe_len {
            return Ok(Some(
                // The unwrap() is safe because packet_len > 0.
                NonZeroUsize::new(
                    eth_unsized
                        .with_payload_len_unchecked(packet_len.get())
                        .len(),
                )
                .unwrap(),
            ));
        }
        Ok(None)
    }
}
#[cfg(test)]
mod tests {
    use std::str::FromStr;

    use super::*;
    use crate::dumbo::pdu::tcp::{Flags as TcpFlags, TcpSegment};

    // We use LOCALHOST here because const new() is not stable yet, so just reuse this const, since
    // all we're interested in is having some address different from the MMDS one.
    const REMOTE_ADDR: Ipv4Addr = Ipv4Addr::LOCALHOST;
    const REMOTE_MAC_STR: &str = "11:11:11:22:22:22";
    const MMDS_PORT: u16 = 80;
    const REMOTE_PORT: u16 = 1235;
    const SEQ_NUMBER: u32 = 123;

    // Helper methods which only make sense for testing.
    impl MmdsNetworkStack {
        // Crafts an ARP request frame into `buf`; it targets the MMDS address
        // only when `for_mmds` is true.
        fn write_arp_request(&mut self, buf: &mut [u8], for_mmds: bool) -> usize {
            // Write a reply and then modify it into a request.
            self.pending_arp_reply_dest = Some(REMOTE_ADDR);
            let len = self.write_arp_reply(buf).unwrap().unwrap().get();
            self.pending_arp_reply_dest = None;
            let mut eth = EthernetFrame::from_bytes_unchecked(&mut buf[..len]);
            let mut arp = EthIPv4ArpFrame::from_bytes_unchecked(eth.payload_mut());
            // Set the operation to REQUEST.
            arp.set_operation(1);
            arp.set_sha(MacAddr::from_str(REMOTE_MAC_STR).unwrap());
            arp.set_spa(REMOTE_ADDR);
            // The tpa remains REMOTE_ADDR otherwise, and is thus invalid for the MMDS.
            if for_mmds {
                arp.set_tpa(self.ipv4_addr);
            }
            len
        }

        // Crafts a TCP segment (wrapped in IPv4/Ethernet) headed to `addr` into `buf`.
        fn write_incoming_tcp_segment(
            &self,
            buf: &mut [u8],
            addr: Ipv4Addr,
            flags: TcpFlags,
        ) -> usize {
            let mut eth_unsized = self.prepare_eth_unsized(buf, ETHERTYPE_IPV4).unwrap();
            let packet_len = {
                let mut packet = IPv4Packet::write_header(
                    eth_unsized.inner_mut().payload_mut(),
                    PROTOCOL_TCP,
                    REMOTE_ADDR,
                    addr,
                )
                .unwrap();
                let segment_len = TcpSegment::write_incomplete_segment::<[u8]>(
                    packet.inner_mut().payload_mut(),
                    SEQ_NUMBER,
                    1234,
                    flags,
                    10000,
                    None,
                    0,
                    None,
                )
                .unwrap()
                .finalize(REMOTE_PORT, MMDS_PORT, Some((REMOTE_ADDR, addr)))
                .len();
                packet.with_payload_len_unchecked(segment_len, true).len()
            };
            eth_unsized.with_payload_len_unchecked(packet_len).len()
        }

        // Writes the stack's next frame into `buf` and re-parses it as an IPv4 packet.
        fn next_frame_as_ipv4_packet<'a>(&mut self, buf: &'a mut [u8]) -> IPv4Packet<'_, &'a [u8]> {
            let len = self.write_next_frame(buf).unwrap().get();
            let eth = EthernetFrame::from_bytes(&buf[..len]).unwrap();
            IPv4Packet::from_bytes(&buf[eth.payload_offset()..len], true).unwrap()
        }
    }

    #[test]
    fn test_ns_new_with_defaults() {
        let ns = MmdsNetworkStack::new_with_defaults(None, Arc::new(Mutex::new(Mmds::default())));
        assert_eq!(ns.mac_addr, MacAddr::from_str(DEFAULT_MAC_ADDR).unwrap());
        assert_eq!(ns.ipv4_addr, Ipv4Addr::from(DEFAULT_IPV4_ADDR));
        let ns = MmdsNetworkStack::new_with_defaults(
            Some(Ipv4Addr::LOCALHOST),
            Arc::new(Mutex::new(Mmds::default())),
        );
        assert_eq!(ns.mac_addr, MacAddr::from_str(DEFAULT_MAC_ADDR).unwrap());
        assert_eq!(ns.ipv4_addr, Ipv4Addr::LOCALHOST);
    }

    // End-to-end exercise: ARP request/reply, rejected/accepted TCP segments,
    // RST emission and the SYN/SYNACK handshake.
    #[test]
    #[allow(clippy::cognitive_complexity)]
    fn test_ns() {
        let mut ns =
            MmdsNetworkStack::new_with_defaults(None, Arc::new(Mutex::new(Mmds::default())));
        let mut buf = [0u8; 2000];
        let mut bad_buf = [0u8; 1];
        let remote_mac = MacAddr::from_str(REMOTE_MAC_STR).unwrap();
        let mmds_addr = ns.ipv4_addr;
        let bad_mmds_addr = Ipv4Addr::from_str("1.2.3.4").unwrap();
        // Buffer is too small.
        assert!(!ns.is_mmds_frame(&bad_buf));
        assert!(!ns.detour_frame(bad_buf.as_ref()));
        // There's nothing to send right now.
        assert!(ns.write_next_frame(buf.as_mut()).is_none());
        {
            let len = ns.write_arp_request(buf.as_mut(), false);
            // Not asking for MMDS MAC address.
            assert!(!ns.is_mmds_frame(&buf[..len]));
            // There's still nothing to send.
            assert!(ns.write_next_frame(buf.as_mut()).is_none());
        }
        {
            let len = ns.write_arp_request(buf.as_mut(), true);
            // Asking for MMDS MAC address.
            assert!(ns.detour_frame(&buf[..len]));
            assert_eq!(ns.remote_mac_addr, remote_mac);
        }
        // There should be an ARP reply to send.
        {
            // Buffer is too small.
            assert!(ns.write_next_frame(bad_buf.as_mut()).is_none());
            let curr_tx_count = METRICS.mmds.tx_count.count();
            let len = ns.write_next_frame(buf.as_mut()).unwrap().get();
            assert_eq!(curr_tx_count + 1, METRICS.mmds.tx_count.count());
            let eth = EthernetFrame::from_bytes(&buf[..len]).unwrap();
            let arp_reply = EthIPv4ArpFrame::from_bytes_unchecked(eth.payload());
            // REPLY = 2
            assert_eq!(arp_reply.operation(), 2);
            assert_eq!(arp_reply.sha(), ns.mac_addr);
            assert_eq!(arp_reply.spa(), ns.ipv4_addr);
            assert_eq!(arp_reply.tha(), ns.remote_mac_addr);
            assert_eq!(arp_reply.tpa(), REMOTE_ADDR);
        }
        // Nothing to send anymore.
        assert!(ns.write_next_frame(buf.as_mut()).is_none());
        // Let's send a TCP segment which will be rejected, because it's heading to the wrong
        // address.
        {
            let len = ns.write_incoming_tcp_segment(buf.as_mut(), bad_mmds_addr, TcpFlags::ACK);
            assert!(!ns.is_mmds_frame(&buf[..len]));
            // Nothing to send in response.
            assert!(ns.write_next_frame(buf.as_mut()).is_none());
        }
        // Let's send a TCP segment which will cause a RST to come out of the inner TCP handler.
        {
            let len = ns.write_incoming_tcp_segment(buf.as_mut(), mmds_addr, TcpFlags::ACK);
            let curr_rx_count = METRICS.mmds.rx_count.count();
            assert!(ns.detour_frame(&buf[..len]));
            assert_eq!(curr_rx_count + 1, METRICS.mmds.rx_count.count());
        }
        // Let's check we actually get a RST when writing the next frame.
        {
            assert!(ns.write_next_frame(bad_buf.as_mut()).is_none());
            let ip = ns.next_frame_as_ipv4_packet(buf.as_mut());
            assert_eq!(ip.source_address(), mmds_addr);
            assert_eq!(ip.destination_address(), REMOTE_ADDR);
            let s = TcpSegment::from_bytes(
                ip.payload(),
                Some((ip.source_address(), ip.destination_address())),
            )
            .unwrap();
            assert_eq!(s.flags_after_ns(), TcpFlags::RST);
            assert_eq!(s.source_port(), MMDS_PORT);
            assert_eq!(s.destination_port(), REMOTE_PORT);
        }
        // Nothing else to send.
        assert!(ns.write_next_frame(buf.as_mut()).is_none());
        // Let's send a TCP SYN into the ns.
        {
            let len = ns.write_incoming_tcp_segment(buf.as_mut(), mmds_addr, TcpFlags::SYN);
            assert!(ns.detour_frame(&buf[..len]));
        }
        // We should be getting a SYNACK out of the ns in response.
        {
            let ip = ns.next_frame_as_ipv4_packet(buf.as_mut());
            assert_eq!(ip.source_address(), mmds_addr);
            assert_eq!(ip.destination_address(), REMOTE_ADDR);
            let s = TcpSegment::from_bytes(
                ip.payload(),
                Some((ip.source_address(), ip.destination_address())),
            )
            .unwrap();
            assert_eq!(s.flags_after_ns(), TcpFlags::SYN | TcpFlags::ACK);
            assert_eq!(s.source_port(), MMDS_PORT);
            assert_eq!(s.destination_port(), REMOTE_PORT);
            assert_eq!(s.ack_number(), SEQ_NUMBER.wrapping_add(1));
        }
        // Nothing else to send.
        assert!(ns.write_next_frame(buf.as_mut()).is_none());
    }

    #[test]
    fn test_set_ipv4_addr() {
        let mut ns =
            MmdsNetworkStack::new_with_defaults(None, Arc::new(Mutex::new(Mmds::default())));
        assert_ne!(ns.ipv4_addr, Ipv4Addr::LOCALHOST);
        assert_ne!(ns.tcp_handler.local_ipv4_addr(), Ipv4Addr::LOCALHOST);
        ns.set_ipv4_addr(Ipv4Addr::LOCALHOST);
        assert_eq!(ns.ipv4_addr, Ipv4Addr::LOCALHOST);
        assert_eq!(ns.tcp_handler.local_ipv4_addr(), Ipv4Addr::LOCALHOST);
    }

    #[test]
    fn test_default_ipv4_addr() {
        let actual = MmdsNetworkStack::default_ipv4_addr();
        let expected = Ipv4Addr::from(DEFAULT_IPV4_ADDR);
        assert_eq!(actual, expected);
    }

    // Ensure the speculative ARP check is not fooled by a frame whose tpa does
    // not match the MMDS address.
    #[test]
    fn test_break_speculative_check_detour_arp() {
        let mut buf = [0u8; 2000];
        let ip = Ipv4Addr::from(DEFAULT_IPV4_ADDR);
        let other_ip = Ipv4Addr::new(5, 6, 7, 8);
        let mac = MacAddr::from_bytes_unchecked(&[0; 6]);
        let mut ns =
            MmdsNetworkStack::new_with_defaults(Some(ip), Arc::new(Mutex::new(Mmds::default())));
        let mut eth =
            EthernetFrame::write_incomplete(buf.as_mut(), mac, mac, ETHERTYPE_ARP).unwrap();
        let mut arp = EthIPv4ArpFrame::from_bytes_unchecked(eth.inner_mut().payload_mut());
        arp.set_tpa(other_ip);
        let len = ns.write_arp_request(buf.as_mut(), false);
        eth = EthernetFrame::write_incomplete(buf.as_mut(), mac, mac, ETHERTYPE_ARP).unwrap();
        IPv4Packet::from_bytes_unchecked(eth.inner_mut().payload_mut()).set_destination_address(ip);
        assert!(!ns.is_mmds_frame(&buf[..len]));
    }

    // Ensure the speculative IPv4 check is not fooled by a frame whose
    // destination does not match the MMDS address.
    #[test]
    fn test_break_speculative_check_detour_ipv4() {
        let mut buf = [0u8; 2000];
        let ip = Ipv4Addr::from(DEFAULT_IPV4_ADDR);
        let other_ip = Ipv4Addr::new(5, 6, 7, 8);
        let mac = MacAddr::from_bytes_unchecked(&[0; 6]);
        let ns =
            MmdsNetworkStack::new_with_defaults(Some(ip), Arc::new(Mutex::new(Mmds::default())));
        let mut eth =
            EthernetFrame::write_incomplete(buf.as_mut(), mac, mac, ETHERTYPE_IPV4).unwrap();
        IPv4Packet::from_bytes_unchecked(eth.inner_mut().payload_mut())
            .set_destination_address(other_ip);
        let len = ns.write_incoming_tcp_segment(buf.as_mut(), other_ip, TcpFlags::SYN);
        eth = EthernetFrame::write_incomplete(buf.as_mut(), mac, mac, ETHERTYPE_IPV4).unwrap();
        let mut arp = EthIPv4ArpFrame::from_bytes_unchecked(eth.inner_mut().payload_mut());
        arp.set_tpa(ip);
        assert!(!ns.is_mmds_frame(&buf[..len]));
    }

    #[test]
    fn test_wrong_ethertype() {
        let mut buf = [0u8; 2000];
        let ip = Ipv4Addr::from(DEFAULT_IPV4_ADDR);
        let other_ip = Ipv4Addr::new(5, 6, 7, 8);
        let mac = MacAddr::from_bytes_unchecked(&[0; 6]);
        let mut ns =
            MmdsNetworkStack::new_with_defaults(Some(ip), Arc::new(Mutex::new(Mmds::default())));
        // try IPv4 with detour_arp
        let mut eth =
            EthernetFrame::write_incomplete(buf.as_mut(), mac, mac, ETHERTYPE_IPV4).unwrap();
        IPv4Packet::from_bytes_unchecked(eth.inner_mut().payload_mut())
            .set_destination_address(other_ip);
        let len = ns.write_incoming_tcp_segment(buf.as_mut(), other_ip, TcpFlags::SYN);
        eth = EthernetFrame::write_incomplete(buf.as_mut(), mac, mac, ETHERTYPE_IPV4).unwrap();
        let mut arp = EthIPv4ArpFrame::from_bytes_unchecked(eth.inner_mut().payload_mut());
        arp.set_tpa(ip);
        assert!(ns.detour_ipv4(EthernetFrame::from_bytes(&buf[..len]).unwrap()));
        assert!(!ns.detour_arp(EthernetFrame::from_bytes(&buf[..len]).unwrap()));
        // try ARP with detour_ipv4 (the mirror case of the one above)
        let mut eth =
            EthernetFrame::write_incomplete(buf.as_mut(), mac, mac, ETHERTYPE_ARP).unwrap();
        let mut arp = EthIPv4ArpFrame::from_bytes_unchecked(eth.inner_mut().payload_mut());
        arp.set_tpa(other_ip);
        let len = ns.write_arp_request(buf.as_mut(), false);
        eth = EthernetFrame::write_incomplete(buf.as_mut(), mac, mac, ETHERTYPE_ARP).unwrap();
        IPv4Packet::from_bytes_unchecked(eth.inner_mut().payload_mut()).set_destination_address(ip);
        assert!(ns.detour_arp(EthernetFrame::from_bytes(&buf[..len]).unwrap()));
        assert!(!ns.detour_ipv4(EthernetFrame::from_bytes(&buf[..len]).unwrap()));
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/mmds/mod.rs | src/vmm/src/mmds/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/// MMDS data store
pub mod data_store;
/// MMDS network stack
pub mod ns;
/// Defines the structures needed for saving/restoring MmdsNetworkStack.
pub mod persist;
mod token;
/// MMDS token headers
pub mod token_headers;
use std::sync::{Arc, Mutex};
use micro_http::{
Body, HttpHeaderError, MediaType, Method, Request, RequestError, Response, StatusCode, Version,
};
use serde_json::{Map, Value};
use crate::logger::{IncMetric, METRICS};
use crate::mmds::data_store::{Mmds, MmdsDatastoreError as MmdsError, MmdsVersion, OutputFormat};
use crate::mmds::token::PATH_TO_TOKEN;
use crate::mmds::token_headers::{
X_AWS_EC2_METADATA_TOKEN_HEADER, X_AWS_EC2_METADATA_TOKEN_SSL_SECONDS_HEADER,
X_FORWARDED_FOR_HEADER, X_METADATA_TOKEN_HEADER, X_METADATA_TOKEN_TTL_SECONDS_HEADER,
get_header_value_pair,
};
// NOTE: the `///` variant docs below double as the `Display` strings generated
// by `displaydoc`; they are user-visible error messages — do not edit casually.
#[rustfmt::skip]
#[derive(Debug, thiserror::Error, displaydoc::Display)]
/// MMDS token errors
pub enum VmmMmdsError {
    /// MMDS token not valid.
    InvalidToken,
    /// Invalid URI.
    InvalidURI,
    /// Not allowed HTTP method.
    MethodNotAllowed,
    /// No MMDS token provided. Use `X-metadata-token` or `X-aws-ec2-metadata-token` header to specify the session token.
    NoTokenProvided,
    /// Token time to live value not found. Use `X-metadata-token-ttl-seconds` or `X-aws-ec2-metadata-token-ttl-seconds` header to specify the token's lifetime.
    NoTtlProvided,
    /// Resource not found: {0}.
    ResourceNotFound(String),
}
impl From<MediaType> for OutputFormat {
    /// Map an HTTP `Accept` media type to the corresponding MMDS output format.
    fn from(media_type: MediaType) -> Self {
        // `application/json` selects the JSON view; plain text gets the IMDS listing.
        if matches!(media_type, MediaType::ApplicationJson) {
            OutputFormat::Json
        } else {
            OutputFormat::Imds
        }
    }
}
// Builds the `micro_http::Response` with a given HTTP version, status code, and body.
fn build_response(
    http_version: Version,
    status_code: StatusCode,
    content_type: MediaType,
    body: Body,
) -> Response {
    let mut resp = Response::new(http_version, status_code);
    resp.set_content_type(content_type);
    resp.set_body(body);
    resp
}
/// Patch provided JSON document (given as `serde_json::Value`) in-place with JSON Merge Patch
/// [RFC 7396](https://tools.ietf.org/html/rfc7396).
pub fn json_patch(target: &mut Value, patch: &Value) {
    match patch.as_object() {
        Some(patch_map) => {
            // Per RFC 7396, an object patch merges key-by-key; anything that is
            // not already an object in the target is replaced by an empty one
            // so the keys below have somewhere to land.
            if !target.is_object() {
                *target = Value::Object(Map::new());
            }
            // This is safe since we just ensured the target is an object.
            let target_map = target.as_object_mut().unwrap();
            for (key, value) in patch_map {
                if value.is_null() {
                    // A null value in the patch removes the entry.
                    target_map.remove(key.as_str());
                } else {
                    // Recurse into the (possibly newly created) entry. A null
                    // placeholder is inserted for keys the target lacks so the
                    // recursive call can fill them in.
                    json_patch(target_map.entry(key.as_str()).or_insert(Value::Null), value);
                }
            }
        }
        // A non-object patch replaces the target wholesale.
        None => *target = patch.clone(),
    }
}
// Make the URI a correct JSON pointer value.
//
// Collapses every run of consecutive '/' characters into a single '/'.
// This is a single O(n) pass; the previous implementation repeatedly called
// `replace("//", "/")` until a fixpoint (seeded with a `u32::MAX` sentinel
// length), which allocated a fresh string per round and was O(n^2) in the
// worst case (e.g. a long run of slashes).
fn sanitize_uri(uri: String) -> String {
    let mut sanitized = String::with_capacity(uri.len());
    let mut prev_was_slash = false;
    for c in uri.chars() {
        // Drop a '/' that immediately follows another '/'.
        if c != '/' || !prev_was_slash {
            sanitized.push(c);
        }
        prev_was_slash = c == '/';
    }
    sanitized
}
/// Build a response for `request` and return response based on MMDS version
pub fn convert_to_response(mmds: Arc<Mutex<Mmds>>, request: Request) -> Response {
    // Check URI is not empty
    let uri = request.uri().get_abs_path();
    if uri.is_empty() {
        return build_response(
            request.http_version(),
            StatusCode::BadRequest,
            MediaType::PlainText,
            Body::new(VmmMmdsError::InvalidURI.to_string()),
        );
    }

    // The lock is only poisoned if another thread panicked while holding it,
    // which is itself a bug; propagating the panic here is intentional.
    let mut mmds_guard = mmds.lock().expect("Poisoned lock");

    // Allow only GET and PUT requests
    match request.method() {
        // GET handling is version-dependent: V2 enforces session tokens.
        Method::Get => match mmds_guard.version() {
            MmdsVersion::V1 => respond_to_get_request_v1(&mmds_guard, request),
            MmdsVersion::V2 => respond_to_get_request_v2(&mmds_guard, request),
        },
        // PUT is only used to create session tokens.
        Method::Put => respond_to_put_request(&mut mmds_guard, request),
        _ => {
            let mut response = build_response(
                request.http_version(),
                StatusCode::MethodNotAllowed,
                MediaType::PlainText,
                Body::new(VmmMmdsError::MethodNotAllowed.to_string()),
            );
            // Advertise the supported methods via the `Allow` header.
            response.allow_method(Method::Get);
            response.allow_method(Method::Put);
            response
        }
    }
}
// Serves a V1 GET request. V1 does not require a session token; token presence
// and validity are only tracked in metrics before serving the request as usual.
fn respond_to_get_request_v1(mmds: &Mmds, request: Request) -> Response {
    match get_header_value_pair(
        request.headers.custom_entries(),
        &[X_METADATA_TOKEN_HEADER, X_AWS_EC2_METADATA_TOKEN_HEADER],
    ) {
        // A token was supplied but is not (or no longer) valid.
        Some((_, token)) if !mmds.is_valid_token(token) => {
            METRICS.mmds.rx_invalid_token.inc()
        }
        // No token header at all.
        None => METRICS.mmds.rx_no_token.inc(),
        // Valid token: nothing to record.
        _ => (),
    }
    respond_to_get_request(mmds, request)
}
// Serves a V2 GET request. V2 requires a valid session token; requests without
// one (or with an invalid one) are rejected with 401 Unauthorized.
fn respond_to_get_request_v2(mmds: &Mmds, request: Request) -> Response {
    // Check whether a token exists and, if so, whether it is still valid.
    let token_valid = match get_header_value_pair(
        request.headers.custom_entries(),
        &[X_METADATA_TOKEN_HEADER, X_AWS_EC2_METADATA_TOKEN_HEADER],
    ) {
        None => {
            METRICS.mmds.rx_no_token.inc();
            return build_response(
                request.http_version(),
                StatusCode::Unauthorized,
                MediaType::PlainText,
                Body::new(VmmMmdsError::NoTokenProvided.to_string()),
            );
        }
        Some((_, token)) => mmds.is_valid_token(token),
    };

    if token_valid {
        respond_to_get_request(mmds, request)
    } else {
        METRICS.mmds.rx_invalid_token.inc();
        build_response(
            request.http_version(),
            StatusCode::Unauthorized,
            MediaType::PlainText,
            Body::new(VmmMmdsError::InvalidToken.to_string()),
        )
    }
}
// Looks up the requested path in the data store and renders it in the format
// selected by the request's `Accept` header.
fn respond_to_get_request(mmds: &Mmds, request: Request) -> Response {
    let uri = request.uri().get_abs_path();
    // The data store expects a strict json path, so we need to
    // sanitize the URI.
    let json_path = sanitize_uri(uri.to_string());
    // Defaults to plain text when no `Accept` header is present.
    let content_type = request.headers.accept();

    match mmds.get_value(json_path, content_type.into()) {
        Ok(response_body) => build_response(
            request.http_version(),
            StatusCode::OK,
            content_type,
            Body::new(response_body),
        ),
        Err(err) => match err {
            MmdsError::NotFound => {
                // Report the original (unsanitized) URI back to the caller.
                let error_msg = VmmMmdsError::ResourceNotFound(String::from(uri)).to_string();
                build_response(
                    request.http_version(),
                    StatusCode::NotFound,
                    MediaType::PlainText,
                    Body::new(error_msg),
                )
            }
            MmdsError::UnsupportedValueType => build_response(
                request.http_version(),
                StatusCode::NotImplemented,
                MediaType::PlainText,
                Body::new(err.to_string()),
            ),
            MmdsError::DataStoreLimitExceeded => build_response(
                request.http_version(),
                StatusCode::PayloadTooLarge,
                MediaType::PlainText,
                Body::new(err.to_string()),
            ),
            // `get_value` only produces the error kinds handled above.
            _ => unreachable!(),
        },
    }
}
// Handles a PUT request: the only supported operation is generating a session
// token at `PATH_TO_TOKEN` with the TTL taken from a dedicated header.
fn respond_to_put_request(mmds: &mut Mmds, request: Request) -> Response {
    let custom_headers = request.headers.custom_entries();

    // Reject `PUT` requests that contain `X-Forwarded-For` header.
    if let Some((header, _)) = get_header_value_pair(custom_headers, &[X_FORWARDED_FOR_HEADER]) {
        let error_msg =
            RequestError::HeaderError(HttpHeaderError::UnsupportedName(header.to_string()))
                .to_string();
        return build_response(
            request.http_version(),
            StatusCode::BadRequest,
            MediaType::PlainText,
            Body::new(error_msg),
        );
    }

    let uri = request.uri().get_abs_path();
    // Sanitize the URI into a strict json path.
    let json_path = sanitize_uri(uri.to_string());

    // Only accept PUT requests towards TOKEN_PATH.
    if json_path != PATH_TO_TOKEN {
        let error_msg = VmmMmdsError::ResourceNotFound(String::from(uri)).to_string();
        return build_response(
            request.http_version(),
            StatusCode::NotFound,
            MediaType::PlainText,
            Body::new(error_msg),
        );
    }

    // Get token lifetime value.
    let (header, ttl_seconds) = match get_header_value_pair(
        custom_headers,
        &[
            X_METADATA_TOKEN_TTL_SECONDS_HEADER,
            X_AWS_EC2_METADATA_TOKEN_SSL_SECONDS_HEADER,
        ],
    ) {
        // Header found
        Some((header, value)) => match value.parse::<u32>() {
            Ok(ttl_seconds) => (header, ttl_seconds),
            // Header present but not a parseable u32.
            Err(_) => {
                return build_response(
                    request.http_version(),
                    StatusCode::BadRequest,
                    MediaType::PlainText,
                    Body::new(
                        RequestError::HeaderError(HttpHeaderError::InvalidValue(
                            header.into(),
                            value.into(),
                        ))
                        .to_string(),
                    ),
                );
            }
        },
        // Header not found
        None => {
            return build_response(
                request.http_version(),
                StatusCode::BadRequest,
                MediaType::PlainText,
                Body::new(VmmMmdsError::NoTtlProvided.to_string()),
            );
        }
    };

    // Generate token.
    let result = mmds.generate_token(ttl_seconds);
    match result {
        Ok(token) => {
            let mut response = build_response(
                request.http_version(),
                StatusCode::OK,
                MediaType::PlainText,
                Body::new(token),
            );
            // Echo the TTL back on the same header the client used.
            let custom_headers = [(header.into(), ttl_seconds.to_string())].into();
            // Safe to unwrap because the header name and the value are valid as US-ASCII.
            // - `header` is either `X_METADATA_TOKEN_TTL_SECONDS_HEADER` or
            //   `X_AWS_EC2_METADATA_TOKEN_SSL_SECONDS_HEADER`.
            // - `ttl_seconds` is a decimal number between `MIN_TOKEN_TTL_SECONDS` and
            //   `MAX_TOKEN_TTL_SECONDS`.
            response.set_custom_headers(&custom_headers).unwrap();
            response
        }
        // TTL out of the accepted range (or token generation failed otherwise).
        Err(err) => build_response(
            request.http_version(),
            StatusCode::BadRequest,
            MediaType::PlainText,
            Body::new(err.to_string()),
        ),
    }
}
#[cfg(test)]
mod tests {
use std::time::Duration;
use super::*;
use crate::mmds::token::{MAX_TOKEN_TTL_SECONDS, MIN_TOKEN_TTL_SECONDS};
fn populate_mmds() -> Arc<Mutex<Mmds>> {
let data = r#"{
"name": {
"first": "John",
"second": "Doe"
},
"age": 43,
"phones": {
"home": {
"RO": "+401234567",
"UK": "+441234567"
},
"mobile": "+442345678"
}
}"#;
let mmds = Arc::new(Mutex::new(Mmds::default()));
mmds.lock()
.expect("Poisoned lock")
.put_data(serde_json::from_str(data).unwrap())
.unwrap();
mmds
}
fn get_json_data() -> &'static str {
r#"{
"age": 43,
"name": {
"first": "John",
"second": "Doe"
},
"phones": {
"home": {
"RO": "+401234567",
"UK": "+441234567"
},
"mobile": "+442345678"
}
}"#
}
fn get_plain_text_data() -> &'static str {
"age\nname/\nphones/"
}
    // Parses `request_bytes` into a `Request` and builds the 200 OK response the
    // fixture data store is expected to produce for the given media type.
    fn generate_request_and_expected_response(
        request_bytes: &[u8],
        media_type: MediaType,
    ) -> (Request, Response) {
        let request = Request::try_from(request_bytes, None).unwrap();
        let mut response = Response::new(Version::Http10, StatusCode::OK);
        response.set_content_type(media_type);
        let body = match media_type {
            MediaType::ApplicationJson => {
                // Strip whitespace so the comparison ignores fixture formatting.
                let mut body = get_json_data().to_string();
                body.retain(|c| !c.is_whitespace());
                body
            }
            MediaType::PlainText => get_plain_text_data().to_string(),
        };
        response.set_body(Body::new(body));
        (request, response)
    }
#[test]
fn test_sanitize_uri() {
let sanitized = "/a/b/c/d";
assert_eq!(sanitize_uri("/a/b/c/d".to_owned()), sanitized);
assert_eq!(sanitize_uri("/a////b/c//d".to_owned()), sanitized);
assert_eq!(sanitize_uri("/a///b/c///d".to_owned()), sanitized);
assert_eq!(sanitize_uri("/a//b/c////d".to_owned()), sanitized);
assert_eq!(sanitize_uri("///////a//b///c//d".to_owned()), sanitized);
assert_eq!(sanitize_uri("a".to_owned()), "a");
assert_eq!(sanitize_uri("a/".to_owned()), "a/");
assert_eq!(sanitize_uri("aa//".to_owned()), "aa/");
assert_eq!(sanitize_uri("aa".to_owned()), "aa");
assert_eq!(sanitize_uri("/".to_owned()), "/");
assert_eq!(sanitize_uri("".to_owned()), "");
assert_eq!(sanitize_uri("////".to_owned()), "/");
assert_eq!(sanitize_uri("aa//bb///cc//d".to_owned()), "aa/bb/cc/d");
assert_eq!(sanitize_uri("//aa//bb///cc//d".to_owned()), "/aa/bb/cc/d");
}
#[test]
fn test_request_accept_header() {
// This test validates the response `Content-Type` header and the response content for
// various request `Accept` headers.
// Populate MMDS with data.
let mmds = populate_mmds();
// Test without `Accept` header. micro-http defaults to `Accept: text/plain`.
let (request, expected_response) = generate_request_and_expected_response(
b"GET http://169.254.169.254/ HTTP/1.0\r\n\r\n",
MediaType::PlainText,
);
assert_eq!(
convert_to_response(mmds.clone(), request),
expected_response
);
// Test with empty `Accept` header. micro-http defaults to `Accept: text/plain`.
let (request, expected_response) = generate_request_and_expected_response(
b"GET http://169.254.169.254/ HTTP/1.0\r\n\"
Accept:\r\n\r\n",
MediaType::PlainText,
);
assert_eq!(
convert_to_response(mmds.clone(), request),
expected_response
);
// Test with `Accept: */*` header.
let (request, expected_response) = generate_request_and_expected_response(
b"GET http://169.254.169.254/ HTTP/1.0\r\n\"
Accept: */*\r\n\r\n",
MediaType::PlainText,
);
assert_eq!(
convert_to_response(mmds.clone(), request),
expected_response
);
// Test with `Accept: text/plain`.
let (request, expected_response) = generate_request_and_expected_response(
b"GET http://169.254.169.254/ HTTP/1.0\r\n\
Accept: text/plain\r\n\r\n",
MediaType::PlainText,
);
assert_eq!(
convert_to_response(mmds.clone(), request),
expected_response
);
// Test with `Accept: application/json`.
let (request, expected_response) = generate_request_and_expected_response(
b"GET http://169.254.169.254/ HTTP/1.0\r\n\
Accept: application/json\r\n\r\n",
MediaType::ApplicationJson,
);
assert_eq!(convert_to_response(mmds, request), expected_response);
}
// Test the version-independent error paths of `convert_to_response()`.
#[test]
fn test_convert_to_response_negative() {
for version in [MmdsVersion::V1, MmdsVersion::V2] {
let mmds = populate_mmds();
mmds.lock().expect("Poisoned lock").set_version(version);
// Test InvalidURI (empty absolute path).
let request = Request::try_from(b"GET http:// HTTP/1.0\r\n\r\n", None).unwrap();
let mut expected_response = Response::new(Version::Http10, StatusCode::BadRequest);
expected_response.set_content_type(MediaType::PlainText);
expected_response.set_body(Body::new(VmmMmdsError::InvalidURI.to_string()));
let actual_response = convert_to_response(mmds.clone(), request);
assert_eq!(actual_response, expected_response);
// Test MethodNotAllowed (PATCH method).
let request =
Request::try_from(b"PATCH http://169.254.169.255/ HTTP/1.0\r\n\r\n", None).unwrap();
let mut expected_response =
Response::new(Version::Http10, StatusCode::MethodNotAllowed);
expected_response.set_content_type(MediaType::PlainText);
expected_response.set_body(Body::new(VmmMmdsError::MethodNotAllowed.to_string()));
expected_response.allow_method(Method::Get);
expected_response.allow_method(Method::Put);
let actual_response = convert_to_response(mmds.clone(), request);
assert_eq!(actual_response, expected_response);
}
}
#[test]
fn test_respond_to_request_mmdsv1() {
let mmds = populate_mmds();
mmds.lock()
.expect("Poisoned lock")
.set_version(MmdsVersion::V1);
// Test valid v1 GET request.
let (request, expected_response) = generate_request_and_expected_response(
b"GET http://169.254.169.254/ HTTP/1.0\r\n\
Accept: application/json\r\n\r\n",
MediaType::ApplicationJson,
);
let prev_rx_invalid_token = METRICS.mmds.rx_invalid_token.count();
let prev_rx_no_token = METRICS.mmds.rx_no_token.count();
let actual_response = convert_to_response(mmds.clone(), request);
assert_eq!(actual_response, expected_response);
assert_eq!(prev_rx_invalid_token, METRICS.mmds.rx_invalid_token.count());
assert_eq!(prev_rx_no_token + 1, METRICS.mmds.rx_no_token.count());
// Test valid PUT request to generate a valid token.
let request = Request::try_from(
b"PUT http://169.254.169.254/latest/api/token HTTP/1.0\r\n\
X-metadata-token-ttl-seconds: 60\r\n\r\n",
None,
)
.unwrap();
let actual_response = convert_to_response(mmds.clone(), request);
assert_eq!(actual_response.status(), StatusCode::OK);
assert_eq!(actual_response.content_type(), MediaType::PlainText);
let valid_token = String::from_utf8(actual_response.body().unwrap().body).unwrap();
// Test valid v2 GET request.
#[rustfmt::skip]
let (request, expected_response) = generate_request_and_expected_response(
format!(
"GET http://169.254.169.254/ HTTP/1.0\r\n\
Accept: application/json\r\n\
X-metadata-token: {valid_token}\r\n\r\n",
)
.as_bytes(),
MediaType::ApplicationJson,
);
let prev_rx_invalid_token = METRICS.mmds.rx_invalid_token.count();
let prev_rx_no_token = METRICS.mmds.rx_no_token.count();
let actual_response = convert_to_response(mmds.clone(), request);
assert_eq!(actual_response, expected_response);
assert_eq!(prev_rx_invalid_token, METRICS.mmds.rx_invalid_token.count());
assert_eq!(prev_rx_no_token, METRICS.mmds.rx_no_token.count());
// Test GET request with invalid token is accepted when v1 is configured.
let (request, expected_response) = generate_request_and_expected_response(
b"GET http://169.254.169.254/ HTTP/1.0\r\n\
Accept: application/json\r\n\
X-metadata-token: INVALID_TOKEN\r\n\r\n",
MediaType::ApplicationJson,
);
let prev_rx_invalid_token = METRICS.mmds.rx_invalid_token.count();
let prev_rx_no_token = METRICS.mmds.rx_no_token.count();
let actual_response = convert_to_response(mmds, request);
assert_eq!(actual_response, expected_response);
assert_eq!(
prev_rx_invalid_token + 1,
METRICS.mmds.rx_invalid_token.count()
);
assert_eq!(prev_rx_no_token, METRICS.mmds.rx_no_token.count());
}
#[test]
fn test_respond_to_request_mmdsv2() {
let mmds = populate_mmds();
mmds.lock()
.expect("Poisoned lock")
.set_version(MmdsVersion::V2);
// Test valid PUT to generate a valid token.
let request = Request::try_from(
b"PUT http://169.254.169.254/latest/api/token HTTP/1.0\r\n\
X-metadata-token-ttl-seconds: 60\r\n\r\n",
None,
)
.unwrap();
let actual_response = convert_to_response(mmds.clone(), request);
assert_eq!(actual_response.status(), StatusCode::OK);
assert_eq!(actual_response.content_type(), MediaType::PlainText);
let valid_token = String::from_utf8(actual_response.body().unwrap().body).unwrap();
// Test valid GET.
#[rustfmt::skip]
let (request, expected_response) = generate_request_and_expected_response(
format!(
"GET http://169.254.169.254/ HTTP/1.0\r\n\
Accept: application/json\r\n\
X-metadata-token: {valid_token}\r\n\r\n",
)
.as_bytes(),
MediaType::ApplicationJson,
);
let prev_rx_invalid_token = METRICS.mmds.rx_invalid_token.count();
let prev_rx_no_token = METRICS.mmds.rx_no_token.count();
let actual_response = convert_to_response(mmds.clone(), request);
assert_eq!(actual_response, expected_response);
assert_eq!(prev_rx_invalid_token, METRICS.mmds.rx_invalid_token.count());
assert_eq!(prev_rx_no_token, METRICS.mmds.rx_no_token.count());
// Test GET request without token should return Unauthorized status code.
let request =
Request::try_from(b"GET http://169.254.169.254/ HTTP/1.0\r\n\r\n", None).unwrap();
let mut expected_response = Response::new(Version::Http10, StatusCode::Unauthorized);
expected_response.set_content_type(MediaType::PlainText);
expected_response.set_body(Body::new(VmmMmdsError::NoTokenProvided.to_string()));
let prev_rx_no_token = METRICS.mmds.rx_no_token.count();
let actual_response = convert_to_response(mmds.clone(), request);
assert_eq!(actual_response, expected_response);
assert_eq!(prev_rx_no_token + 1, METRICS.mmds.rx_no_token.count());
// Create an expired token.
let request = Request::try_from(
b"PUT http://169.254.169.254/latest/api/token HTTP/1.0\r\n\
X-metadata-token-ttl-seconds: 1\r\n\r\n",
None,
)
.unwrap();
let actual_response = convert_to_response(mmds.clone(), request);
assert_eq!(actual_response.status(), StatusCode::OK);
assert_eq!(actual_response.content_type(), MediaType::PlainText);
let expired_token = String::from_utf8(actual_response.body().unwrap().body).unwrap();
std::thread::sleep(Duration::from_secs(1));
// Test GET request with invalid tokens.
let tokens = ["INVALID_TOKEN", &expired_token];
for token in tokens.iter() {
#[rustfmt::skip]
let request = Request::try_from(
format!(
"GET http://169.254.169.254/ HTTP/1.0\r\n\
X-metadata-token: {token}\r\n\r\n",
)
.as_bytes(),
None,
)
.unwrap();
let mut expected_response = Response::new(Version::Http10, StatusCode::Unauthorized);
expected_response.set_content_type(MediaType::PlainText);
expected_response.set_body(Body::new(VmmMmdsError::InvalidToken.to_string()));
let prev_rx_invalid_token = METRICS.mmds.rx_invalid_token.count();
let prev_rx_no_token = METRICS.mmds.rx_no_token.count();
let actual_response = convert_to_response(mmds.clone(), request);
assert_eq!(actual_response, expected_response);
assert_eq!(
prev_rx_invalid_token + 1,
METRICS.mmds.rx_invalid_token.count()
);
assert_eq!(prev_rx_no_token, METRICS.mmds.rx_no_token.count());
}
}
// Test the version-independent parts of GET request
#[test]
fn test_respond_to_get_request() {
for version in [MmdsVersion::V1, MmdsVersion::V2] {
let mmds = populate_mmds();
mmds.lock().expect("Poisoned lock").set_version(version);
// Generate a token
let request = Request::try_from(
b"PUT http://169.254.169.254/latest/api/token HTTP/1.0\r\n\
X-metadata-token-ttl-seconds: 60\r\n\r\n",
None,
)
.unwrap();
let actual_response = convert_to_response(mmds.clone(), request);
assert_eq!(actual_response.status(), StatusCode::OK);
assert_eq!(actual_response.content_type(), MediaType::PlainText);
let valid_token = String::from_utf8(actual_response.body().unwrap().body).unwrap();
// Test invalid path
#[rustfmt::skip]
let request = Request::try_from(
format!(
"GET http://169.254.169.254/invalid HTTP/1.0\r\n\
X-metadata-token: {valid_token}\r\n\r\n",
)
.as_bytes(),
None,
)
.unwrap();
let mut expected_response = Response::new(Version::Http10, StatusCode::NotFound);
expected_response.set_content_type(MediaType::PlainText);
expected_response.set_body(Body::new(
VmmMmdsError::ResourceNotFound(String::from("/invalid")).to_string(),
));
let actual_response = convert_to_response(mmds.clone(), request);
assert_eq!(actual_response, expected_response);
// Test unsupported type
#[rustfmt::skip]
let request = Request::try_from(
format!(
"GET /age HTTP/1.1\r\n\
X-metadata-token: {valid_token}\r\n\r\n",
)
.as_bytes(),
None,
)
.unwrap();
let mut expected_response = Response::new(Version::Http11, StatusCode::NotImplemented);
expected_response.set_content_type(MediaType::PlainText);
let body = "Cannot retrieve value. The value has an unsupported type.".to_string();
expected_response.set_body(Body::new(body));
let actual_response = convert_to_response(mmds.clone(), request);
assert_eq!(actual_response, expected_response);
// Test invalid `X-metadata-token-ttl-seconds` value is ignored if not PUT request.
#[rustfmt::skip]
let (request, expected_response) = generate_request_and_expected_response(
format!(
"GET http://169.254.169.254/ HTTP/1.0\r\n\
X-metadata-token: {valid_token}\r\n\
X-metadata-token-ttl-seconds: application/json\r\n\r\n",
)
.as_bytes(),
MediaType::PlainText,
);
let actual_response = convert_to_response(mmds.clone(), request);
assert_eq!(actual_response, expected_response);
}
}
// Test PUT request (version-independent)
#[test]
fn test_respond_to_put_request() {
for version in [MmdsVersion::V1, MmdsVersion::V2] {
let mmds = populate_mmds();
mmds.lock().expect("Poisoned lock").set_version(version);
// Test valid PUT
let request = Request::try_from(
b"PUT http://169.254.169.254/latest/api/token HTTP/1.0\r\n\
X-metadata-token-ttl-seconds: 60\r\n\r\n",
None,
)
.unwrap();
let actual_response = convert_to_response(mmds.clone(), request);
assert_eq!(actual_response.status(), StatusCode::OK);
assert_eq!(actual_response.content_type(), MediaType::PlainText);
assert_eq!(
actual_response
.custom_headers()
.get("X-metadata-token-ttl-seconds")
.unwrap(),
"60"
);
// Test unsupported `X-Forwarded-For` header
for header in ["X-Forwarded-For", "x-forwarded-for", "X-fOrWaRdEd-FoR"] {
#[rustfmt::skip]
let request = Request::try_from(
format!(
"PUT http://169.254.169.254/latest/api/token HTTP/1.0\r\n\
{header}: 203.0.113.195\r\n\r\n"
)
.as_bytes(),
None,
)
.unwrap();
let mut expected_response = Response::new(Version::Http10, StatusCode::BadRequest);
expected_response.set_content_type(MediaType::PlainText);
expected_response.set_body(Body::new(format!(
"Invalid header. Reason: Unsupported header name. Key: {header}"
)));
let actual_response = convert_to_response(mmds.clone(), request);
assert_eq!(actual_response, expected_response);
}
// Test invalid path
let request = Request::try_from(
b"PUT http://169.254.169.254/token HTTP/1.0\r\n\
X-metadata-token-ttl-seconds: 60\r\n\r\n",
None,
)
.unwrap();
let mut expected_response = Response::new(Version::Http10, StatusCode::NotFound);
expected_response.set_content_type(MediaType::PlainText);
expected_response.set_body(Body::new(
VmmMmdsError::ResourceNotFound(String::from("/token")).to_string(),
));
let actual_response = convert_to_response(mmds.clone(), request);
assert_eq!(actual_response, expected_response);
// Test non-numeric `X-metadata-token-ttl-seconds` value
let request = Request::try_from(
b"PUT http://169.254.169.254/latest/api/token HTTP/1.0\r\n\
X-metadata-token-ttl-seconds: application/json\r\n\r\n",
None,
)
.unwrap();
let mut expected_response = Response::new(Version::Http10, StatusCode::BadRequest);
expected_response.set_content_type(MediaType::PlainText);
#[rustfmt::skip]
expected_response.set_body(Body::new(
"Invalid header. Reason: Invalid value. \
Key:X-metadata-token-ttl-seconds; Value:application/json"
.to_string(),
));
let actual_response = convert_to_response(mmds.clone(), request);
assert_eq!(actual_response, expected_response);
// Test out-of-range `X-metadata-token-ttl-seconds` value
let invalid_values = [MIN_TOKEN_TTL_SECONDS - 1, MAX_TOKEN_TTL_SECONDS + 1];
for invalid_value in invalid_values.iter() {
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/mmds/data_store.rs | src/vmm/src/mmds/data_store.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fmt;
use std::fmt::{Display, Formatter};
use serde::{Deserialize, Serialize};
use serde_json::{Value, to_vec};
use crate::mmds::token::{MmdsTokenError as TokenError, TokenAuthority};
/// The Mmds is the Microvm Metadata Service represented as an untyped json.
#[derive(Debug)]
pub struct Mmds {
version: MmdsVersion,
data_store: Value,
token_authority: TokenAuthority,
is_initialized: bool,
data_store_limit: usize,
imds_compat: bool,
}
/// MMDS version.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Deserialize, Serialize)]
pub enum MmdsVersion {
#[default]
/// MMDS version 1
V1,
/// MMDS version 2
V2,
}
impl Display for MmdsVersion {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
MmdsVersion::V1 => write!(f, "V1"),
MmdsVersion::V2 => write!(f, "V2"),
}
}
}
/// MMDS possible outputs.
#[derive(Debug, Clone, Copy)]
pub enum OutputFormat {
/// MMDS output format as Json
Json,
/// MMDS output format as Imds
Imds,
}
#[derive(Debug, thiserror::Error, displaydoc::Display)]
/// MMDS data store errors
pub enum MmdsDatastoreError {
/// The MMDS patch request doesn't fit.
DataStoreLimitExceeded,
/// The MMDS resource does not exist.
NotFound,
/// The MMDS data store is not initialized.
NotInitialized,
/// Token Authority error: {0}
TokenAuthority(#[from] TokenError),
/// Cannot retrieve value. The value has an unsupported type.
UnsupportedValueType,
}
// Used for ease of use in tests.
impl Default for Mmds {
fn default() -> Self {
Self::try_new(51200).unwrap()
}
}
impl Mmds {
/// MMDS default instance with limit `data_store_limit`
pub fn try_new(data_store_limit: usize) -> Result<Self, MmdsDatastoreError> {
Ok(Mmds {
version: MmdsVersion::default(),
data_store: Value::default(),
token_authority: TokenAuthority::try_new()?,
is_initialized: false,
data_store_limit,
imds_compat: false,
})
}
/// This method is needed to check if data store is initialized.
/// When a PATCH request is made on an uninitialized Mmds structure this method
/// should return a NotFound error.
fn check_data_store_initialized(&self) -> Result<(), MmdsDatastoreError> {
if self.is_initialized {
Ok(())
} else {
Err(MmdsDatastoreError::NotInitialized)
}
}
/// Set the MMDS version.
pub fn set_version(&mut self, version: MmdsVersion) {
self.version = version;
}
/// Get the MMDS version.
pub fn version(&self) -> MmdsVersion {
self.version
}
/// Set the compatibility with EC2 IMDS.
pub fn set_imds_compat(&mut self, imds_compat: bool) {
self.imds_compat = imds_compat;
}
/// Get the compatibility with EC2 IMDS.
pub fn imds_compat(&self) -> bool {
self.imds_compat
}
/// Sets the Additional Authenticated Data to be used for encryption and
/// decryption of the session token.
pub fn set_aad(&mut self, instance_id: &str) {
self.token_authority.set_aad(instance_id);
}
/// Checks if the provided token has not expired.
pub fn is_valid_token(&self, token: &str) -> bool {
self.token_authority.is_valid(token)
}
/// Generate a new Mmds token using the token authority.
pub fn generate_token(&mut self, ttl_seconds: u32) -> Result<String, TokenError> {
self.token_authority.generate_token_secret(ttl_seconds)
}
/// set MMDS data store limit to `data_store_limit`
pub fn set_data_store_limit(&mut self, data_store_limit: usize) {
self.data_store_limit = data_store_limit;
}
/// put `data` in MMDS data store
pub fn put_data(&mut self, data: Value) -> Result<(), MmdsDatastoreError> {
// It is safe to unwrap because any map keys are all strings and
// we are using default serializer which does not return error.
if to_vec(&data).unwrap().len() > self.data_store_limit {
Err(MmdsDatastoreError::DataStoreLimitExceeded)
} else {
self.data_store = data;
self.is_initialized = true;
Ok(())
}
}
/// patch update MMDS data store with `patch_data`
pub fn patch_data(&mut self, patch_data: Value) -> Result<(), MmdsDatastoreError> {
self.check_data_store_initialized()?;
let mut data_store_clone = self.data_store.clone();
super::json_patch(&mut data_store_clone, &patch_data);
// It is safe to unwrap because our data store keys are all strings and
// we are using default serializer which does not return error.
if to_vec(&data_store_clone).unwrap().len() > self.data_store_limit {
return Err(MmdsDatastoreError::DataStoreLimitExceeded);
}
self.data_store = data_store_clone;
Ok(())
}
/// return MMDS data store value
/// We do not check size of data_store before returning a result because due
/// to limit from put/patch the data_store can not be bigger than the limit
/// imposed by the server.
pub fn data_store_value(&self) -> Value {
self.data_store.clone()
}
/// Returns the serde::Value in IMDS format plaintext.
/// Currently, only JSON objects and strings can be IMDS formatted.
///
/// See the docs for detailed description of the IMDS format:
/// <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html>
///
/// # Examples
///
/// ```json
/// {
/// "key1" : {
/// "key11": "value11"
/// "key12": "value12"
/// }
/// "key2" : "value3"
/// "key3" : "value3"
/// }
/// ```
///
/// IMDS formatted JSON object:
/// ```text
/// key1/
/// key2
/// key3
/// ```
///
/// JSON string:
/// ```json
/// "value"
/// ```
///
/// IMDS formatted string:
/// ```text
/// value
/// ```
///
/// If the `serde_json::Value` is not supported, an `UnsupportedValueType` error is returned.
fn format_imds(json: &Value) -> Result<String, MmdsDatastoreError> {
// If the `dict` is Value::Null, Error::NotFound is thrown.
// If the `dict` is not a dictionary, a Vec with the value corresponding to
// the key is returned.
match json.as_object() {
Some(map) => {
let mut ret = Vec::new();
// When the object is a map, push all the keys in the Vec.
for key in map.keys() {
let mut key = key.clone();
// If the key corresponds to a dictionary, a "/" is appended
// to the key name.
if map[&key].is_object() {
key.push('/');
}
ret.push(key);
}
Ok(ret.join("\n"))
}
None => {
// When the object is not a map, return the value.
// Support only `Value::String`.
match json.as_str() {
Some(str_val) => Ok(str_val.to_string()),
None => Err(MmdsDatastoreError::UnsupportedValueType),
}
}
}
}
/// Returns the subtree located at path. When the path corresponds to a leaf, it returns the
/// value. Returns Error::NotFound when the path is invalid.
pub fn get_value(
&self,
path: String,
format: OutputFormat,
) -> Result<String, MmdsDatastoreError> {
// The pointer function splits the input by "/". With a trailing "/", pointer does not
// know how to get the object.
let value = if path.ends_with('/') {
self.data_store.pointer(&path.as_str()[..(path.len() - 1)])
} else {
self.data_store.pointer(path.as_str())
};
if let Some(json) = value {
match self.imds_compat {
// EC2 IMDS ignores the Accept header.
true => Mmds::format_imds(json),
false => match format {
OutputFormat::Json => Ok(json.to_string()),
OutputFormat::Imds => Mmds::format_imds(json),
},
}
} else {
Err(MmdsDatastoreError::NotFound)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
impl Mmds {
fn get_data_str(&self) -> String {
if self.data_store.is_null() {
return String::from("{}");
}
self.data_store.to_string()
}
}
#[test]
fn test_display_mmds_version() {
assert_eq!(MmdsVersion::V1.to_string(), "V1");
assert_eq!(MmdsVersion::V2.to_string(), "V2");
assert_eq!(MmdsVersion::default().to_string(), "V1");
}
#[test]
fn test_mmds_version() {
let mut mmds = Mmds::default();
// Test default MMDS version.
assert_eq!(mmds.version(), MmdsVersion::V1);
// Test setting MMDS version to v2.
mmds.set_version(MmdsVersion::V2);
assert_eq!(mmds.version(), MmdsVersion::V2);
// Test setting MMDS version back to v1.
mmds.set_version(MmdsVersion::V1);
assert_eq!(mmds.version(), MmdsVersion::V1);
}
#[test]
fn test_mmds() {
let mut mmds = Mmds::default();
assert_eq!(
mmds.check_data_store_initialized().unwrap_err().to_string(),
"The MMDS data store is not initialized.".to_string(),
);
let mut mmds_json = "{\"meta-data\":{\"iam\":\"dummy\"},\"user-data\":\"1522850095\"}";
mmds.put_data(serde_json::from_str(mmds_json).unwrap())
.unwrap();
mmds.check_data_store_initialized().unwrap();
assert_eq!(mmds.get_data_str(), mmds_json);
// update the user-data field add test that patch works as expected
let patch_json = "{\"user-data\":\"10\"}";
mmds.patch_data(serde_json::from_str(patch_json).unwrap())
.unwrap();
mmds_json = "{\"meta-data\":{\"iam\":\"dummy\"},\"user-data\":\"10\"}";
assert_eq!(mmds.get_data_str(), mmds_json);
}
#[test]
fn test_get_value() {
for imds_compat in [false, true] {
let mut mmds = Mmds::default();
mmds.set_imds_compat(imds_compat);
let data = r#"{
"name": {
"first": "John",
"second": "Doe"
},
"age": 43,
"phones": [
"+401234567",
"+441234567"
],
"member": false,
"shares_percentage": 12.12,
"balance": -24,
"json_string": "{\n \"hello\": \"world\"\n}"
}"#;
let data_store: Value = serde_json::from_str(data).unwrap();
mmds.put_data(data_store).unwrap();
for format in [OutputFormat::Imds, OutputFormat::Json] {
// Test invalid path.
assert_eq!(
mmds.get_value("/invalid_path".to_string(), format)
.unwrap_err()
.to_string(),
MmdsDatastoreError::NotFound.to_string()
);
// Retrieve an object.
let expected = match (imds_compat, format) {
(false, OutputFormat::Imds) | (true, _) => "first\nsecond",
(false, OutputFormat::Json) => r#"{"first":"John","second":"Doe"}"#,
};
assert_eq!(
mmds.get_value("/name".to_string(), format).unwrap(),
expected
);
// Retrieve an integer.
match (imds_compat, format) {
(false, OutputFormat::Imds) | (true, _) => assert_eq!(
mmds.get_value("/age".to_string(), format)
.err()
.unwrap()
.to_string(),
MmdsDatastoreError::UnsupportedValueType.to_string()
),
(false, OutputFormat::Json) => {
assert_eq!(mmds.get_value("/age".to_string(), format).unwrap(), "43")
}
};
// Test path ends with /; Value is a dictionary.
// Retrieve an array.
match (imds_compat, format) {
(false, OutputFormat::Imds) | (true, _) => assert_eq!(
mmds.get_value("/phones/".to_string(), format)
.err()
.unwrap()
.to_string(),
MmdsDatastoreError::UnsupportedValueType.to_string()
),
(false, OutputFormat::Json) => assert_eq!(
mmds.get_value("/phones/".to_string(), format).unwrap(),
r#"["+401234567","+441234567"]"#
),
}
// Test path does NOT end with /; Value is a dictionary.
match (imds_compat, format) {
(false, OutputFormat::Imds) | (true, _) => assert_eq!(
mmds.get_value("/phones".to_string(), format)
.err()
.unwrap()
.to_string(),
MmdsDatastoreError::UnsupportedValueType.to_string()
),
(false, OutputFormat::Json) => assert_eq!(
mmds.get_value("/phones".to_string(), format).unwrap(),
r#"["+401234567","+441234567"]"#
),
}
// Retrieve the first element of an array.
let expected = match (imds_compat, format) {
(false, OutputFormat::Imds) | (true, _) => "+401234567",
(false, OutputFormat::Json) => "\"+401234567\"",
};
assert_eq!(
mmds.get_value("/phones/0/".to_string(), format).unwrap(),
expected
);
// Retrieve a boolean.
match (imds_compat, format) {
(false, OutputFormat::Imds) | (true, _) => assert_eq!(
mmds.get_value("/member".to_string(), format)
.err()
.unwrap()
.to_string(),
MmdsDatastoreError::UnsupportedValueType.to_string()
),
(false, OutputFormat::Json) => assert_eq!(
mmds.get_value("/member".to_string(), format).unwrap(),
"false"
),
}
// Retrieve a float.
match (imds_compat, format) {
(false, OutputFormat::Imds) | (true, _) => assert_eq!(
mmds.get_value("/shares_percentage".to_string(), format)
.err()
.unwrap()
.to_string(),
MmdsDatastoreError::UnsupportedValueType.to_string()
),
(false, OutputFormat::Json) => assert_eq!(
mmds.get_value("/shares_percentage".to_string(), format)
.unwrap(),
"12.12"
),
}
// Retrieve a negative integer.
match (imds_compat, format) {
(false, OutputFormat::Imds) | (true, _) => assert_eq!(
mmds.get_value("/balance".to_string(), format)
.err()
.unwrap()
.to_string(),
MmdsDatastoreError::UnsupportedValueType.to_string(),
),
(false, OutputFormat::Json) => assert_eq!(
mmds.get_value("/balance".to_string(), format).unwrap(),
"-24"
),
}
// Retrieve a string including escapes.
let expected = match (imds_compat, format) {
(false, OutputFormat::Imds) | (true, _) => "{\n \"hello\": \"world\"\n}",
(false, OutputFormat::Json) => r#""{\n \"hello\": \"world\"\n}""#,
};
assert_eq!(
mmds.get_value("/json_string".to_string(), format).unwrap(),
expected
);
}
}
}
#[test]
fn test_update_data_store() {
let mut mmds = Mmds::default();
let data = r#"{
"name": {
"first": "John",
"second": "Doe"
},
"age": "43"
}"#;
let data_store: Value = serde_json::from_str(data).unwrap();
mmds.put_data(data_store).unwrap();
let data = r#"{
"name": {
"first": "John",
"second": "Doe"
},
"age": "100"
}"#;
let data_store: Value = serde_json::from_str(data).unwrap();
mmds.patch_data(data_store).unwrap();
let data = r#"{
"name": {
"first": "John",
"second": "Doe"
},
"age": 43
}"#;
let data_store: Value = serde_json::from_str(data).unwrap();
mmds.put_data(data_store).unwrap();
let data = r#"{
"name": {
"first": "John",
"second": null
},
"age": "43"
}"#;
let data_store: Value = serde_json::from_str(data).unwrap();
mmds.patch_data(data_store).unwrap();
let filling = (0..51151).map(|_| "X").collect::<String>();
let data = "{\"new_key\": \"".to_string() + &filling + "\"}";
let data_store: Value = serde_json::from_str(&data).unwrap();
mmds.patch_data(data_store).unwrap();
let data = "{\"new_key2\" : \"smth\"}";
let data_store: Value = serde_json::from_str(data).unwrap();
assert_eq!(
mmds.patch_data(data_store).unwrap_err().to_string(),
MmdsDatastoreError::DataStoreLimitExceeded.to_string()
);
assert!(!mmds.get_data_str().contains("smth"));
let data = "{\"new_key\" : \"smth\"}";
let data_store: Value = serde_json::from_str(data).unwrap();
mmds.patch_data(data_store).unwrap();
assert!(mmds.get_data_str().contains("smth"));
assert_eq!(mmds.get_data_str().len(), 53);
let data = "{\"new_key2\" : \"smth2\"}";
let data_store: Value = serde_json::from_str(data).unwrap();
mmds.patch_data(data_store).unwrap();
assert!(mmds.get_data_str().contains("smth2"));
assert_eq!(mmds.get_data_str().len(), 72);
}
#[test]
fn test_put_size_limit() {
let mut mmds = Mmds::default();
let filling = (0..51300).map(|_| "X").collect::<String>();
let data = "{\"key\": \"".to_string() + &filling + "\"}";
let data_store: Value = serde_json::from_str(&data).unwrap();
assert_eq!(
mmds.put_data(data_store).unwrap_err().to_string(),
MmdsDatastoreError::DataStoreLimitExceeded.to_string()
);
assert_eq!(mmds.get_data_str().len(), 2);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/mmds/token_headers.rs | src/vmm/src/mmds/token_headers.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::collections::HashMap;
// `X-Forwarded-For`
pub(crate) const X_FORWARDED_FOR_HEADER: &str = "x-forwarded-for";
// `X-metadata-token`
pub(crate) const X_METADATA_TOKEN_HEADER: &str = "x-metadata-token";
// `X-aws-ec2-metadata-token`
pub(crate) const X_AWS_EC2_METADATA_TOKEN_HEADER: &str = "x-aws-ec2-metadata-token";
// `X-metadata-token-ttl-seconds`
pub(crate) const X_METADATA_TOKEN_TTL_SECONDS_HEADER: &str = "x-metadata-token-ttl-seconds";
// `X-aws-ec2-metadata-token-ttl-seconds`
pub(crate) const X_AWS_EC2_METADATA_TOKEN_SSL_SECONDS_HEADER: &str =
"x-aws-ec2-metadata-token-ttl-seconds";
pub(crate) fn get_header_value_pair<'a>(
custom_headers: &'a HashMap<String, String>,
headers: &'a [&'static str],
) -> Option<(&'a String, &'a String)> {
custom_headers
.iter()
.find(|(k, _)| headers.iter().any(|header| k.eq_ignore_ascii_case(header)))
}
#[cfg(test)]
mod tests {
use super::*;
fn to_mixed_case(s: &str) -> String {
s.chars()
.enumerate()
.map(|(i, c)| {
if i % 2 == 0 {
c.to_ascii_lowercase()
} else {
c.to_ascii_uppercase()
}
})
.collect()
}
#[test]
fn test_get_header_value_pair() {
let headers = [X_METADATA_TOKEN_HEADER, X_AWS_EC2_METADATA_TOKEN_HEADER];
// No custom headers
let custom_headers = HashMap::default();
let token = get_header_value_pair(&custom_headers, &headers);
assert!(token.is_none());
// Unrelated custom headers
let custom_headers = HashMap::from([
("Some-Header".into(), "10".into()),
("Another-Header".into(), "value".into()),
]);
let token = get_header_value_pair(&custom_headers, &headers);
assert!(token.is_none());
for header in headers {
// Valid header
let expected = "THIS_IS_TOKEN";
let custom_headers = HashMap::from([(header.into(), expected.into())]);
let token = get_header_value_pair(&custom_headers, &headers).unwrap();
assert_eq!(token, (&header.into(), &expected.into()));
// Valid header in unrelated custom headers
let custom_headers = HashMap::from([
("Some-Header".into(), "10".into()),
("Another-Header".into(), "value".into()),
(header.into(), expected.into()),
]);
let token = get_header_value_pair(&custom_headers, &headers).unwrap();
assert_eq!(token, (&header.into(), &expected.into()));
// Test case-insensitiveness
let header = to_mixed_case(header);
let custom_headers = HashMap::from([(header.clone(), expected.into())]);
let token = get_header_value_pair(&custom_headers, &headers).unwrap();
assert_eq!(token, (&header, &expected.into()));
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/mmds/token.rs | src/vmm/src/mmds/token.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::convert::TryInto;
use std::fmt;
use std::ops::Add;
use aws_lc_rs::aead::{AES_256_GCM, Aad, Nonce, RandomizedNonceKey};
use base64::Engine;
use bincode::config;
use bincode::config::{Configuration, Fixint, Limit, LittleEndian};
use serde::{Deserialize, Serialize};
use utils::time::{ClockType, get_time_ms};
/// Length of initialization vector.
pub const IV_LEN: usize = 12;
/// Length of the key used for encryption.
pub const KEY_LEN: usize = 32;
/// Length of encryption payload.
pub const PAYLOAD_LEN: usize = std::mem::size_of::<u64>();
/// Length of encryption tag.
pub const TAG_LEN: usize = 16;
/// Constant to convert seconds to milliseconds.
pub const MILLISECONDS_PER_SECOND: u64 = 1_000;
/// Minimum lifetime of token.
pub const MIN_TOKEN_TTL_SECONDS: u32 = 1;
/// Maximum lifetime of token.
pub const MAX_TOKEN_TTL_SECONDS: u32 = 21600;
/// Path to token.
pub const PATH_TO_TOKEN: &str = "/latest/api/token";
/// Token length limit to ensure we don't bother decrypting huge character
/// sequences. Tokens larger than this are automatically rejected. The value
/// is computed based on the expected length of the base64 encoded Token struct
/// including a small deviation.
const TOKEN_LENGTH_LIMIT: usize = 70;
/// Byte limit passed to `bincode` to guard against allocating
/// too much memory when deserializing tokens.
const DESERIALIZATION_BYTES_LIMIT: usize = std::mem::size_of::<Token>();
const BINCODE_CONFIG: Configuration<LittleEndian, Fixint, Limit<DESERIALIZATION_BYTES_LIMIT>> =
config::standard()
.with_fixed_int_encoding()
.with_limit::<DESERIALIZATION_BYTES_LIMIT>()
.with_little_endian();
#[rustfmt::skip]
#[derive(Debug, thiserror::Error, displaydoc::Display)]
/// Errors related to generation and validation of MMDS session tokens.
///
/// NOTE: the variant doc comments below are the user-visible `Display`
/// strings (via `displaydoc`); do not edit them cosmetically.
pub enum MmdsTokenError {
    /// Failed to generate a key
    KeyGeneration,
    /// Failed to extract expiry value from token.
    ExpiryExtraction,
    /// Invalid time to live value provided for token: {0}. Please provide a value between {MIN_TOKEN_TTL_SECONDS:} and {MAX_TOKEN_TTL_SECONDS:}.
    InvalidTtlValue(u32),
    /// Bincode serialization failed: {0}.
    Serialization(#[from] bincode::error::EncodeError),
    /// Failed to encrypt token.
    TokenEncryption,
}
/// Authority that mints and validates MMDS session tokens.
///
/// A token is an expiry timestamp encrypted with AES-256-GCM under a randomly
/// generated key, authenticated with the microVM id as AAD (see `set_aad`).
pub struct TokenAuthority {
    // AES-256-GCM key with automatically randomized nonces (one per seal).
    cipher: RandomizedNonceKey,
    // Number of tokens encrypted under the current key.
    num_encrypted_tokens: u32,
    // Additional Authentication Data used for encryption and decryption.
    aad: String,
}
// TODO When https://github.com/RustCrypto/AEADs/pull/532 is merged replace these manual
// implementation with `#[derive(Debug)]`.
impl fmt::Debug for TokenAuthority {
    // Manual implementation because the `cipher` field does not implement
    // `Debug`; the key material is intentionally left out of the output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("TokenAuthority")
            .field("num_encrypted_tokens", &self.num_encrypted_tokens)
            .field("aad", &self.aad)
            .finish()
    }
}
impl TokenAuthority {
    /// Create a new token authority entity.
    ///
    /// Fails with [`MmdsTokenError::KeyGeneration`] if the random key or the
    /// cipher cannot be created.
    pub fn try_new() -> Result<TokenAuthority, MmdsTokenError> {
        Ok(TokenAuthority {
            cipher: TokenAuthority::create_cipher()?,
            num_encrypted_tokens: 0,
            aad: "".to_string(),
        })
    }

    /// Set Additional Authenticated Data to be used for
    /// encryption and decryption of the session token.
    ///
    /// Binding the instance id into the AAD makes tokens minted for one
    /// microVM fail validation on any other.
    pub fn set_aad(&mut self, instance_id: &str) {
        self.aad = format!("microvmid={}", instance_id);
    }

    /// Generate encoded token string using the token time to live provided.
    pub fn generate_token_secret(&mut self, ttl_seconds: u32) -> Result<String, MmdsTokenError> {
        // Check number of tokens encrypted under the current key. We need to
        // make sure no more than 2^32 tokens are encrypted with the same key.
        // If this number is reached, we need to reinitialize the cipher entity.
        self.check_encryption_count()?;
        // Create token structure containing the encrypted expiry value.
        let token = self.create_token(ttl_seconds)?;
        // Encode struct into base64 in order to obtain token string.
        let encoded_token = token.base64_encode()?;
        // Increase the count of encrypted tokens.
        self.num_encrypted_tokens += 1;
        Ok(encoded_token)
    }

    /// Create a new Token structure to encrypt.
    fn create_token(&mut self, ttl_seconds: u32) -> Result<Token, MmdsTokenError> {
        // Validate token time to live against bounds.
        if !TokenAuthority::check_ttl(ttl_seconds) {
            return Err(MmdsTokenError::InvalidTtlValue(ttl_seconds));
        }
        // Compute expiration time in milliseconds from ttl.
        let expiry = TokenAuthority::compute_expiry(ttl_seconds);
        // Encrypt expiry (RandomizedNonceKey generates nonce automatically).
        self.encrypt_expiry(expiry)
    }

    /// Encrypt expiry using AES-GCM block cipher and return token obtained.
    fn encrypt_expiry(&self, expiry: u64) -> Result<Token, MmdsTokenError> {
        // Convert expiry u64 value into bytes; the buffer is encrypted
        // in place below.
        let mut expiry_as_bytes = expiry.to_le_bytes();
        let aad = Aad::from(self.aad.as_bytes());
        // `seal_in_place_separate_tag` draws a fresh random nonce and returns
        // it together with the authentication tag.
        let (nonce, tag) = self
            .cipher
            .seal_in_place_separate_tag(aad, &mut expiry_as_bytes)
            .map_err(|_| MmdsTokenError::TokenEncryption)?;
        // Tag must be of size `TAG_LEN`.
        let tag_as_bytes: [u8; TAG_LEN] = tag
            .as_ref()
            .try_into()
            .map_err(|_| MmdsTokenError::TokenEncryption)?;
        Ok(Token::new(*nonce.as_ref(), expiry_as_bytes, tag_as_bytes))
    }

    /// Attempts to decrypt expiry value within token sequence. Returns false if expiry
    /// cannot be decrypted. If decryption succeeds, returns true if token has not expired
    /// (i.e. current time is greater than expiry) and false otherwise.
    pub fn is_valid(&self, encoded_token: &str) -> bool {
        // Check size of encoded token struct; anything longer than the
        // expected encoded length is rejected before attempting decryption.
        if encoded_token.len() > TOKEN_LENGTH_LIMIT {
            return false;
        }
        // Decode token struct from base64.
        let token = match Token::base64_decode(encoded_token) {
            Ok(token) => token,
            Err(_) => return false,
        };
        // Decrypt ttl using AES-GCM block cipher.
        let expiry = match self.decrypt_expiry(&token) {
            Ok(expiry) => expiry,
            Err(_) => return false,
        };
        // Compare expiry (in ms) with current time in milliseconds.
        expiry > get_time_ms(ClockType::Monotonic)
    }

    /// Decrypt ciphertext composed of payload and tag to obtain the expiry value.
    fn decrypt_expiry(&self, token: &Token) -> Result<u64, MmdsTokenError> {
        // Create Nonce object from initialization vector.
        let nonce = Nonce::assume_unique_for_key(token.iv);
        let aad = Aad::from(self.aad.as_bytes());
        // Combine payload and tag, since aws-lc-rs expects the tag appended
        // to the ciphertext.
        let mut ciphertext_and_tag = [0; PAYLOAD_LEN + TAG_LEN];
        ciphertext_and_tag[..PAYLOAD_LEN].copy_from_slice(&token.payload);
        ciphertext_and_tag[PAYLOAD_LEN..].copy_from_slice(&token.tag);
        // Decrypt in place; fails (invalidating the token) if the ciphertext,
        // the tag or the AAD does not match.
        let plaintext = self
            .cipher
            .open_in_place(nonce, aad, &mut ciphertext_and_tag)
            .map_err(|_| MmdsTokenError::ExpiryExtraction)?;
        let expiry_as_bytes: [u8; PAYLOAD_LEN] = plaintext
            .try_into()
            .map_err(|_| MmdsTokenError::ExpiryExtraction)?;
        // Return expiry value in milliseconds (see `compute_expiry`).
        Ok(u64::from_le_bytes(expiry_as_bytes))
    }

    /// Create a new AES-GCM cipher entity.
    fn create_cipher() -> Result<RandomizedNonceKey, MmdsTokenError> {
        // Randomly generate a 256-bit key to be used for encryption/decryption purposes.
        let mut key = [0u8; KEY_LEN];
        aws_lc_rs::rand::fill(&mut key).map_err(|_| MmdsTokenError::KeyGeneration)?;
        // Create cipher entity to handle encryption/decryption.
        RandomizedNonceKey::new(&AES_256_GCM, &key).map_err(|_| MmdsTokenError::KeyGeneration)
    }

    /// Make sure to reinitialize the cipher under a new key before reaching
    /// a count of 2^32 encrypted tokens under the same cipher entity.
    fn check_encryption_count(&mut self) -> Result<(), MmdsTokenError> {
        // Make sure no more than 2^32 - 1 tokens are encrypted under
        // the same encryption key.
        if self.num_encrypted_tokens == u32::MAX {
            // Reinitialize the cipher entity under a new key when limit is exceeded.
            // As a result, all valid tokens created under the previous key are invalidated.
            // By design, we don't retain the cipher used to encrypt previous tokens,
            // because reaching the limit is very unlikely and should not happen under
            // healthy interactions with MMDS. However, if it happens, we expect the
            // customer code to have a retry mechanism in place and regenerate the
            // session token if the previous ones become invalid.
            self.cipher = TokenAuthority::create_cipher()?;
            // Reset encrypted tokens count.
            self.num_encrypted_tokens = 0;
            crate::logger::warn!(
                "The limit of tokens generated under current MMDS token authority
                has been reached. MMDS's token authority entity has been reseeded
                and all previously created tokens are now invalid."
            );
        }
        Ok(())
    }

    /// Validate the token time to live against bounds.
    fn check_ttl(ttl_seconds: u32) -> bool {
        (MIN_TOKEN_TTL_SECONDS..=MAX_TOKEN_TTL_SECONDS).contains(&ttl_seconds)
    }

    /// Compute expiry time in milliseconds by adding the time to live provided
    /// (converted to milliseconds) to the current time, also in milliseconds.
    fn compute_expiry(ttl_as_seconds: u32) -> u64 {
        // Get current time in milliseconds.
        let now_as_milliseconds = get_time_ms(ClockType::Monotonic);
        // Compute expiry by adding ttl value converted to milliseconds
        // to current time (also in milliseconds). This addition is safe
        // because ttl is verified beforehand and can never be more than
        // 6h (21_600_000 ms).
        now_as_milliseconds.add(u64::from(ttl_as_seconds) * MILLISECONDS_PER_SECOND)
    }
}
/// Structure for token information.
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
struct Token {
    // Nonce or Initialization Vector used by AES-GCM for this token.
    iv: [u8; IV_LEN],
    // Encrypted expire time (milliseconds value, see
    // `TokenAuthority::compute_expiry`).
    payload: [u8; PAYLOAD_LEN],
    // Tag returned after encryption; authenticates payload and AAD.
    tag: [u8; TAG_LEN],
}
impl Token {
    /// Build a token from its raw components.
    fn new(iv: [u8; IV_LEN], payload: [u8; PAYLOAD_LEN], tag: [u8; TAG_LEN]) -> Self {
        Self { iv, payload, tag }
    }

    /// Serialize the token with bincode and render the bytes as a base64
    /// string.
    fn base64_encode(&self) -> Result<String, MmdsTokenError> {
        let serialized = bincode::serde::encode_to_vec(self, BINCODE_CONFIG)?;
        Ok(base64::engine::general_purpose::STANDARD.encode(serialized))
    }

    /// Decode a base64 string and deserialize the bytes into a `Token`.
    ///
    /// Any decoding failure is reported as `ExpiryExtraction`.
    fn base64_decode(encoded_token: &str) -> Result<Self, MmdsTokenError> {
        let raw_bytes = base64::engine::general_purpose::STANDARD
            .decode(encoded_token)
            .map_err(|_| MmdsTokenError::ExpiryExtraction)?;
        let (token, _consumed): (Token, usize) =
            bincode::serde::decode_from_slice(&raw_bytes, BINCODE_CONFIG)
                .map_err(|_| MmdsTokenError::ExpiryExtraction)?;
        Ok(token)
    }
}
#[cfg(test)]
mod tests {
    use std::thread::sleep;
    use std::time::Duration;

    use super::*;

    // Renamed from `test_check_tll` (typo) to match the function under test.
    #[test]
    fn test_check_ttl() {
        // Test invalid time to live values.
        assert!(!TokenAuthority::check_ttl(MIN_TOKEN_TTL_SECONDS - 1));
        assert!(!TokenAuthority::check_ttl(MAX_TOKEN_TTL_SECONDS + 1));
        // Test time to live value within bounds.
        assert!(TokenAuthority::check_ttl(MIN_TOKEN_TTL_SECONDS));
        assert!(TokenAuthority::check_ttl(MAX_TOKEN_TTL_SECONDS / 2));
        assert!(TokenAuthority::check_ttl(MAX_TOKEN_TTL_SECONDS));
    }

    #[test]
    fn test_set_aad() {
        let mut token_authority = TokenAuthority::try_new().unwrap();
        assert_eq!(token_authority.aad, "".to_string());
        token_authority.set_aad("foo");
        assert_eq!(token_authority.aad, "microvmid=foo".to_string());
    }

    #[test]
    fn test_create_token() {
        let mut token_authority = TokenAuthority::try_new().unwrap();
        // Test invalid time to live value.
        assert_eq!(
            token_authority.create_token(0).unwrap_err().to_string(),
            format!(
                "Invalid time to live value provided for token: 0. Please provide a value between \
                 {} and {}.",
                MIN_TOKEN_TTL_SECONDS, MAX_TOKEN_TTL_SECONDS
            )
        );
        // Test valid time to live value.
        let token = token_authority.create_token(1).unwrap();
        assert_eq!(token.iv.len(), IV_LEN);
        assert_eq!(token.payload.len(), PAYLOAD_LEN);
        assert_eq!(token.tag.len(), TAG_LEN);
    }

    #[test]
    fn test_compute_expiry() {
        let time_now = get_time_ms(ClockType::Monotonic);
        let expiry = TokenAuthority::compute_expiry(1);
        let ttl = expiry - time_now;
        // We allow a deviation of 20ms to account for the gap
        // between the two calls to `get_time_ms()`.
        let deviation = 20;
        assert!(
            ttl >= MILLISECONDS_PER_SECOND && ttl <= MILLISECONDS_PER_SECOND + deviation,
            "ttl={ttl} not within [{MILLISECONDS_PER_SECOND}, \
             {MILLISECONDS_PER_SECOND}+{deviation}]",
        );
        let time_now = get_time_ms(ClockType::Monotonic);
        let expiry = TokenAuthority::compute_expiry(0);
        let ttl = expiry - time_now;
        assert!(ttl <= deviation, "ttl={ttl} is greater than {deviation}");
    }

    #[test]
    fn test_encrypt_decrypt() {
        let mut token_authority = TokenAuthority::try_new().unwrap();
        let expiry = TokenAuthority::compute_expiry(10);
        // Test valid ciphertext.
        let token = token_authority.encrypt_expiry(expiry).unwrap();
        let decrypted_expiry = token_authority.decrypt_expiry(&token).unwrap();
        assert_eq!(expiry, decrypted_expiry);
        // Test ciphertext with corrupted payload.
        let mut bad_token = token.clone();
        bad_token.payload[0] = u8::MAX - bad_token.payload[0];
        assert!(matches!(
            token_authority.decrypt_expiry(&bad_token).unwrap_err(),
            MmdsTokenError::ExpiryExtraction
        ));
        // Test ciphertext with corrupted tag.
        let mut bad_token = token.clone();
        bad_token.tag[0] = u8::MAX - bad_token.tag[0];
        assert!(matches!(
            token_authority.decrypt_expiry(&bad_token).unwrap_err(),
            MmdsTokenError::ExpiryExtraction
        ));
        // Test decrypting expiry under a different AAD than it was encrypted with.
        token_authority.set_aad("foo");
        assert!(matches!(
            token_authority.decrypt_expiry(&token).unwrap_err(),
            MmdsTokenError::ExpiryExtraction
        ));
    }

    #[test]
    fn test_encode_decode() {
        let expected_token = Token::new([0u8; IV_LEN], [0u8; PAYLOAD_LEN], [0u8; TAG_LEN]);
        let mut encoded_token = expected_token.base64_encode().unwrap();
        let actual_token = Token::base64_decode(&encoded_token).unwrap();
        assert_eq!(actual_token, expected_token);
        // Decode invalid base64 bytes sequence.
        encoded_token.push('x');
        Token::base64_decode(&encoded_token).unwrap_err();
    }

    #[test]
    fn test_generate_token_secret() {
        let mut token_authority = TokenAuthority::try_new().unwrap();
        // Test time to live value too small.
        assert_eq!(
            token_authority
                .generate_token_secret(MIN_TOKEN_TTL_SECONDS - 1)
                .unwrap_err()
                .to_string(),
            format!(
                "Invalid time to live value provided for token: {}. Please provide a value \
                 between {} and {}.",
                MIN_TOKEN_TTL_SECONDS - 1,
                MIN_TOKEN_TTL_SECONDS,
                MAX_TOKEN_TTL_SECONDS
            )
        );
        // Test time to live value too big.
        assert_eq!(
            token_authority
                .generate_token_secret(MAX_TOKEN_TTL_SECONDS + 1)
                .unwrap_err()
                .to_string(),
            format!(
                "Invalid time to live value provided for token: {}. Please provide a value \
                 between {} and {}.",
                MAX_TOKEN_TTL_SECONDS + 1,
                MIN_TOKEN_TTL_SECONDS,
                MAX_TOKEN_TTL_SECONDS
            )
        );
        // Generate token with lifespan of 60 seconds.
        let _ = token_authority.generate_token_secret(60).unwrap();
        assert_eq!(token_authority.num_encrypted_tokens, 1);
    }

    #[test]
    fn test_is_valid() {
        let mut token_authority = TokenAuthority::try_new().unwrap();
        // Test token with size bigger than expected.
        assert!(!token_authority.is_valid(str::repeat("a", TOKEN_LENGTH_LIMIT + 1).as_str()));
        // Test valid token.
        let token0 = token_authority.generate_token_secret(1).unwrap();
        assert!(token_authority.is_valid(&token0));
    }

    #[test]
    fn test_token_authority() {
        let mut token_authority = TokenAuthority::try_new().unwrap();
        // Generate token with lifespan of 60 seconds.
        let token0 = token_authority.generate_token_secret(60).unwrap();
        assert!(token_authority.is_valid(&token0));
        // Generate token with lifespan of one second.
        let token1 = token_authority.generate_token_secret(1).unwrap();
        assert_eq!(token_authority.num_encrypted_tokens, 2);
        assert!(token_authority.is_valid(&token1));
        // Wait for `token1` to expire.
        sleep(Duration::new(1, 0));
        assert!(!token_authority.is_valid(&token1));
        // The first token should still be valid.
        assert!(token_authority.is_valid(&token0));
        // Simulate reaching to a count of 2^32 encrypted tokens.
        // The cipher and count should reset at this point and previous
        // tokens should become invalid.
        token_authority.num_encrypted_tokens = u32::MAX;
        let token2 = token_authority.generate_token_secret(60).unwrap();
        assert_eq!(token_authority.num_encrypted_tokens, 1);
        assert!(token_authority.is_valid(&token2));
        assert!(!token_authority.is_valid(&token0));
        assert!(!token_authority.is_valid(&token1));
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/mod.rs | src/vmm/src/devices/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! Emulates virtual and hardware devices.
#![allow(unused)]
use std::io;
pub mod acpi;
pub mod legacy;
pub mod pci;
pub mod pseudo;
pub mod virtio;
use log::error;
use crate::devices::virtio::net::metrics::NetDeviceMetrics;
use crate::devices::virtio::queue::{InvalidAvailIdx, QueueError};
use crate::devices::virtio::vsock::VsockError;
use crate::logger::IncMetric;
use crate::vstate::interrupts::InterruptError;
// Function used for reporting error in terms of logging
// but also in terms of metrics of net event fails.
// network metrics is reported per device so we need a handle to each net device's
// metrics `net_iface_metrics` to report metrics for that device.
pub(crate) fn report_net_event_fail(net_iface_metrics: &NetDeviceMetrics, err: DeviceError) {
if let DeviceError::InvalidAvailIdx(err) = err {
panic!("{}", err);
}
error!("{:?}", err);
net_iface_metrics.event_fails.inc();
}
#[derive(Debug, thiserror::Error, displaydoc::Display)]
/// Errors that can be raised during device emulation.
///
/// NOTE: the variant doc comments are the user-visible `Display` strings
/// (via `displaydoc`).
pub enum DeviceError {
    /// Failed to read from the TAP device.
    FailedReadTap,
    /// Failed to signal irq: {0}
    FailedSignalingIrq(#[from] InterruptError),
    /// IO error: {0}
    IoError(io::Error),
    /// Device received malformed payload.
    MalformedPayload,
    /// Device received malformed descriptor.
    MalformedDescriptor,
    /// Error during queue processing: {0}
    QueueError(#[from] QueueError),
    /// {0}
    InvalidAvailIdx(#[from] InvalidAvailIdx),
    /// Vsock device error: {0}
    VsockError(#[from] VsockError),
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/acpi/vmgenid.rs | src/vmm/src/devices/acpi/vmgenid.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::convert::Infallible;
use acpi_tables::{Aml, aml};
use aws_lc_rs::error::Unspecified as RandError;
use aws_lc_rs::rand;
use log::{debug, error};
use serde::{Deserialize, Serialize};
use vm_memory::{GuestAddress, GuestMemoryError};
use vm_superio::Trigger;
use vmm_sys_util::eventfd::EventFd;
use super::super::legacy::EventFdTrigger;
use crate::snapshot::Persist;
use crate::vstate::memory::{Bytes, GuestMemoryMmap};
use crate::vstate::resources::ResourceAllocator;
/// Bytes of memory we allocate for VMGenID device (the generation ID itself
/// is a 128-bit value, i.e. exactly 16 bytes).
pub const VMGENID_MEM_SIZE: u64 = 16;
/// Virtual Machine Generation ID device
///
/// VMGenID is an emulated device which exposes to the guest a 128-bit cryptographically random
/// integer value which will be different every time the virtual machine executes from a different
/// configuration file. In Firecracker terms this translates to a different value every time a new
/// microVM is created, either from scratch or restored from a snapshot.
///
/// The device specification can be found here: https://go.microsoft.com/fwlink/?LinkId=260709
#[derive(Debug)]
pub struct VmGenId {
    /// Current generation ID of guest VM
    pub gen_id: u128,
    /// Interrupt line for notifying the device about generation ID changes
    pub interrupt_evt: EventFdTrigger,
    /// Guest physical address where VMGenID data lives
    /// ([`VMGENID_MEM_SIZE`] bytes).
    pub guest_address: GuestAddress,
    /// GSI number for the device
    pub gsi: u32,
}
impl VmGenId {
    /// Create a new Vm Generation Id device using an address in the guest for writing the
    /// generation ID and a GSI for sending device notifications.
    ///
    /// # Panics
    ///
    /// Panics if the notification `EventFd` cannot be created.
    pub fn from_parts(guest_address: GuestAddress, gsi: u32) -> Self {
        debug!(
            "vmgenid: building VMGenID device. Address: {:#010x}. IRQ: {}",
            guest_address.0, gsi
        );
        let interrupt_evt = EventFdTrigger::new(
            EventFd::new(libc::EFD_NONBLOCK)
                .expect("vmgenid: Could not create EventFd for VMGenID device"),
        );
        // Every construction draws a fresh random generation ID.
        let gen_id = Self::make_genid();
        Self {
            gen_id,
            interrupt_evt,
            guest_address,
            gsi,
        }
    }

    /// Create a new VMGenID device
    ///
    /// Allocate memory and a GSI for sending notifications and build the device
    ///
    /// # Panics
    ///
    /// Panics if the GSI or the guest system memory cannot be allocated.
    pub fn new(resource_allocator: &mut ResourceAllocator) -> Self {
        let gsi = resource_allocator
            .allocate_gsi_legacy(1)
            .expect("vmgenid: Could not allocate GSI for VMGenID");
        // The generation ID needs to live in an 8-byte aligned buffer
        let addr = resource_allocator
            .allocate_system_memory(VMGENID_MEM_SIZE, 8, vm_allocator::AllocPolicy::LastMatch)
            .expect("vmgenid: Could not allocate guest RAM for VMGenID");
        Self::from_parts(GuestAddress(addr), gsi[0])
    }

    // Create a 16-bytes random number
    fn make_genid() -> u128 {
        let mut gen_id_bytes = [0u8; 16];
        rand::fill(&mut gen_id_bytes).expect("vmgenid: could not create new generation ID");
        u128::from_le_bytes(gen_id_bytes)
    }

    /// Send an ACPI notification to guest device.
    ///
    /// This will only have effect if we have updated the generation ID in guest memory, i.e. when
    /// re-creating the device after snapshot resumption.
    pub fn notify_guest(&mut self) -> Result<(), std::io::Error> {
        self.interrupt_evt
            .trigger()
            .inspect_err(|err| error!("vmgenid: could not send guest notification: {err}"))?;
        debug!("vmgenid: notifying guest about new generation ID");
        Ok(())
    }

    /// Attach the [`VmGenId`] device
    ///
    /// Writes the current generation ID into guest memory at `guest_address`.
    pub fn activate(&self, mem: &GuestMemoryMmap) -> Result<(), GuestMemoryError> {
        debug!(
            "vmgenid: writing new generation ID to guest: {:#034x}",
            self.gen_id
        );
        mem.write_slice(&self.gen_id.to_le_bytes(), self.guest_address)
            .inspect_err(|err| error!("vmgenid: could not write generation ID to guest: {err}"))?;
        Ok(())
    }
}
/// Logic to save/restore the state of a VMGenID device
///
/// Only the placement (GSI, guest address) is persisted; the generation ID
/// value itself is regenerated on restore (see `Persist::restore`).
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct VMGenIDState {
    /// GSI used for VMGenID device
    pub gsi: u32,
    /// memory address of generation ID
    pub addr: u64,
}
impl<'a> Persist<'a> for VmGenId {
    type State = VMGenIDState;
    type ConstructorArgs = ();
    type Error = Infallible;

    // Persist only the GSI and the guest address; the generation ID is not
    // saved on purpose.
    fn save(&self) -> Self::State {
        VMGenIDState {
            gsi: self.gsi,
            addr: self.guest_address.0,
        }
    }

    // Rebuild the device at the same address/GSI. `from_parts` draws a brand
    // new random generation ID, which is the desired post-snapshot behavior.
    fn restore(_: Self::ConstructorArgs, state: &Self::State) -> Result<Self, Self::Error> {
        Ok(Self::from_parts(GuestAddress(state.addr), state.gsi))
    }
}
impl Aml for VmGenId {
    // Emit the ACPI AML description of the device under \_SB_.VGEN, exposing
    // the guest address of the generation ID via the ADDR package.
    fn append_aml_bytes(&self, v: &mut Vec<u8>) -> Result<(), aml::AmlError> {
        // ADDR is a package of two 32-bit halves of the 64-bit guest address.
        #[allow(clippy::cast_possible_truncation)]
        let addr_low = self.guest_address.0 as u32;
        let addr_high = (self.guest_address.0 >> 32) as u32;
        aml::Device::new(
            "_SB_.VGEN".try_into()?,
            vec![
                &aml::Name::new("_HID".try_into()?, &"FCVMGID")?,
                &aml::Name::new("_CID".try_into()?, &"VM_Gen_Counter")?,
                &aml::Name::new("_DDN".try_into()?, &"VM_Gen_Counter")?,
                &aml::Name::new(
                    "ADDR".try_into()?,
                    &aml::Package::new(vec![&addr_low, &addr_high]),
                )?,
            ],
        )
        .append_aml_bytes(v)
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/acpi/mod.rs | src/vmm/src/devices/acpi/mod.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
mod generated;
pub mod vmclock;
pub mod vmgenid;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/acpi/vmclock.rs | src/vmm/src/devices/acpi/vmclock.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::convert::Infallible;
use std::mem::offset_of;
use std::sync::atomic::{Ordering, fence};
use acpi_tables::{Aml, aml};
use log::error;
use serde::{Deserialize, Serialize};
use vm_allocator::AllocPolicy;
use vm_memory::{Address, ByteValued, Bytes, GuestAddress, GuestMemoryError};
use crate::devices::acpi::generated::vmclock_abi::{
VMCLOCK_COUNTER_INVALID, VMCLOCK_MAGIC, VMCLOCK_STATUS_UNKNOWN, vmclock_abi,
};
use crate::snapshot::Persist;
use crate::vstate::memory::GuestMemoryMmap;
use crate::vstate::resources::ResourceAllocator;
// SAFETY: `vmclock_abi` is a POD generated by bindgen from the kernel ABI
// header (see `generated/vmclock_abi.rs`); `ByteValued` additionally requires
// that every bit pattern is valid and there is no padding —
// NOTE(review): confirm against the generated layout.
unsafe impl ByteValued for vmclock_abi {}
// We are reserving a physical page to expose the [`VmClock`] data
// (0x1000 bytes; also used as the allocation alignment in `VmClock::new`).
const VMCLOCK_SIZE: u32 = 0x1000;
// Write a value in `vmclock_abi` both in the Firecracker-managed state
// and inside guest memory address that corresponds to it.
//
// Callers do not return `Result`, so a failed guest-memory write cannot be
// propagated from here; log it instead of silently discarding it.
macro_rules! write_vmclock_field {
    ($vmclock:expr, $mem:expr, $field:ident, $value:expr) => {
        $vmclock.inner.$field = $value;
        if let Err(err) = $mem.write_obj(
            $vmclock.inner.$field,
            $vmclock
                .guest_address
                .unchecked_add(offset_of!(vmclock_abi, $field) as u64),
        ) {
            error!(
                "vmclock: could not write field `{}` to guest memory: {err}",
                stringify!($field)
            );
        }
    };
}
/// VMclock device
///
/// This device emulates the VMclock device which allows passing information to the guest related
/// to the relation of the host CPU to real-time clock as well as information about disruptive
/// events, such as live-migration.
#[derive(Debug)]
pub struct VmClock {
    /// Guest address in which we will write the VMclock struct
    /// (one [`VMCLOCK_SIZE`] page).
    pub guest_address: GuestAddress,
    /// The [`VmClock`] state we are exposing to the guest
    inner: vmclock_abi,
}
impl VmClock {
    /// Create a new [`VmClock`] device for a newly booted VM
    ///
    /// # Panics
    ///
    /// Panics if the page of guest system memory backing the device cannot be
    /// allocated.
    pub fn new(resource_allocator: &mut ResourceAllocator) -> VmClock {
        let addr = resource_allocator
            .allocate_system_memory(
                VMCLOCK_SIZE as u64,
                VMCLOCK_SIZE as u64,
                AllocPolicy::LastMatch,
            )
            .expect("vmclock: could not allocate guest memory for device");
        // The struct is fully initialized up front, so no `mut` binding is
        // needed (was `let mut inner`).
        let inner = vmclock_abi {
            magic: VMCLOCK_MAGIC,
            size: VMCLOCK_SIZE,
            version: 1,
            clock_status: VMCLOCK_STATUS_UNKNOWN,
            counter_id: VMCLOCK_COUNTER_INVALID,
            ..Default::default()
        };
        VmClock {
            guest_address: GuestAddress(addr),
            inner,
        }
    }

    /// Activate [`VmClock`] device
    ///
    /// Writes the whole clock structure into guest memory at `guest_address`.
    pub fn activate(&self, mem: &GuestMemoryMmap) -> Result<(), GuestMemoryError> {
        mem.write_slice(self.inner.as_slice(), self.guest_address)?;
        Ok(())
    }

    /// Bump the VM generation counter
    pub fn post_load_update(&mut self, mem: &GuestMemoryMmap) {
        // Setting the lowest bit makes `seq_count` odd, telling readers an
        // update is in flight (seqlock-style protocol).
        write_vmclock_field!(self, mem, seq_count, self.inner.seq_count | 1);
        // This fence ensures guest sees all previous writes. It is matched to a
        // read barrier in the guest.
        fence(Ordering::Release);
        write_vmclock_field!(
            self,
            mem,
            disruption_marker,
            self.inner.disruption_marker.wrapping_add(1)
        );
        // This fence ensures guest sees the `disruption_marker` update. It is matched to a
        // read barrier in the guest.
        fence(Ordering::Release);
        // Incrementing the (odd) sequence count makes it even again,
        // signalling that the update is complete.
        write_vmclock_field!(self, mem, seq_count, self.inner.seq_count.wrapping_add(1));
    }
}
/// (De)serialize-able state of the [`VmClock`]
///
/// We could avoid this and reuse [`VmClock`] itself if `GuestAddress` was `Serialize`/`Deserialize`
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct VmClockState {
    /// Guest address in which we write the [`VmClock`] info
    pub guest_address: u64,
    /// Data we expose to the guest
    pub inner: vmclock_abi,
}
impl<'a> Persist<'a> for VmClock {
    type State = VmClockState;
    type ConstructorArgs = &'a GuestMemoryMmap;
    type Error = Infallible;

    fn save(&self) -> Self::State {
        VmClockState {
            guest_address: self.guest_address.0,
            inner: self.inner,
        }
    }

    fn restore(
        constructor_args: Self::ConstructorArgs,
        state: &Self::State,
    ) -> Result<Self, Self::Error> {
        let mut vmclock = VmClock {
            guest_address: GuestAddress(state.guest_address),
            inner: state.inner,
        };
        // Snapshot restore is a disruptive event: bump the disruption marker
        // and publish the updated state to guest memory.
        vmclock.post_load_update(constructor_args);
        Ok(vmclock)
    }
}
impl Aml for VmClock {
    // Emit the ACPI AML description of the device under \_SB_.VCLK, declaring
    // the reserved memory page through _CRS.
    fn append_aml_bytes(&self, v: &mut Vec<u8>) -> Result<(), aml::AmlError> {
        aml::Device::new(
            "_SB_.VCLK".try_into()?,
            vec![
                &aml::Name::new("_HID".try_into()?, &"AMZNC10C")?,
                &aml::Name::new("_CID".try_into()?, &"VMCLOCK")?,
                &aml::Name::new("_DDN".try_into()?, &"VMCLOCK")?,
                // _STA returning 0x0F marks the device present and enabled.
                &aml::Method::new(
                    "_STA".try_into()?,
                    0,
                    false,
                    vec![&aml::Return::new(&0x0fu8)],
                ),
                &aml::Name::new(
                    "_CRS".try_into()?,
                    &aml::ResourceTemplate::new(vec![&aml::AddressSpace::new_memory(
                        aml::AddressSpaceCacheable::Cacheable,
                        false,
                        self.guest_address.0,
                        self.guest_address.0 + VMCLOCK_SIZE as u64 - 1,
                    )?]),
                )?,
            ],
        )
        .append_aml_bytes(v)
    }
}
#[cfg(test)]
mod tests {
    use vm_memory::{Bytes, GuestAddress};

    use crate::arch;
    use crate::devices::acpi::generated::vmclock_abi::vmclock_abi;
    use crate::devices::acpi::vmclock::{VMCLOCK_SIZE, VmClock};
    use crate::snapshot::Persist;
    use crate::test_utils::single_region_mem;
    use crate::utils::u64_to_usize;
    use crate::vstate::resources::ResourceAllocator;

    // We are allocating memory from the end of the system memory portion
    const VMCLOCK_TEST_GUEST_ADDR: GuestAddress =
        GuestAddress(arch::SYSTEM_MEM_START + arch::SYSTEM_MEM_SIZE - VMCLOCK_SIZE as u64);

    fn default_vmclock() -> VmClock {
        let mut resource_allocator = ResourceAllocator::new();
        VmClock::new(&mut resource_allocator)
    }

    #[test]
    fn test_new_device() {
        let vmclock = default_vmclock();
        let mem = single_region_mem(
            u64_to_usize(arch::SYSTEM_MEM_START) + u64_to_usize(arch::SYSTEM_MEM_SIZE),
        );
        // Before activation, guest memory must not contain the device state.
        let guest_data: vmclock_abi = mem.read_obj(VMCLOCK_TEST_GUEST_ADDR).unwrap();
        assert_ne!(guest_data, vmclock.inner);
        // Don't ignore the activation result; a swallowed write error would
        // make the following assertion meaningless.
        vmclock.activate(&mem).unwrap();
        let guest_data: vmclock_abi = mem.read_obj(VMCLOCK_TEST_GUEST_ADDR).unwrap();
        assert_eq!(guest_data, vmclock.inner);
    }

    #[test]
    fn test_device_save_restore() {
        let vmclock = default_vmclock();
        let mem = single_region_mem(
            u64_to_usize(arch::SYSTEM_MEM_START) + u64_to_usize(arch::SYSTEM_MEM_SIZE),
        );
        vmclock.activate(&mem).unwrap();
        // Ensure the pre-snapshot state was written; the value itself is not
        // needed by the assertions below.
        let _guest_data: vmclock_abi = mem.read_obj(VMCLOCK_TEST_GUEST_ADDR).unwrap();
        let state = vmclock.save();
        let vmclock_new = VmClock::restore(&mem, &state).unwrap();
        let guest_data_new: vmclock_abi = mem.read_obj(VMCLOCK_TEST_GUEST_ADDR).unwrap();
        // Restore bumps the disruption marker, so guest memory must differ
        // from the pre-snapshot state but match the restored device.
        assert_ne!(guest_data_new, vmclock.inner);
        assert_eq!(guest_data_new, vmclock_new.inner);
        assert_eq!(
            vmclock.inner.disruption_marker + 1,
            vmclock_new.inner.disruption_marker
        );
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/acpi/generated/mod.rs | src/vmm/src/devices/acpi/generated/mod.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![allow(clippy::all)]
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
pub mod vmclock_abi;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/acpi/generated/vmclock_abi.rs | src/vmm/src/devices/acpi/generated/vmclock_abi.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// automatically generated by tools/bindgen.sh
#![allow(
non_camel_case_types,
non_upper_case_globals,
dead_code,
non_snake_case,
clippy::ptr_as_ptr,
clippy::undocumented_unsafe_blocks,
missing_debug_implementations,
clippy::tests_outside_test_module,
unsafe_op_in_unsafe_fn,
clippy::redundant_static_lifetimes
)]
use serde::{Deserialize, Serialize};
pub const __BITS_PER_LONG: u32 = 64;
pub const __BITS_PER_LONG_LONG: u32 = 64;
pub const __FD_SETSIZE: u32 = 1024;
pub const VMCLOCK_MAGIC: u32 = 1263289174;
pub const VMCLOCK_COUNTER_ARM_VCNT: u8 = 0;
pub const VMCLOCK_COUNTER_X86_TSC: u8 = 1;
pub const VMCLOCK_COUNTER_INVALID: u8 = 255;
pub const VMCLOCK_TIME_UTC: u8 = 0;
pub const VMCLOCK_TIME_TAI: u8 = 1;
pub const VMCLOCK_TIME_MONOTONIC: u8 = 2;
pub const VMCLOCK_TIME_INVALID_SMEARED: u8 = 3;
pub const VMCLOCK_TIME_INVALID_MAYBE_SMEARED: u8 = 4;
pub const VMCLOCK_FLAG_TAI_OFFSET_VALID: u64 = 1;
pub const VMCLOCK_FLAG_DISRUPTION_SOON: u64 = 2;
pub const VMCLOCK_FLAG_DISRUPTION_IMMINENT: u64 = 4;
pub const VMCLOCK_FLAG_PERIOD_ESTERROR_VALID: u64 = 8;
pub const VMCLOCK_FLAG_PERIOD_MAXERROR_VALID: u64 = 16;
pub const VMCLOCK_FLAG_TIME_ESTERROR_VALID: u64 = 32;
pub const VMCLOCK_FLAG_TIME_MAXERROR_VALID: u64 = 64;
pub const VMCLOCK_FLAG_TIME_MONOTONIC: u64 = 128;
pub const VMCLOCK_STATUS_UNKNOWN: u8 = 0;
pub const VMCLOCK_STATUS_INITIALIZING: u8 = 1;
pub const VMCLOCK_STATUS_SYNCHRONIZED: u8 = 2;
pub const VMCLOCK_STATUS_FREERUNNING: u8 = 3;
pub const VMCLOCK_STATUS_UNRELIABLE: u8 = 4;
pub const VMCLOCK_SMEARING_STRICT: u8 = 0;
pub const VMCLOCK_SMEARING_NOON_LINEAR: u8 = 1;
pub const VMCLOCK_SMEARING_UTC_SLS: u8 = 2;
pub const VMCLOCK_LEAP_NONE: u8 = 0;
pub const VMCLOCK_LEAP_PRE_POS: u8 = 1;
pub const VMCLOCK_LEAP_PRE_NEG: u8 = 2;
pub const VMCLOCK_LEAP_POS: u8 = 3;
pub const VMCLOCK_LEAP_POST_POS: u8 = 4;
pub const VMCLOCK_LEAP_POST_NEG: u8 = 5;
pub type __s8 = ::std::os::raw::c_schar;
pub type __u8 = ::std::os::raw::c_uchar;
pub type __s16 = ::std::os::raw::c_short;
pub type __u16 = ::std::os::raw::c_ushort;
pub type __s32 = ::std::os::raw::c_int;
pub type __u32 = ::std::os::raw::c_uint;
pub type __s64 = ::std::os::raw::c_longlong;
pub type __u64 = ::std::os::raw::c_ulonglong;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct __kernel_fd_set {
pub fds_bits: [::std::os::raw::c_ulong; 16usize],
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of __kernel_fd_set"][::std::mem::size_of::<__kernel_fd_set>() - 128usize];
["Alignment of __kernel_fd_set"][::std::mem::align_of::<__kernel_fd_set>() - 8usize];
["Offset of field: __kernel_fd_set::fds_bits"]
[::std::mem::offset_of!(__kernel_fd_set, fds_bits) - 0usize];
};
pub type __kernel_sighandler_t =
::std::option::Option<unsafe extern "C" fn(arg1: ::std::os::raw::c_int)>;
pub type __kernel_key_t = ::std::os::raw::c_int;
pub type __kernel_mqd_t = ::std::os::raw::c_int;
pub type __kernel_old_uid_t = ::std::os::raw::c_ushort;
pub type __kernel_old_gid_t = ::std::os::raw::c_ushort;
pub type __kernel_old_dev_t = ::std::os::raw::c_ulong;
pub type __kernel_long_t = ::std::os::raw::c_long;
pub type __kernel_ulong_t = ::std::os::raw::c_ulong;
pub type __kernel_ino_t = __kernel_ulong_t;
pub type __kernel_mode_t = ::std::os::raw::c_uint;
pub type __kernel_pid_t = ::std::os::raw::c_int;
pub type __kernel_ipc_pid_t = ::std::os::raw::c_int;
pub type __kernel_uid_t = ::std::os::raw::c_uint;
pub type __kernel_gid_t = ::std::os::raw::c_uint;
pub type __kernel_suseconds_t = __kernel_long_t;
pub type __kernel_daddr_t = ::std::os::raw::c_int;
pub type __kernel_uid32_t = ::std::os::raw::c_uint;
pub type __kernel_gid32_t = ::std::os::raw::c_uint;
pub type __kernel_size_t = __kernel_ulong_t;
pub type __kernel_ssize_t = __kernel_long_t;
pub type __kernel_ptrdiff_t = __kernel_long_t;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct __kernel_fsid_t {
pub val: [::std::os::raw::c_int; 2usize],
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of __kernel_fsid_t"][::std::mem::size_of::<__kernel_fsid_t>() - 8usize];
["Alignment of __kernel_fsid_t"][::std::mem::align_of::<__kernel_fsid_t>() - 4usize];
["Offset of field: __kernel_fsid_t::val"]
[::std::mem::offset_of!(__kernel_fsid_t, val) - 0usize];
};
pub type __kernel_off_t = __kernel_long_t;
pub type __kernel_loff_t = ::std::os::raw::c_longlong;
pub type __kernel_old_time_t = __kernel_long_t;
pub type __kernel_time_t = __kernel_long_t;
pub type __kernel_time64_t = ::std::os::raw::c_longlong;
pub type __kernel_clock_t = __kernel_long_t;
pub type __kernel_timer_t = ::std::os::raw::c_int;
pub type __kernel_clockid_t = ::std::os::raw::c_int;
pub type __kernel_caddr_t = *mut ::std::os::raw::c_char;
pub type __kernel_uid16_t = ::std::os::raw::c_ushort;
pub type __kernel_gid16_t = ::std::os::raw::c_ushort;
pub type __s128 = i128;
pub type __u128 = u128;
pub type __le16 = __u16;
pub type __be16 = __u16;
pub type __le32 = __u32;
pub type __be32 = __u32;
pub type __le64 = __u64;
pub type __be64 = __u64;
pub type __sum16 = __u16;
pub type __wsum = __u32;
pub type __poll_t = ::std::os::raw::c_uint;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub struct vmclock_abi {
pub magic: __le32,
pub size: __le32,
pub version: __le16,
pub counter_id: __u8,
pub time_type: __u8,
pub seq_count: __le32,
pub disruption_marker: __le64,
pub flags: __le64,
pub pad: [__u8; 2usize],
pub clock_status: __u8,
pub leap_second_smearing_hint: __u8,
pub tai_offset_sec: __le16,
pub leap_indicator: __u8,
pub counter_period_shift: __u8,
pub counter_value: __le64,
pub counter_period_frac_sec: __le64,
pub counter_period_esterror_rate_frac_sec: __le64,
pub counter_period_maxerror_rate_frac_sec: __le64,
pub time_sec: __le64,
pub time_frac_sec: __le64,
pub time_esterror_nanosec: __le64,
pub time_maxerror_nanosec: __le64,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of vmclock_abi"][::std::mem::size_of::<vmclock_abi>() - 104usize];
["Alignment of vmclock_abi"][::std::mem::align_of::<vmclock_abi>() - 8usize];
["Offset of field: vmclock_abi::magic"][::std::mem::offset_of!(vmclock_abi, magic) - 0usize];
["Offset of field: vmclock_abi::size"][::std::mem::offset_of!(vmclock_abi, size) - 4usize];
["Offset of field: vmclock_abi::version"]
[::std::mem::offset_of!(vmclock_abi, version) - 8usize];
["Offset of field: vmclock_abi::counter_id"]
[::std::mem::offset_of!(vmclock_abi, counter_id) - 10usize];
["Offset of field: vmclock_abi::time_type"]
[::std::mem::offset_of!(vmclock_abi, time_type) - 11usize];
["Offset of field: vmclock_abi::seq_count"]
[::std::mem::offset_of!(vmclock_abi, seq_count) - 12usize];
["Offset of field: vmclock_abi::disruption_marker"]
[::std::mem::offset_of!(vmclock_abi, disruption_marker) - 16usize];
["Offset of field: vmclock_abi::flags"][::std::mem::offset_of!(vmclock_abi, flags) - 24usize];
["Offset of field: vmclock_abi::pad"][::std::mem::offset_of!(vmclock_abi, pad) - 32usize];
["Offset of field: vmclock_abi::clock_status"]
[::std::mem::offset_of!(vmclock_abi, clock_status) - 34usize];
["Offset of field: vmclock_abi::leap_second_smearing_hint"]
[::std::mem::offset_of!(vmclock_abi, leap_second_smearing_hint) - 35usize];
["Offset of field: vmclock_abi::tai_offset_sec"]
[::std::mem::offset_of!(vmclock_abi, tai_offset_sec) - 36usize];
["Offset of field: vmclock_abi::leap_indicator"]
[::std::mem::offset_of!(vmclock_abi, leap_indicator) - 38usize];
["Offset of field: vmclock_abi::counter_period_shift"]
[::std::mem::offset_of!(vmclock_abi, counter_period_shift) - 39usize];
["Offset of field: vmclock_abi::counter_value"]
[::std::mem::offset_of!(vmclock_abi, counter_value) - 40usize];
["Offset of field: vmclock_abi::counter_period_frac_sec"]
[::std::mem::offset_of!(vmclock_abi, counter_period_frac_sec) - 48usize];
["Offset of field: vmclock_abi::counter_period_esterror_rate_frac_sec"]
[::std::mem::offset_of!(vmclock_abi, counter_period_esterror_rate_frac_sec) - 56usize];
["Offset of field: vmclock_abi::counter_period_maxerror_rate_frac_sec"]
[::std::mem::offset_of!(vmclock_abi, counter_period_maxerror_rate_frac_sec) - 64usize];
["Offset of field: vmclock_abi::time_sec"]
[::std::mem::offset_of!(vmclock_abi, time_sec) - 72usize];
["Offset of field: vmclock_abi::time_frac_sec"]
[::std::mem::offset_of!(vmclock_abi, time_frac_sec) - 80usize];
["Offset of field: vmclock_abi::time_esterror_nanosec"]
[::std::mem::offset_of!(vmclock_abi, time_esterror_nanosec) - 88usize];
["Offset of field: vmclock_abi::time_maxerror_nanosec"]
[::std::mem::offset_of!(vmclock_abi, time_maxerror_nanosec) - 96usize];
};
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/persist.rs | src/vmm/src/devices/virtio/persist.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the structures needed for saving/restoring Virtio primitives.
use std::num::Wrapping;
use std::sync::atomic::Ordering;
use std::sync::{Arc, Mutex};
use serde::{Deserialize, Serialize};
use super::queue::{InvalidAvailIdx, QueueError};
use super::transport::mmio::IrqTrigger;
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::generated::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use crate::devices::virtio::queue::Queue;
use crate::devices::virtio::transport::mmio::MmioTransport;
use crate::snapshot::Persist;
use crate::vstate::memory::{GuestAddress, GuestMemoryMmap};
/// Errors thrown during restoring virtio state.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum PersistError {
/// Snapshot state contains invalid queue info.
InvalidInput,
/// Could not restore queue: {0}
QueueConstruction(QueueError),
/// {0}
InvalidAvailIdx(#[from] InvalidAvailIdx),
}
/// Queue information saved in snapshot.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct QueueState {
/// The maximal size in elements offered by the device
max_size: u16,
/// The queue size in elements the driver selected
size: u16,
/// Indicates if the queue is finished with configuration
ready: bool,
/// Guest physical address of the descriptor table
desc_table: u64,
/// Guest physical address of the available ring
avail_ring: u64,
/// Guest physical address of the used ring
used_ring: u64,
next_avail: Wrapping<u16>,
next_used: Wrapping<u16>,
/// The number of added used buffers since last guest kick
num_added: Wrapping<u16>,
}
/// Auxiliary structure for restoring queues.
#[derive(Debug, Clone)]
pub struct QueueConstructorArgs {
/// Pointer to guest memory.
pub mem: GuestMemoryMmap,
/// Is device this queue belong to activated
pub is_activated: bool,
}
impl Persist<'_> for Queue {
type State = QueueState;
type ConstructorArgs = QueueConstructorArgs;
type Error = QueueError;
fn save(&self) -> Self::State {
QueueState {
max_size: self.max_size,
size: self.size,
ready: self.ready,
desc_table: self.desc_table_address.0,
avail_ring: self.avail_ring_address.0,
used_ring: self.used_ring_address.0,
next_avail: self.next_avail,
next_used: self.next_used,
num_added: self.num_added,
}
}
fn restore(
constructor_args: Self::ConstructorArgs,
state: &Self::State,
) -> Result<Self, Self::Error> {
let mut queue = Queue {
max_size: state.max_size,
size: state.size,
ready: state.ready,
desc_table_address: GuestAddress(state.desc_table),
avail_ring_address: GuestAddress(state.avail_ring),
used_ring_address: GuestAddress(state.used_ring),
desc_table_ptr: std::ptr::null(),
avail_ring_ptr: std::ptr::null_mut(),
used_ring_ptr: std::ptr::null_mut(),
next_avail: state.next_avail,
next_used: state.next_used,
uses_notif_suppression: false,
num_added: state.num_added,
};
if constructor_args.is_activated {
queue.initialize(&constructor_args.mem)?;
}
Ok(queue)
}
}
/// State of a VirtioDevice.
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct VirtioDeviceState {
/// Device type.
pub device_type: u32,
/// Available virtio features.
pub avail_features: u64,
/// Negotiated virtio features.
pub acked_features: u64,
/// List of queues.
pub queues: Vec<QueueState>,
/// Flag for activated status.
pub activated: bool,
}
impl VirtioDeviceState {
/// Construct the virtio state of a device.
pub fn from_device(device: &dyn VirtioDevice) -> Self {
VirtioDeviceState {
device_type: device.device_type(),
avail_features: device.avail_features(),
acked_features: device.acked_features(),
queues: device.queues().iter().map(Persist::save).collect(),
activated: device.is_activated(),
}
}
/// Does sanity checking on the `self` state against expected values
/// and builds queues from state.
pub fn build_queues_checked(
&self,
mem: &GuestMemoryMmap,
expected_device_type: u32,
expected_num_queues: usize,
expected_queue_max_size: u16,
) -> Result<Vec<Queue>, PersistError> {
// Sanity check:
// - right device type,
// - acked features is a subset of available ones,
// - right number of queues,
if self.device_type != expected_device_type
|| (self.acked_features & !self.avail_features) != 0
|| self.queues.len() != expected_num_queues
{
return Err(PersistError::InvalidInput);
}
let uses_notif_suppression = (self.acked_features & (1u64 << VIRTIO_RING_F_EVENT_IDX)) != 0;
let queue_construction_args = QueueConstructorArgs {
mem: mem.clone(),
is_activated: self.activated,
};
let queues: Vec<Queue> = self
.queues
.iter()
.map(|queue_state| {
Queue::restore(queue_construction_args.clone(), queue_state)
.map(|mut queue| {
if uses_notif_suppression {
queue.enable_notif_suppression();
}
queue
})
.map_err(PersistError::QueueConstruction)
})
.collect::<Result<_, _>>()?;
for q in &queues {
// Sanity check queue size and queue max size.
if q.max_size != expected_queue_max_size {
return Err(PersistError::InvalidInput);
}
}
Ok(queues)
}
}
/// Transport information saved in snapshot.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct MmioTransportState {
// The register where feature bits are stored.
features_select: u32,
// The register where features page is selected.
acked_features_select: u32,
queue_select: u32,
device_status: u32,
config_generation: u32,
interrupt_status: u32,
}
/// Auxiliary structure for initializing the transport when resuming from a snapshot.
#[derive(Debug)]
pub struct MmioTransportConstructorArgs {
/// Pointer to guest memory.
pub mem: GuestMemoryMmap,
/// Interrupt to use for the device
pub interrupt: Arc<IrqTrigger>,
/// Device associated with the current MMIO state.
pub device: Arc<Mutex<dyn VirtioDevice>>,
/// Is device backed by vhost-user.
pub is_vhost_user: bool,
}
impl Persist<'_> for MmioTransport {
type State = MmioTransportState;
type ConstructorArgs = MmioTransportConstructorArgs;
type Error = ();
fn save(&self) -> Self::State {
MmioTransportState {
features_select: self.features_select,
acked_features_select: self.acked_features_select,
queue_select: self.queue_select,
device_status: self.device_status,
config_generation: self.config_generation,
interrupt_status: self.interrupt.irq_status.load(Ordering::SeqCst),
}
}
fn restore(
constructor_args: Self::ConstructorArgs,
state: &Self::State,
) -> Result<Self, Self::Error> {
let mut transport = MmioTransport::new(
constructor_args.mem,
constructor_args.interrupt,
constructor_args.device,
constructor_args.is_vhost_user,
);
transport.features_select = state.features_select;
transport.acked_features_select = state.acked_features_select;
transport.queue_select = state.queue_select;
transport.device_status = state.device_status;
transport.config_generation = state.config_generation;
transport
.interrupt
.irq_status
.store(state.interrupt_status, Ordering::SeqCst);
Ok(transport)
}
}
#[cfg(test)]
mod tests {
use vmm_sys_util::tempfile::TempFile;
use super::*;
use crate::devices::virtio::block::virtio::VirtioBlock;
use crate::devices::virtio::block::virtio::device::FileEngineType;
use crate::devices::virtio::block::virtio::test_utils::default_block_with_path;
use crate::devices::virtio::net::Net;
use crate::devices::virtio::net::test_utils::default_net;
use crate::devices::virtio::test_utils::default_mem;
use crate::devices::virtio::transport::mmio::tests::DummyDevice;
use crate::devices::virtio::vsock::{Vsock, VsockUnixBackend};
use crate::snapshot::Snapshot;
const DEFAULT_QUEUE_MAX_SIZE: u16 = 256;
impl Default for QueueState {
fn default() -> QueueState {
QueueState {
max_size: DEFAULT_QUEUE_MAX_SIZE,
size: DEFAULT_QUEUE_MAX_SIZE,
ready: false,
desc_table: 0,
avail_ring: 0,
used_ring: 0,
next_avail: Wrapping(0),
next_used: Wrapping(0),
num_added: Wrapping(0),
}
}
}
#[test]
fn test_virtiodev_sanity_checks() {
let max_size = DEFAULT_QUEUE_MAX_SIZE;
let mut state = VirtioDeviceState::default();
let mem = default_mem();
// Valid checks.
state.build_queues_checked(&mem, 0, 0, max_size).unwrap();
// Invalid dev-type.
state
.build_queues_checked(&mem, 1, 0, max_size)
.unwrap_err();
// Invalid num-queues.
state
.build_queues_checked(&mem, 0, 1, max_size)
.unwrap_err();
// Unavailable features acked.
state.acked_features = 1;
state
.build_queues_checked(&mem, 0, 0, max_size)
.unwrap_err();
// Validate queue sanity checks.
let mut state = VirtioDeviceState::default();
let good_q = QueueState::default();
state.queues = vec![good_q];
// Valid.
state
.build_queues_checked(&mem, 0, state.queues.len(), max_size)
.unwrap();
// Invalid max queue size.
let bad_q = QueueState {
max_size: max_size + 1,
..Default::default()
};
state.queues = vec![bad_q];
state
.build_queues_checked(&mem, 0, state.queues.len(), max_size)
.unwrap_err();
// Invalid: size > max.
let bad_q = QueueState {
size: max_size + 1,
..Default::default()
};
state.queues = vec![bad_q];
state.activated = true;
state
.build_queues_checked(&mem, 0, state.queues.len(), max_size)
.unwrap_err();
// activated && !q.is_valid()
let bad_q = QueueState::default();
state.queues = vec![bad_q];
state.activated = true;
state
.build_queues_checked(&mem, 0, state.queues.len(), max_size)
.unwrap_err();
}
#[test]
fn test_queue_persistence() {
let mem = default_mem();
let mut queue = Queue::new(128);
queue.ready = true;
queue.size = queue.max_size;
queue.initialize(&mem).unwrap();
let mut bytes = vec![0; 4096];
Snapshot::new(queue.save())
.save(&mut bytes.as_mut_slice())
.unwrap();
let ca = QueueConstructorArgs {
mem,
is_activated: true,
};
let restored_queue = Queue::restore(
ca,
&Snapshot::load_without_crc_check(bytes.as_slice())
.unwrap()
.data,
)
.unwrap();
assert_eq!(restored_queue, queue);
}
#[test]
fn test_virtio_device_state_serde() {
let dummy = DummyDevice::new();
let mut mem = vec![0; 4096];
let state = VirtioDeviceState::from_device(&dummy);
Snapshot::new(&state).save(&mut mem.as_mut_slice()).unwrap();
let restored_state: VirtioDeviceState = Snapshot::load_without_crc_check(mem.as_slice())
.unwrap()
.data;
assert_eq!(restored_state, state);
}
impl PartialEq for MmioTransport {
fn eq(&self, other: &MmioTransport) -> bool {
let self_dev_type = self.device().lock().unwrap().device_type();
self.acked_features_select == other.acked_features_select &&
self.features_select == other.features_select &&
self.queue_select == other.queue_select &&
self.device_status == other.device_status &&
self.config_generation == other.config_generation &&
self.interrupt.irq_status.load(Ordering::SeqCst) == other.interrupt.irq_status.load(Ordering::SeqCst) &&
// Only checking equality of device type, actual device (de)ser is tested by that
// device's tests.
self_dev_type == other.device().lock().unwrap().device_type()
}
}
fn generic_mmiotransport_persistence_test(
mmio_transport: MmioTransport,
interrupt: Arc<IrqTrigger>,
mem: GuestMemoryMmap,
device: Arc<Mutex<dyn VirtioDevice>>,
) {
let mut buf = vec![0; 4096];
Snapshot::new(mmio_transport.save())
.save(&mut buf.as_mut_slice())
.unwrap();
let restore_args = MmioTransportConstructorArgs {
mem,
interrupt,
device,
is_vhost_user: false,
};
let restored_mmio_transport = MmioTransport::restore(
restore_args,
&Snapshot::load_without_crc_check(buf.as_slice())
.unwrap()
.data,
)
.unwrap();
assert_eq!(restored_mmio_transport, mmio_transport);
}
fn create_default_block() -> (
MmioTransport,
Arc<IrqTrigger>,
GuestMemoryMmap,
Arc<Mutex<VirtioBlock>>,
) {
let mem = default_mem();
let interrupt = Arc::new(IrqTrigger::new());
// Create backing file.
let f = TempFile::new().unwrap();
f.as_file().set_len(0x1000).unwrap();
let block = default_block_with_path(
f.as_path().to_str().unwrap().to_string(),
FileEngineType::default(),
);
let block = Arc::new(Mutex::new(block));
let mmio_transport =
MmioTransport::new(mem.clone(), interrupt.clone(), block.clone(), false);
(mmio_transport, interrupt, mem, block)
}
fn create_default_net() -> (
MmioTransport,
Arc<IrqTrigger>,
GuestMemoryMmap,
Arc<Mutex<Net>>,
) {
let mem = default_mem();
let interrupt = Arc::new(IrqTrigger::new());
let net = Arc::new(Mutex::new(default_net()));
let mmio_transport = MmioTransport::new(mem.clone(), interrupt.clone(), net.clone(), false);
(mmio_transport, interrupt, mem, net)
}
fn default_vsock() -> (
MmioTransport,
Arc<IrqTrigger>,
GuestMemoryMmap,
Arc<Mutex<Vsock<VsockUnixBackend>>>,
) {
let mem = default_mem();
let interrupt = Arc::new(IrqTrigger::new());
let guest_cid = 52;
let mut temp_uds_path = TempFile::new().unwrap();
// Remove the file so the path can be used by the socket.
temp_uds_path.remove().unwrap();
let uds_path = String::from(temp_uds_path.as_path().to_str().unwrap());
let backend = VsockUnixBackend::new(guest_cid, uds_path).unwrap();
let vsock = Vsock::new(guest_cid, backend).unwrap();
let vsock = Arc::new(Mutex::new(vsock));
let mmio_transport =
MmioTransport::new(mem.clone(), interrupt.clone(), vsock.clone(), false);
(mmio_transport, interrupt, mem, vsock)
}
#[test]
fn test_block_over_mmiotransport_persistence() {
let (mmio_transport, interrupt, mem, block) = create_default_block();
generic_mmiotransport_persistence_test(mmio_transport, interrupt, mem, block);
}
#[test]
fn test_net_over_mmiotransport_persistence() {
let (mmio_transport, interrupt, mem, net) = create_default_net();
generic_mmiotransport_persistence_test(mmio_transport, interrupt, mem, net);
}
#[test]
fn test_vsock_over_mmiotransport_persistence() {
let (mmio_transport, interrupt, mem, vsock) = default_vsock();
generic_mmiotransport_persistence_test(mmio_transport, interrupt, mem, vsock);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/device.rs | src/vmm/src/devices/virtio/device.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::fmt;
use std::sync::Arc;
use std::sync::atomic::AtomicU32;
use vmm_sys_util::eventfd::EventFd;
use super::ActivateError;
use super::queue::{Queue, QueueError};
use super::transport::VirtioInterrupt;
use crate::devices::virtio::AsAny;
use crate::logger::warn;
use crate::vstate::memory::GuestMemoryMmap;
/// State of an active VirtIO device
#[derive(Debug, Clone)]
pub struct ActiveState {
pub mem: GuestMemoryMmap,
pub interrupt: Arc<dyn VirtioInterrupt>,
}
/// Enum that indicates if a VirtioDevice is inactive or has been activated
/// and memory attached to it.
#[derive(Debug)]
pub enum DeviceState {
Inactive,
Activated(ActiveState),
}
impl DeviceState {
/// Checks if the device is activated.
pub fn is_activated(&self) -> bool {
match self {
DeviceState::Inactive => false,
DeviceState::Activated(_) => true,
}
}
/// Gets the memory and interrupt attached to the device if it is activated.
pub fn active_state(&self) -> Option<&ActiveState> {
match self {
DeviceState::Activated(state) => Some(state),
DeviceState::Inactive => None,
}
}
}
/// Trait for virtio devices to be driven by a virtio transport.
///
/// The lifecycle of a virtio device is to be moved to a virtio transport, which will then query the
/// device. The virtio devices needs to create queues, events and event fds for interrupts and
/// expose them to the transport via get_queues/get_queue_events/get_interrupt/get_interrupt_status
/// fns.
pub trait VirtioDevice: AsAny + Send {
/// Get the available features offered by device.
fn avail_features(&self) -> u64;
/// Get acknowledged features of the driver.
fn acked_features(&self) -> u64;
/// Set acknowledged features of the driver.
/// This function must maintain the following invariant:
/// - self.avail_features() & self.acked_features() = self.get_acked_features()
fn set_acked_features(&mut self, acked_features: u64);
/// Check if virtio device has negotiated given feature.
fn has_feature(&self, feature: u64) -> bool {
(self.acked_features() & (1 << feature)) != 0
}
/// The virtio device type (as a constant of the struct).
fn const_device_type() -> u32
where
Self: Sized;
/// The virtio device type.
///
/// It should be the same as returned by Self::const_device_type().
fn device_type(&self) -> u32;
/// Returns the device queues.
fn queues(&self) -> &[Queue];
/// Returns a mutable reference to the device queues.
fn queues_mut(&mut self) -> &mut [Queue];
/// Returns the device queues event fds.
fn queue_events(&self) -> &[EventFd];
/// Returns the current device interrupt status.
fn interrupt_status(&self) -> Arc<AtomicU32> {
self.interrupt_trigger().status()
}
fn interrupt_trigger(&self) -> &dyn VirtioInterrupt;
/// The set of feature bits shifted by `page * 32`.
fn avail_features_by_page(&self, page: u32) -> u32 {
let avail_features = self.avail_features();
match page {
// Get the lower 32-bits of the features bitfield.
0 => (avail_features & 0xFFFFFFFF) as u32,
// Get the upper 32-bits of the features bitfield.
1 => (avail_features >> 32) as u32,
_ => {
warn!("Received request for unknown features page.");
0u32
}
}
}
/// Acknowledges that this set of features should be enabled.
fn ack_features_by_page(&mut self, page: u32, value: u32) {
let mut v = match page {
0 => u64::from(value),
1 => u64::from(value) << 32,
_ => {
warn!("Cannot acknowledge unknown features page: {}", page);
0u64
}
};
// Check if the guest is ACK'ing a feature that we didn't claim to have.
let avail_features = self.avail_features();
let unrequested_features = v & !avail_features;
if unrequested_features != 0 {
warn!("Received acknowledge request for unknown feature: {:#x}", v);
// Don't count these features as acked.
v &= !unrequested_features;
}
self.set_acked_features(self.acked_features() | v);
}
/// Reads this device configuration space at `offset`.
fn read_config(&self, offset: u64, data: &mut [u8]);
/// Writes to this device configuration space at `offset`.
fn write_config(&mut self, offset: u64, data: &[u8]);
/// Performs the formal activation for a device, which can be verified also with `is_activated`.
fn activate(
&mut self,
mem: GuestMemoryMmap,
interrupt: Arc<dyn VirtioInterrupt>,
) -> Result<(), ActivateError>;
/// Checks if the resources of this device are activated.
fn is_activated(&self) -> bool;
/// Optionally deactivates this device and returns ownership of the guest memory map, interrupt
/// event, and queue events.
fn reset(&mut self) -> Option<(Arc<dyn VirtioInterrupt>, Vec<EventFd>)> {
None
}
/// Mark pages used by queues as dirty.
fn mark_queue_memory_dirty(&mut self, mem: &GuestMemoryMmap) -> Result<(), QueueError> {
for queue in self.queues_mut() {
queue.initialize(mem)?
}
Ok(())
}
/// Kick the device, as if it had received external events.
fn kick(&mut self) {}
}
impl fmt::Debug for dyn VirtioDevice {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "VirtioDevice type {}", self.device_type())
}
}
/// Utility to define both const_device_type and device_type with a u32 constant
#[macro_export]
macro_rules! impl_device_type {
($const_type:expr) => {
fn const_device_type() -> u32 {
$const_type
}
fn device_type(&self) -> u32 {
Self::const_device_type()
}
};
}
#[cfg(test)]
pub(crate) mod tests {
use super::*;
#[derive(Debug)]
struct MockVirtioDevice {
avail_features: u64,
acked_features: u64,
}
impl VirtioDevice for MockVirtioDevice {
impl_device_type!(0);
fn avail_features(&self) -> u64 {
self.avail_features
}
fn acked_features(&self) -> u64 {
self.acked_features
}
fn set_acked_features(&mut self, acked_features: u64) {
self.acked_features = acked_features
}
fn queues(&self) -> &[Queue] {
todo!()
}
fn queues_mut(&mut self) -> &mut [Queue] {
todo!()
}
fn queue_events(&self) -> &[EventFd] {
todo!()
}
fn interrupt_trigger(&self) -> &dyn VirtioInterrupt {
todo!()
}
fn read_config(&self, _offset: u64, _data: &mut [u8]) {
todo!()
}
fn write_config(&mut self, _offset: u64, _data: &[u8]) {
todo!()
}
fn activate(
&mut self,
_mem: GuestMemoryMmap,
_interrupt: Arc<dyn VirtioInterrupt>,
) -> Result<(), ActivateError> {
todo!()
}
fn is_activated(&self) -> bool {
todo!()
}
}
#[test]
fn test_has_feature() {
let mut device = MockVirtioDevice {
avail_features: 0,
acked_features: 0,
};
let mock_feature_1 = 1u64;
assert!(!device.has_feature(mock_feature_1));
device.acked_features = 1 << mock_feature_1;
assert!(device.has_feature(mock_feature_1));
let mock_feature_2 = 2u64;
assert!(!device.has_feature(mock_feature_2));
device.acked_features = (1 << mock_feature_1) | (1 << mock_feature_2);
assert!(device.has_feature(mock_feature_1));
assert!(device.has_feature(mock_feature_2));
}
#[test]
fn test_features() {
let features: u64 = 0x11223344_55667788;
let mut device = MockVirtioDevice {
avail_features: features,
acked_features: 0,
};
assert_eq!(
device.avail_features_by_page(0),
(features & 0xFFFFFFFF) as u32,
);
assert_eq!(device.avail_features_by_page(1), (features >> 32) as u32);
for i in 2..10 {
assert_eq!(device.avail_features_by_page(i), 0u32);
}
for i in 0..10 {
device.ack_features_by_page(i, u32::MAX);
}
assert_eq!(device.acked_features, features);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/test_utils.rs | src/vmm/src/devices/virtio/test_utils.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![doc(hidden)]
use std::fmt::Debug;
use std::marker::PhantomData;
use std::mem;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use crate::devices::virtio::queue::Queue;
use crate::devices::virtio::transport::VirtioInterrupt;
use crate::devices::virtio::transport::mmio::IrqTrigger;
use crate::test_utils::single_region_mem;
use crate::utils::{align_up, u64_to_usize};
use crate::vstate::memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
#[macro_export]
macro_rules! check_metric_after_block {
($metric:expr, $delta:expr, $block:expr) => {{
let before = $metric.count();
let _ = $block;
assert_eq!($metric.count() - before, $delta, "unexpected metric value");
}};
}
/// Creates a [`GuestMemoryMmap`] with a single region of size 65536 (= 0x10000 hex) starting at
/// guest physical address 0
pub fn default_mem() -> GuestMemoryMmap {
single_region_mem(0x10000)
}
/// Creates a default ['IrqTrigger'] interrupt for a VirtIO device.
pub fn default_interrupt() -> Arc<dyn VirtioInterrupt> {
Arc::new(IrqTrigger::new())
}
#[derive(Debug)]
pub struct InputData {
pub data: Vec<u8>,
pub read_pos: AtomicUsize,
}
impl InputData {
pub fn get_slice(&self, len: usize) -> &[u8] {
let old_pos = self.read_pos.fetch_add(len, Ordering::AcqRel);
&self.data[old_pos..old_pos + len]
}
}
// Represents a location in GuestMemoryMmap which holds a given type.
#[derive(Debug)]
pub struct SomeplaceInMemory<'a, T> {
pub location: GuestAddress,
mem: &'a GuestMemoryMmap,
phantom: PhantomData<*const T>,
}
// The ByteValued trait is required to use mem.read_obj_from_addr and write_obj_at_addr.
impl<'a, T> SomeplaceInMemory<'a, T>
where
T: Debug + crate::vstate::memory::ByteValued,
{
fn new(location: GuestAddress, mem: &'a GuestMemoryMmap) -> Self {
SomeplaceInMemory {
location,
mem,
phantom: PhantomData,
}
}
// Reads from the actual memory location.
pub fn get(&self) -> T {
self.mem.read_obj(self.location).unwrap()
}
// Writes to the actual memory location.
pub fn set(&self, val: T) {
self.mem.write_obj(val, self.location).unwrap()
}
// This function returns a place in memory which holds a value of type U, and starts
// offset bytes after the current location.
fn map_offset<U: Debug>(&self, offset: usize) -> SomeplaceInMemory<'a, U> {
SomeplaceInMemory {
location: self.location.checked_add(offset as u64).unwrap(),
mem: self.mem,
phantom: PhantomData,
}
}
// This function returns a place in memory which holds a value of type U, and starts
// immediately after the end of self (which is location + sizeof(T)).
fn next_place<U: Debug>(&self) -> SomeplaceInMemory<'a, U> {
self.map_offset::<U>(mem::size_of::<T>())
}
fn end(&self) -> GuestAddress {
self.location
.checked_add(mem::size_of::<T>() as u64)
.unwrap()
}
}
// Represents a virtio descriptor in guest memory.
#[derive(Debug)]
pub struct VirtqDesc<'a> {
pub addr: SomeplaceInMemory<'a, u64>,
pub len: SomeplaceInMemory<'a, u32>,
pub flags: SomeplaceInMemory<'a, u16>,
pub next: SomeplaceInMemory<'a, u16>,
}
impl<'a> VirtqDesc<'a> {
    // Descriptors must start on a 16-byte boundary.
    pub const ALIGNMENT: u64 = 16;

    // Lays out the four descriptor fields back to back starting at `start`.
    // Panics if `start` is not 16-byte aligned.
    fn new(start: GuestAddress, mem: &'a GuestMemoryMmap) -> Self {
        assert_eq!(start.0 & (Self::ALIGNMENT - 1), 0);
        let addr = SomeplaceInMemory::new(start, mem);
        let len = addr.next_place();
        let flags = len.next_place();
        let next = flags.next_place();
        Self {
            addr,
            len,
            flags,
            next,
        }
    }

    // Guest address where this descriptor begins.
    fn start(&self) -> GuestAddress {
        self.addr.location
    }

    // First guest address past this descriptor.
    fn end(&self) -> GuestAddress {
        self.next.end()
    }

    // Writes all four descriptor fields in one go.
    pub fn set(&self, addr: u64, len: u32, flags: u16, next: u16) {
        self.addr.set(addr);
        self.len.set(len);
        self.flags.set(flags);
        self.next.set(next);
    }

    // The guest memory this descriptor lives in.
    pub fn memory(&self) -> &'a GuestMemoryMmap {
        self.addr.mem
    }

    // Copies `data` into the buffer this descriptor points at.
    // Panics if the descriptor's recorded length is smaller than `data`.
    pub fn set_data(&mut self, data: &[u8]) {
        assert!(self.len.get() as usize >= data.len());
        self.memory()
            .write_slice(data, GuestAddress::new(self.addr.get()))
            .unwrap();
    }

    // Asserts that the buffer's first bytes are exactly `expected_data`.
    pub fn check_data(&self, expected_data: &[u8]) {
        assert!(self.len.get() as usize >= expected_data.len());
        let mut actual = vec![0; expected_data.len()];
        self.memory()
            .read_slice(&mut actual, GuestAddress::new(self.addr.get()))
            .unwrap();
        assert_eq!(actual.as_slice(), expected_data);
    }
}
// Represents a virtio queue ring. The only difference between the used and available rings,
// is the ring element type.
#[derive(Debug)]
pub struct VirtqRing<'a, T> {
    // The ring's flags word.
    pub flags: SomeplaceInMemory<'a, u16>,
    // Index of the next free slot; incremented each time an entry is published.
    pub idx: SomeplaceInMemory<'a, u16>,
    // The ring entries themselves (`qsize` of them).
    pub ring: Vec<SomeplaceInMemory<'a, T>>,
    // Event word placed immediately after the entries (used_event/avail_event
    // in the virtio split-ring layout — TODO confirm usage against the spec).
    pub event: SomeplaceInMemory<'a, u16>,
}
impl<'a, T> VirtqRing<'a, T>
where
    T: Debug + crate::vstate::memory::ByteValued,
{
    // Lays out a ring at `start`: flags (u16), idx (u16), `qsize` entries of `T`,
    // then the event word. flags, idx and event are zeroed.
    // Panics if `start` is not aligned to `alignment`.
    fn new(start: GuestAddress, mem: &'a GuestMemoryMmap, qsize: u16, alignment: usize) -> Self {
        assert_eq!(start.0 & (alignment as u64 - 1), 0);
        let flags = SomeplaceInMemory::new(start, mem);
        let idx = flags.next_place();
        // Chain the entries one after another, starting right after `idx`.
        let mut ring = Vec::with_capacity(qsize as usize);
        let mut pending = idx.next_place();
        for _ in 1..qsize {
            let following = pending.next_place();
            ring.push(pending);
            pending = following;
        }
        ring.push(pending);
        let event = ring.last().unwrap().next_place();
        // Initialize the control words to zero.
        flags.set(0);
        idx.set(0);
        event.set(0);
        Self {
            flags,
            idx,
            ring,
            event,
        }
    }

    // First guest address past this ring.
    pub fn end(&self) -> GuestAddress {
        self.event.end()
    }
}
// One entry of the used ring, mirroring virtio's `virtq_used_elem`:
// the head index of a completed descriptor chain and the byte count
// reported for it (see `check_used_elem` below for how tests consume it).
#[repr(C)]
#[derive(Debug, Clone, Copy, Default)]
pub struct VirtqUsedElem {
    // Head index of the descriptor chain this entry refers to.
    pub id: u32,
    // Number of bytes reported as written for that chain.
    pub len: u32,
}
// SAFETY: `VirtqUsedElem` is a POD (`#[repr(C)]`, two `u32`s) and contains no
// padding, so any byte pattern is a valid value.
unsafe impl crate::vstate::memory::ByteValued for VirtqUsedElem {}
// Available ring: entries are descriptor-chain head indices.
pub type VirtqAvail<'a> = VirtqRing<'a, u16>;
// Used ring: entries are (id, len) pairs.
pub type VirtqUsed<'a> = VirtqRing<'a, VirtqUsedElem>;
// A complete virtqueue laid out in guest memory: descriptor table,
// available ring and used ring.
#[derive(Debug)]
pub struct VirtQueue<'a> {
    // The descriptor table: one `VirtqDesc` per queue slot.
    pub dtable: Vec<VirtqDesc<'a>>,
    // The available ring.
    pub avail: VirtqAvail<'a>,
    // The used ring.
    pub used: VirtqUsed<'a>,
}
impl<'a> VirtQueue<'a> {
    // We try to make sure things are aligned properly :-s
    //
    // Lays out a full virtqueue at `start`: the descriptor table first, then the
    // available ring (2-byte aligned), then the used ring (4-byte aligned).
    pub fn new(start: GuestAddress, mem: &'a GuestMemoryMmap, qsize: u16) -> Self {
        // power of 2? (queue sizes must be non-zero powers of two)
        assert!(qsize > 0 && qsize & (qsize - 1) == 0);
        let mut dtable = Vec::with_capacity(qsize as usize);
        let mut end = start;
        // Descriptors are 16 bytes each, so placing them back to back keeps
        // every one of them 16-byte aligned.
        for _ in 0..qsize {
            let d = VirtqDesc::new(end, mem);
            end = d.end();
            dtable.push(d);
        }
        const AVAIL_ALIGN: usize = 2;
        let avail = VirtqAvail::new(end, mem, qsize, AVAIL_ALIGN);
        const USED_ALIGN: u64 = 4;
        let mut x = avail.end().0;
        x = align_up(x, USED_ALIGN);
        let used = VirtqUsed::new(GuestAddress(x), mem, qsize, u64_to_usize(USED_ALIGN));
        VirtQueue {
            dtable,
            avail,
            used,
        }
    }
    // Guest memory backing this queue.
    pub fn memory(&self) -> &'a GuestMemoryMmap {
        self.used.flags.mem
    }
    // Queue size, i.e. number of descriptors.
    pub fn size(&self) -> u16 {
        // Safe to unwrap because the size is specified as a u16 when the table is first created.
        self.dtable.len().try_into().unwrap()
    }
    pub fn dtable_start(&self) -> GuestAddress {
        self.dtable.first().unwrap().start()
    }
    pub fn avail_start(&self) -> GuestAddress {
        self.avail.flags.location
    }
    pub fn used_start(&self) -> GuestAddress {
        self.used.flags.location
    }
    // Creates a new Queue, using the underlying memory regions represented by the VirtQueue.
    pub fn create_queue(&self) -> Queue {
        let mut q = Queue::new(self.size());
        q.size = self.size();
        q.ready = true;
        q.desc_table_address = self.dtable_start();
        q.avail_ring_address = self.avail_start();
        q.used_ring_address = self.used_start();
        q.initialize(self.memory()).unwrap();
        q
    }
    // First guest address occupied by the queue (start of the descriptor table).
    pub fn start(&self) -> GuestAddress {
        self.dtable_start()
    }
    // First guest address past the queue (end of the used ring).
    pub fn end(&self) -> GuestAddress {
        self.used.end()
    }
    // Asserts that used-ring entry `used_index` reports chain head `expected_id`
    // with `expected_len` bytes.
    pub fn check_used_elem(&self, used_index: u16, expected_id: u16, expected_len: u32) {
        let used_elem = self.used.ring[used_index as usize].get();
        assert_eq!(used_elem.id, u32::from(expected_id));
        assert_eq!(used_elem.len, expected_len);
    }
}
#[cfg(test)]
pub(crate) mod test {
    use std::fmt::{self, Debug};
    use std::sync::{Arc, Mutex, MutexGuard};
    use event_manager::{EventManager, MutEventSubscriber, SubscriberId, SubscriberOps};
    use crate::devices::virtio::device::VirtioDevice;
    use crate::devices::virtio::net::MAX_BUFFER_SIZE;
    use crate::devices::virtio::queue::{Queue, VIRTQ_DESC_F_NEXT};
    use crate::devices::virtio::test_utils::{VirtQueue, VirtqDesc, default_interrupt};
    use crate::test_utils::single_region_mem;
    use crate::vstate::memory::{Address, GuestAddress, GuestMemoryMmap};
    // Creates a single-region guest memory large enough for the device queues and test data.
    pub fn create_virtio_mem() -> GuestMemoryMmap {
        single_region_mem(MAX_BUFFER_SIZE)
    }
    /// Provides functionality necessary for testing a VirtIO device with
    /// [`VirtioTestHelper`](VirtioTestHelper)
    pub trait VirtioTestDevice: VirtioDevice {
        /// Replace the queues used by the device
        fn set_queues(&mut self, queues: Vec<Queue>);
        /// Number of queues this device supports
        fn num_queues(&self) -> usize;
    }
    /// A helper type to allow testing VirtIO devices
    ///
    /// `VirtioTestHelper` provides functionality to allow testing a VirtIO device by
    /// 1. Emulating the guest side of things (essentially the handling of Virtqueues) and
    /// 2. Emulating an event loop that handles device specific events
    ///
    /// It creates and handles a guest memory address space, which it uses for keeping the
    /// Virtqueues of the device and storing data, i.e. storing data described by DescriptorChains
    /// that the guest would pass to the device during normal operation
    pub struct VirtioTestHelper<'a, T>
    where
        T: VirtioTestDevice + MutEventSubscriber,
    {
        // Event loop that dispatches the device's registered events.
        event_manager: EventManager<Arc<Mutex<T>>>,
        _subscriber_id: SubscriberId,
        // The device under test, shared with the event manager.
        device: Arc<Mutex<T>>,
        // Guest-side view of the device's queues.
        virtqueues: Vec<VirtQueue<'a>>,
    }
    impl<T: VirtioTestDevice + MutEventSubscriber + Debug> fmt::Debug for VirtioTestHelper<'_, T> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.debug_struct("VirtioTestHelper")
                .field("event_manager", &"?")
                .field("_subscriber_id", &self._subscriber_id)
                .field("device", &self.device)
                .field("virtqueues", &self.virtqueues)
                .finish()
        }
    }
    impl<'a, T> VirtioTestHelper<'a, T>
    where
        T: VirtioTestDevice + MutEventSubscriber + Debug,
    {
        const QUEUE_SIZE: u16 = 16;
        // Helper function to create a set of Virtqueues for the device
        fn create_virtqueues(mem: &'a GuestMemoryMmap, num_queues: usize) -> Vec<VirtQueue<'a>> {
            (0..num_queues)
                .scan(GuestAddress(0), |next_addr, _| {
                    let vqueue = VirtQueue::new(*next_addr, mem, Self::QUEUE_SIZE);
                    // Address for the next virt queue will be the first aligned address after
                    // the end of this one.
                    *next_addr = vqueue.end().unchecked_align_up(VirtqDesc::ALIGNMENT);
                    Some(vqueue)
                })
                .collect::<Vec<_>>()
        }
        /// Create a new Virtio Device test helper
        pub fn new(mem: &'a GuestMemoryMmap, mut device: T) -> VirtioTestHelper<'a, T> {
            let mut event_manager = EventManager::new().unwrap();
            let virtqueues = Self::create_virtqueues(mem, device.num_queues());
            let queues = virtqueues.iter().map(|vq| vq.create_queue()).collect();
            device.set_queues(queues);
            let device = Arc::new(Mutex::new(device));
            let _subscriber_id = event_manager.add_subscriber(device.clone());
            Self {
                event_manager,
                _subscriber_id,
                device,
                virtqueues,
            }
        }
        /// Get a (locked) reference to the device
        pub fn device(&mut self) -> MutexGuard<'_, T> {
            self.device.lock().unwrap()
        }
        /// Activate the device
        pub fn activate_device(&mut self, mem: &'a GuestMemoryMmap) {
            let interrupt = default_interrupt();
            self.device
                .lock()
                .unwrap()
                .activate(mem.clone(), interrupt)
                .unwrap();
            // Process the activate event
            let ev_count = self.event_manager.run_with_timeout(100).unwrap();
            assert_eq!(ev_count, 1);
        }
        /// Get the start of the data region
        ///
        /// The first address that can be used for data in the guest memory mmap
        /// is the first address after the memory occupied by the last Virtqueue
        /// used by the device
        pub fn data_address(&self) -> u64 {
            self.virtqueues.last().unwrap().end().raw_value()
        }
        /// Add a new Descriptor in one of the device's queues in the form of scatter gather
        ///
        /// This function adds in one of the queues of the device a DescriptorChain at some offset
        /// in the "data range" of the guest memory. The number of descriptors to create is passed
        /// as a list of descriptors (a tuple of (index, addr, length, flags)).
        ///
        /// The total size of the buffer is the sum of all lengths of this list of descriptors.
        /// The first descriptor will be stored at `self.data_address() + addr_offset`. Subsequent
        /// descriptors will be placed at random addresses after that.
        ///
        /// # Arguments
        ///
        /// * `queue` - The index of the device queue to use
        /// * `addr_offset` - Offset within the data region where to put the first descriptor
        /// * `desc_list` - List of descriptors to create in the chain
        pub fn add_scatter_gather(
            &mut self,
            queue: usize,
            addr_offset: u64,
            desc_list: &[(u16, u64, u32, u16)],
        ) {
            let device = self.device.lock().unwrap();
            let event_fd = &device.queue_events()[queue];
            let vq = &self.virtqueues[queue];
            // Create the descriptor chain
            let mut iter = desc_list.iter().peekable();
            while let Some(&(index, addr, len, flags)) = iter.next() {
                let desc = &vq.dtable[index as usize];
                desc.set(addr, len, flags, 0);
                // Link to the following entry if this is not the last descriptor.
                if let Some(&&(next_index, _, _, _)) = iter.peek() {
                    desc.flags.set(flags | VIRTQ_DESC_F_NEXT);
                    desc.next.set(next_index);
                }
            }
            // Mark the chain as available.
            if let Some(&(index, _, _, _)) = desc_list.first() {
                let ring_index = vq.avail.idx.get();
                vq.avail.ring[ring_index as usize].set(index);
                vq.avail.idx.set(ring_index + 1);
            }
            // Kick the device so it processes the new chain.
            event_fd.write(1).unwrap();
        }
        /// Get the address of a descriptor
        pub fn desc_address(&self, queue: usize, index: usize) -> GuestAddress {
            GuestAddress(self.virtqueues[queue].dtable[index].addr.get())
        }
        /// Add a new Descriptor in one of the device's queues
        ///
        /// This function adds in one of the queues of the device a DescriptorChain at some offset
        /// in the "data range" of the guest memory. The number of descriptors to create is passed
        /// as a list of descriptors (a triple of (index, length, flags)).
        ///
        /// The total size of the buffer is the sum of all lengths of this list of descriptors.
        /// The first descriptor will be stored at `self.data_address() + addr_offset`. Subsequent
        /// descriptors will be placed at random addresses after that.
        ///
        /// # Arguments
        ///
        /// * `queue` - The index of the device queue to use
        /// * `addr_offset` - Offset within the data region where to put the first descriptor
        /// * `desc_list` - List of descriptors to create in the chain
        pub fn add_desc_chain(
            &mut self,
            queue: usize,
            addr_offset: u64,
            desc_list: &[(u16, u32, u16)],
        ) {
            let device = self.device.lock().unwrap();
            let event_fd = &device.queue_events()[queue];
            let vq = &self.virtqueues[queue];
            // Create the descriptor chain
            let mut iter = desc_list.iter().peekable();
            let mut addr = self.data_address() + addr_offset;
            while let Some(&(index, len, flags)) = iter.next() {
                let desc = &vq.dtable[index as usize];
                desc.set(addr, len, flags, 0);
                // Link to the following entry if this is not the last descriptor.
                if let Some(&&(next_index, _, _)) = iter.peek() {
                    desc.flags.set(flags | VIRTQ_DESC_F_NEXT);
                    desc.next.set(next_index);
                }
                addr += u64::from(len);
                // Add small random gaps between descriptor addresses in order to make sure we
                // don't blindly read contiguous memory.
                addr += u64::from(vmm_sys_util::rand::xor_pseudo_rng_u32()) % 10;
            }
            // Mark the chain as available.
            if let Some(&(index, _, _)) = desc_list.first() {
                let ring_index = vq.avail.idx.get();
                vq.avail.ring[ring_index as usize].set(index);
                vq.avail.idx.set(ring_index + 1);
            }
            // Kick the device so it processes the new chain.
            event_fd.write(1).unwrap();
        }
        /// Emulate the device for a period of time
        ///
        /// # Arguments
        ///
        /// * `msec` - The amount of time in milliseconds for which to emulate
        pub fn emulate_for_msec(&mut self, msec: i32) -> Result<usize, event_manager::Error> {
            self.event_manager.run_with_timeout(msec)
        }
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/vhost_user_metrics.rs | src/vmm/src/devices/virtio/vhost_user_metrics.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the metrics system for vhost-user devices.
//!
//! # Metrics format
//! The metrics are flushed in JSON when requested by vmm::logger::metrics::METRICS.write().
//!
//! ## JSON example with metrics:
//! ```json
//! {
//! "vhost_user_{mod}_id0": {
//! "activate_fails": "SharedIncMetric",
//! "cfg_fails": "SharedIncMetric",
//! "init_time_us": SharedStoreMetric,
//! "activate_time_us": SharedStoreMetric,
//! "config_change_time_us": SharedStoreMetric,
//! }
//! "vhost_user_{mod}_id1": {
//! "activate_fails": "SharedIncMetric",
//! "cfg_fails": "SharedIncMetric",
//! "init_time_us": SharedStoreMetric,
//! "activate_time_us": SharedStoreMetric,
//! "config_change_time_us": SharedStoreMetric,
//! }
//! ...
//! "vhost_user_{mod}_idN": {
//! "activate_fails": "SharedIncMetric",
//! "cfg_fails": "SharedIncMetric",
//! "init_time_us": SharedStoreMetric,
//! "activate_time_us": SharedStoreMetric,
//! "config_change_time_us": SharedStoreMetric,
//! }
//! }
//! ```
//! Each `vhost_user` field in the example above is a serializable `VhostUserDeviceMetrics`
//! structure collecting metrics such as `activate_fails`, `cfg_fails`, `init_time_us`,
//! `activate_time_us` and `config_change_time_us` for the vhost_user device.
//! For vhost-user block device having endpoint "/drives/drv0" the emitted metrics would be
//! `vhost_user_block_drv0`.
//! For vhost-user block device having endpoint "/drives/drvN" the emitted metrics would be
//! `vhost_user_block_drvN`.
//! Aggregate metrics for `vhost_user` are `not` emitted as they can be easily obtained in
//! typical observability tools.
//!
//! # Design
//! The main design goals of this system are:
//! * To improve vhost_user device metrics by logging them at per device granularity.
//! * `vhost_user` is a new device with no metrics emitted before so, backward compatibility doesn't
//! come into picture like it was in the case of block/net devices. And since, metrics can be
//! easily aggregated using typical observability tools, we chose not to provide aggregate
//! vhost_user metrics.
//! * Rely on `serde` to provide the actual serialization for writing the metrics.
//! * Since all metrics start at 0, we implement the `Default` trait via derive for all of them, to
//! avoid having to initialize everything by hand.
//!
//! * Follow the design of Block and Net device metrics and use a map of vhost_user device name and
//! corresponding metrics.
//! * Metrics are flushed with key `vhost_user_{module_specific_name}` and each module sets an
//! appropriate `module_specific_name` in the format `{mod}_{id}`. e.g. vhost-user block device in
//! this commit set this as `format!("{}_{}", "block_", config.drive_id.clone());` This way
//! vhost_user_metrics stay generic while the specific vhost_user devices can have their unique
//! metrics.
//!
//! The system implements 2 type of metrics:
//! * Shared Incremental Metrics (SharedIncMetrics) - dedicated for the metrics which need a counter
//! (i.e the number of times activating a device failed). These metrics are reset upon flush.
//! * Shared Store Metrics (SharedStoreMetrics) - are targeted at keeping a persistent value, it is
//! `not` intended to act as a counter (i.e for measure the process start up time for example).
//!
//! We add VhostUserDeviceMetrics entries from vhost_user_metrics::METRICS into vhost_user device
//! instead of vhost_user device having individual separate VhostUserDeviceMetrics entries because
//! vhost_user device is not accessible from signal handlers to flush metrics and
//! vhost_user_metrics::METRICS is.
use std::collections::BTreeMap;
use std::sync::{Arc, RwLock};
use serde::ser::SerializeMap;
use serde::{Serialize, Serializer};
use crate::logger::{SharedIncMetric, SharedStoreMetric};
/// Map of vhost_user drive id to the metrics of that device.
/// This should be protected by a lock before accessing.
#[allow(missing_debug_implementations)]
pub struct VhostUserMetricsPerDevice {
    /// used to access per vhost_user device metrics
    pub metrics: BTreeMap<String, Arc<VhostUserDeviceMetrics>>,
}
impl VhostUserMetricsPerDevice {
    /// Allocate `VhostUserDeviceMetrics` for vhost_user device having
    /// id `drive_id`. Also, allocate only if it doesn't
    /// exist to avoid overwriting previously allocated data.
    /// The lock is always initialized, so it is safe to unwrap
    /// it without a check.
    pub fn alloc(drive_id: String) -> Arc<VhostUserDeviceMetrics> {
        Arc::clone(
            METRICS
                .write()
                .unwrap()
                .metrics
                .entry(drive_id)
                // Reuse an existing entry so previously recorded metrics survive.
                .or_insert_with(|| Arc::new(VhostUserDeviceMetrics::default())),
        )
    }
}
/// Pool of vhost_user-related metrics per device behind a lock to
/// keep things thread safe. Since the lock is initialized here
/// it is safe to unwrap it without any check.
/// Entries are created lazily via `VhostUserMetricsPerDevice::alloc`.
static METRICS: RwLock<VhostUserMetricsPerDevice> = RwLock::new(VhostUserMetricsPerDevice {
    metrics: BTreeMap::new(),
});
/// This function facilitates serialization of vhost_user device metrics.
///
/// Every device is emitted as a map entry keyed `vhost_user_{device_name}`.
pub fn flush_metrics<S: Serializer>(serializer: S) -> Result<S::Ok, S::Error> {
    let guard = METRICS.read().unwrap();
    let mut map = serializer.serialize_map(Some(guard.metrics.len()))?;
    for (name, metrics) in &guard.metrics {
        let key = format!("vhost_user_{}", name);
        map.serialize_entry(&key, metrics)?;
    }
    map.end()
}
/// vhost_user Device associated metrics.
#[derive(Debug, Default, Serialize)]
pub struct VhostUserDeviceMetrics {
    /// Number of times when activate failed on a vhost_user device.
    pub activate_fails: SharedIncMetric,
    /// Number of times when interacting with the space config of a vhost-user device failed.
    pub cfg_fails: SharedIncMetric,
    /// Vhost-user init time in microseconds.
    pub init_time_us: SharedStoreMetric,
    /// Vhost-user activate time in microseconds.
    pub activate_time_us: SharedStoreMetric,
    /// Vhost-user config change time in microseconds.
    pub config_change_time_us: SharedStoreMetric,
}
#[cfg(test)]
pub mod tests {
    use utils::time::{ClockType, get_time_us};
    use super::*;
    use crate::logger::{IncMetric, StoreMetric};
    // vhost-user metrics has both SharedIncMetrics and SharedStoreMetrics
    // In this test we try to test one field for each type by creating a
    // dummy vhost_user_block metric named `vhost_user_block_drvN`.
    // There is no specific reason for storing the measured time taken vs a
    // random number in `init_time_us`.
    // We add an additional test to confirm that `vhost_user_metrics::METRICS`
    // actually has an entry for `vhost_user_block_drvN` and compare it.
    // We chose serde_json to compare because that seemed easiest to compare
    // the entire struct format and serialization of VhostUserDeviceMetrics.
    #[test]
    fn test_vhost_user_basic_metrics() {
        let vhost_user_dev_name: String = String::from("vhost_user_block_drvN");
        let start_time = get_time_us(ClockType::Monotonic);
        let vhost_user_metrics: Arc<VhostUserDeviceMetrics> =
            VhostUserMetricsPerDevice::alloc(vhost_user_dev_name.clone());
        let delta_us = get_time_us(ClockType::Monotonic) - start_time;
        // Exercise one SharedIncMetric field...
        vhost_user_metrics.activate_fails.inc();
        assert_eq!(vhost_user_metrics.activate_fails.count(), 1);
        // ...and one SharedStoreMetric field.
        vhost_user_metrics.init_time_us.store(delta_us);
        assert_eq!(vhost_user_metrics.init_time_us.fetch(), delta_us);
        // fill another local variable with the same data and use it to compare with the METRICS
        // entry
        let vhost_user_metrics_backup: VhostUserDeviceMetrics = VhostUserDeviceMetrics::default();
        vhost_user_metrics_backup.activate_fails.inc();
        vhost_user_metrics_backup.init_time_us.store(delta_us);
        // serializing METRICS also flushes the SharedIncMetric data so we have to use _backup
        // variable for comparison.
        let vhost_user_metrics_global: String =
            serde_json::to_string(&METRICS.read().unwrap().metrics.get(&vhost_user_dev_name))
                .unwrap();
        let vhost_user_metrics_local: String =
            serde_json::to_string(&vhost_user_metrics_backup).unwrap();
        assert_eq!(vhost_user_metrics_local, vhost_user_metrics_global);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/mod.rs | src/vmm/src/devices/virtio/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! Implements virtio devices, queues, and transport mechanisms.
use std::any::Any;
use self::queue::QueueError;
use crate::devices::virtio::net::TapError;
pub mod balloon;
pub mod block;
pub mod device;
pub mod generated;
mod iov_deque;
pub mod iovec;
pub mod mem;
pub mod net;
pub mod persist;
pub mod pmem;
pub mod queue;
pub mod rng;
pub mod test_utils;
pub mod transport;
pub mod vhost_user;
pub mod vhost_user_metrics;
pub mod vsock;
/// When the driver initializes the device, it lets the device know about the
/// completed stages using the Device Status Field.
///
/// These following consts are defined in the order in which the bits would
/// typically be set by the driver. INIT -> ACKNOWLEDGE -> DRIVER and so on.
///
/// This module is a 1:1 mapping for the Device Status Field in the virtio 1.0
/// specification, section 2.1.
mod device_status {
    /// No bits set: device has been reset / not yet initialized.
    pub const INIT: u32 = 0;
    /// Guest OS has noticed the device.
    pub const ACKNOWLEDGE: u32 = 1;
    /// Guest OS knows how to drive the device.
    pub const DRIVER: u32 = 2;
    /// Something went wrong and the guest has given up on the device.
    pub const FAILED: u32 = 128;
    /// Feature negotiation between driver and device is complete.
    pub const FEATURES_OK: u32 = 8;
    /// Driver is fully set up and ready to drive the device.
    pub const DRIVER_OK: u32 = 4;
    /// Device has experienced an error and needs to be reset.
    pub const DEVICE_NEEDS_RESET: u32 = 64;
}
/// Offset from the base MMIO address of a virtio device used by the guest to notify the device of
/// queue events (the "queue notify" register).
pub const NOTIFY_REG_OFFSET: u32 = 0x50;
/// Errors triggered when activating a VirtioDevice.
// NOTE: each variant's doc comment doubles as its `Display` message via
// `displaydoc::Display`, so `{0}`/`{field}` placeholders are interpolated.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum ActivateError {
    /// Wrong number of queue for virtio device: expected {expected}, got {got}
    QueueMismatch { expected: usize, got: usize },
    /// Failed to write to activate eventfd
    EventFd,
    /// Vhost user: {0}
    VhostUser(vhost_user::VhostUserError),
    /// Setting tap interface offload flags failed: {0}
    TapSetOffload(TapError),
    // Fixed: the message used `(0)` instead of `{0}`, so the underlying
    // QueueError was silently omitted from the displayed error.
    /// Error setting pointers in the queue: {0}
    QueueMemoryError(QueueError),
    /// The driver didn't acknowledge a required feature: {0}
    RequiredFeatureNotAcked(&'static str),
}
/// Trait that helps in upcasting an object to Any
///
/// Blanket-implemented below for every `T: Any`, so trait objects can be
/// downcast to their concrete types.
pub trait AsAny {
    /// Return the immutable any encapsulated object.
    fn as_any(&self) -> &dyn Any;
    /// Return the mutable encapsulated any object.
    fn as_mut_any(&mut self) -> &mut dyn Any;
}
// Blanket implementation: every `Any` (i.e. `'static`) type can expose itself
// as a `dyn Any` reference.
impl<T: Any> AsAny for T {
    fn as_any(&self) -> &dyn Any {
        self
    }
    fn as_mut_any(&mut self) -> &mut dyn Any {
        self
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/iovec.rs | src/vmm/src/devices/virtio/iovec.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::io::ErrorKind;
use libc::{c_void, iovec, size_t};
use serde::{Deserialize, Serialize};
use vm_memory::bitmap::Bitmap;
use vm_memory::{
GuestMemory, GuestMemoryError, ReadVolatile, VolatileMemoryError, VolatileSlice, WriteVolatile,
};
use super::iov_deque::{IovDeque, IovDequeError};
use super::queue::FIRECRACKER_MAX_QUEUE_SIZE;
use crate::devices::virtio::queue::DescriptorChain;
use crate::vstate::memory::GuestMemoryMmap;
// NOTE: each variant's doc comment doubles as its `Display` message via
// `displaydoc::Display`.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum IoVecError {
    /// Tried to create an `IoVec` from a write-only descriptor chain
    WriteOnlyDescriptor,
    // Fixed: the message had a mismatched quote/backtick (`'IoVecMut``).
    /// Tried to create an `IoVecMut` from a read-only descriptor chain
    ReadOnlyDescriptor,
    /// Tried to create an `IoVec` or `IoVecMut` from a descriptor chain that was too large
    OverflowedDescriptor,
    /// Tried to push to full IovDeque.
    IovDequeOverflow,
    /// Guest memory error: {0}
    GuestMemory(#[from] GuestMemoryError),
    /// Error with underlying `IovDeque`: {0}
    IovDeque(#[from] IovDequeError),
}
/// This is essentially a wrapper of a `Vec<libc::iovec>` which can be passed to `libc::writev`.
///
/// It describes a buffer passed to us by the guest that is scattered across multiple
/// memory regions. Additionally, this wrapper provides methods that allow reading arbitrary ranges
/// of data from that buffer.
#[derive(Debug, Default)]
pub struct IoVecBuffer {
    // container of the memory regions included in this IO vector
    vecs: Vec<iovec>,
    // Total length of the IoVecBuffer, in bytes (sum of all iov_len values)
    len: u32,
}
// SAFETY: `IoVecBuffer` doesn't allow for interior mutability and no shared ownership is possible
// as it doesn't implement `Clone`, so sending it to another thread cannot introduce data races
// on the memory it points to.
unsafe impl Send for IoVecBuffer {}
impl IoVecBuffer {
    /// Create an `IoVecBuffer` from a `DescriptorChain`
    ///
    /// # Safety
    ///
    /// The descriptor chain cannot be referencing the same memory location as another chain
    pub unsafe fn load_descriptor_chain(
        &mut self,
        mem: &GuestMemoryMmap,
        head: DescriptorChain,
    ) -> Result<(), IoVecError> {
        self.clear();
        let mut next_descriptor = Some(head);
        while let Some(desc) = next_descriptor {
            // `IoVecBuffer` wraps device-readable buffers; reject write-only descriptors.
            if desc.is_write_only() {
                return Err(IoVecError::WriteOnlyDescriptor);
            }
            // We use get_slice instead of `get_host_address` here in order to have the whole
            // range of the descriptor chain checked, i.e. [addr, addr + len) is a valid memory
            // region in the GuestMemoryMmap.
            let iov_base = mem
                .get_slice(desc.addr, desc.len as usize)?
                .ptr_guard_mut()
                .as_ptr()
                .cast::<c_void>();
            self.vecs.push(iovec {
                iov_base,
                iov_len: desc.len as size_t,
            });
            self.len = self
                .len
                .checked_add(desc.len)
                .ok_or(IoVecError::OverflowedDescriptor)?;
            next_descriptor = desc.next_descriptor();
        }
        Ok(())
    }
    /// Create an `IoVecBuffer` from a `DescriptorChain`
    ///
    /// # Safety
    ///
    /// The descriptor chain cannot be referencing the same memory location as another chain
    pub unsafe fn from_descriptor_chain(
        mem: &GuestMemoryMmap,
        head: DescriptorChain,
    ) -> Result<Self, IoVecError> {
        let mut new_buffer = Self::default();
        // SAFETY: descriptor chain cannot be referencing the same memory location as another chain
        unsafe {
            new_buffer.load_descriptor_chain(mem, head)?;
        }
        Ok(new_buffer)
    }
    /// Get the total length (in bytes) of the memory regions covered by this `IoVecBuffer`
    pub(crate) fn len(&self) -> u32 {
        self.len
    }
    /// Returns a pointer to the memory keeping the `iovec` structs
    pub fn as_iovec_ptr(&self) -> *const iovec {
        self.vecs.as_ptr()
    }
    /// Returns the length of the `iovec` array.
    pub fn iovec_count(&self) -> usize {
        self.vecs.len()
    }
    /// Clears the `iovec` array
    pub fn clear(&mut self) {
        self.vecs.clear();
        self.len = 0u32;
    }
    /// Reads a number of bytes from the `IoVecBuffer` starting at a given offset.
    ///
    /// This will try to fill `buf` reading bytes from the `IoVecBuffer` starting from
    /// the given offset.
    ///
    /// # Returns
    ///
    /// `Ok(())` if `buf` was filled by reading from this [`IoVecBuffer`],
    /// `Err(VolatileMemoryError::PartialBuffer)` if `buf` could only be partially filled, and
    /// `Err(VolatileMemoryError::OutOfBounds)` if `offset >= self.len()`.
    pub fn read_exact_volatile_at(
        &self,
        mut buf: &mut [u8],
        offset: usize,
    ) -> Result<(), VolatileMemoryError> {
        if offset < self.len() as usize {
            let expected = buf.len();
            let bytes_read = self.read_volatile_at(&mut buf, offset, expected)?;
            if bytes_read != expected {
                return Err(VolatileMemoryError::PartialBuffer {
                    expected,
                    completed: bytes_read,
                });
            }
            Ok(())
        } else {
            // If `offset` is past size, there's nothing to read.
            Err(VolatileMemoryError::OutOfBounds { addr: offset })
        }
    }
    /// Reads up to `len` bytes from the `IoVecBuffer` starting at the given offset.
    ///
    /// This will try to write to the given [`WriteVolatile`].
    ///
    /// Returns the number of bytes actually transferred, which may be smaller than `len`
    /// if the destination accepts only a partial write.
    pub fn read_volatile_at<W: WriteVolatile>(
        &self,
        dst: &mut W,
        mut offset: usize,
        mut len: usize,
    ) -> Result<usize, VolatileMemoryError> {
        let mut total_bytes_read = 0;
        for iov in &self.vecs {
            if len == 0 {
                break;
            }
            // Skip iovecs that lie entirely before the requested offset.
            if offset >= iov.iov_len {
                offset -= iov.iov_len;
                continue;
            }
            let mut slice =
                // SAFETY: the constructors of `IoVecBuffer` ensure that
                // all iovecs contained point towards valid ranges of guest memory
                unsafe { VolatileSlice::new(iov.iov_base.cast(), iov.iov_len).offset(offset)? };
            offset = 0;
            if slice.len() > len {
                slice = slice.subslice(0, len)?;
            }
            // Retry writes that were interrupted by a signal (EINTR).
            match loop {
                match dst.write_volatile(&slice) {
                    Err(VolatileMemoryError::IOError(err))
                        if err.kind() == ErrorKind::Interrupted => {}
                    result => break result,
                }
            } {
                Ok(bytes_read) => {
                    total_bytes_read += bytes_read;
                    // The destination accepted only part of the slice; stop here.
                    if bytes_read < slice.len() {
                        break;
                    }
                    len -= bytes_read;
                }
                // exit successfully if we previously managed to write some bytes
                Err(_) if total_bytes_read > 0 => break,
                Err(err) => return Err(err),
            }
        }
        Ok(total_bytes_read)
    }
}
// Bookkeeping for one descriptor chain that was appended to an `IoVecBufferMut`,
// enough to later drop that chain again from the buffer.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParsedDescriptorChain {
    // Index of the chain's head descriptor in the queue.
    pub head_index: u16,
    // Total length in bytes contributed by the chain.
    pub length: u32,
    // Number of iovec entries the chain contributed.
    pub nr_iovecs: u16,
}
/// This is essentially a wrapper of a `Vec<libc::iovec>` which can be passed to `libc::readv`.
///
/// It describes a write-only buffer passed to us by the guest that is scattered across multiple
/// memory regions. Additionally, this wrapper provides methods that allow reading arbitrary ranges
/// of data from that buffer.
/// `L` const generic value must be a multiple of 256 as required by the `IovDeque` requirements.
#[derive(Debug)]
pub struct IoVecBufferMut<const L: u16 = FIRECRACKER_MAX_QUEUE_SIZE> {
    // container of the memory regions included in this IO vector
    pub vecs: IovDeque<L>,
    // Total length of the IoVecBufferMut, in bytes.
    // We use `u32` here because we use this type in devices which
    // should not give us huge buffers. In any case this
    // value will not overflow as we explicitly check for this case.
    pub len: u32,
}
// SAFETY: `IoVecBufferMut` doesn't allow for interior mutability and no shared ownership is
// possible as it doesn't implement `Clone`, so sending it to another thread cannot introduce
// data races on the memory it points to.
unsafe impl<const L: u16> Send for IoVecBufferMut<L> {}
impl<const L: u16> IoVecBufferMut<L> {
/// Append a `DescriptorChain` in this `IoVecBufferMut`
///
/// On any error, the iovecs already pushed for this chain are popped back off,
/// leaving the buffer exactly as it was before the call.
///
/// # Safety
///
/// The descriptor chain cannot be referencing the same memory location as another chain
pub unsafe fn append_descriptor_chain(
    &mut self,
    mem: &GuestMemoryMmap,
    head: DescriptorChain,
) -> Result<ParsedDescriptorChain, IoVecError> {
    let head_index = head.index;
    let mut next_descriptor = Some(head);
    let mut length = 0u32;
    let mut nr_iovecs = 0u16;
    while let Some(desc) = next_descriptor {
        // `IoVecBufferMut` wraps device-writable buffers; reject read-only descriptors.
        if !desc.is_write_only() {
            // Roll back the entries this chain already added.
            self.vecs.pop_back(nr_iovecs);
            return Err(IoVecError::ReadOnlyDescriptor);
        }
        // We use get_slice instead of `get_host_address` here in order to have the whole
        // range of the descriptor chain checked, i.e. [addr, addr + len) is a valid memory
        // region in the GuestMemoryMmap.
        let slice = mem
            .get_slice(desc.addr, desc.len as usize)
            .inspect_err(|_| {
                self.vecs.pop_back(nr_iovecs);
            })?;
        // We need to mark the area of guest memory that will be mutated through this
        // IoVecBufferMut as dirty ahead of time, as we lose access to all
        // vm-memory related information after converting down to iovecs.
        slice.bitmap().mark_dirty(0, desc.len as usize);
        let iov_base = slice.ptr_guard_mut().as_ptr().cast::<c_void>();
        if self.vecs.is_full() {
            self.vecs.pop_back(nr_iovecs);
            return Err(IoVecError::IovDequeOverflow);
        }
        self.vecs.push_back(iovec {
            iov_base,
            iov_len: desc.len as size_t,
        });
        nr_iovecs += 1;
        length = length
            .checked_add(desc.len)
            .ok_or(IoVecError::OverflowedDescriptor)
            .inspect_err(|_| {
                self.vecs.pop_back(nr_iovecs);
            })?;
        next_descriptor = desc.next_descriptor();
    }
    self.len = self.len.checked_add(length).ok_or_else(|| {
        self.vecs.pop_back(nr_iovecs);
        IoVecError::OverflowedDescriptor
    })?;
    Ok(ParsedDescriptorChain {
        head_index,
        length,
        nr_iovecs,
    })
}
/// Create an empty `IoVecBufferMut` backed by a freshly allocated `IovDeque`.
pub fn new() -> Result<Self, IovDequeError> {
    Ok(Self {
        vecs: IovDeque::new()?,
        len: 0,
    })
}
/// Create an `IoVecBufferMut` from a `DescriptorChain`
///
/// This will clear any previous `iovec` objects in the buffer and load the new
/// [`DescriptorChain`].
///
/// # Safety
///
/// The descriptor chain cannot be referencing the same memory location as another chain
pub unsafe fn load_descriptor_chain(
    &mut self,
    mem: &GuestMemoryMmap,
    head: DescriptorChain,
) -> Result<(), IoVecError> {
    // Start from a clean slate before parsing the new chain.
    self.clear();
    // SAFETY: the caller guarantees the chain does not alias another chain's memory.
    // The parse metadata is not needed here, only success/failure.
    unsafe { self.append_descriptor_chain(mem, head) }.map(|_| ())
}
/// Drop descriptor chain from the `IoVecBufferMut` front
///
/// This will drop memory described by the `IoVecBufferMut` from the beginning.
pub fn drop_chain_front(&mut self, parse_descriptor: &ParsedDescriptorChain) {
    let dropped_bytes = parse_descriptor.length;
    self.vecs.pop_front(parse_descriptor.nr_iovecs);
    self.len -= dropped_bytes;
}
/// Drop descriptor chain from the `IoVecBufferMut` back
///
/// This will drop memory described by the `IoVecBufferMut` from the end.
pub fn drop_chain_back(&mut self, parse_descriptor: &ParsedDescriptorChain) {
    self.vecs.pop_back(parse_descriptor.nr_iovecs);
    self.len -= parse_descriptor.length;
}
/// Create an `IoVecBuffer` from a `DescriptorChain`
///
/// # Safety
///
/// The descriptor chain cannot be referencing the same memory location as another chain
pub unsafe fn from_descriptor_chain(
    mem: &GuestMemoryMmap,
    head: DescriptorChain,
) -> Result<Self, IoVecError> {
    let mut buffer = Self::new()?;
    // SAFETY: the caller guarantees this chain does not alias memory described
    // by any other chain
    unsafe { buffer.load_descriptor_chain(mem, head) }?;
    Ok(buffer)
}
/// Total number of bytes, across all `iovec`s, described by this buffer.
#[inline(always)]
pub fn len(&self) -> u32 {
    self.len
}
/// Returns true if buffer is empty.
#[inline(always)]
pub fn is_empty(&self) -> bool {
    self.len() == 0
}
/// Returns a mutable slice over the `iovec` structs currently held.
pub fn as_iovec_mut_slice(&mut self) -> &mut [iovec] {
    self.vecs.as_mut_slice()
}
/// Clears the `iovec` array
pub fn clear(&mut self) {
    self.len = 0;
    self.vecs.clear();
}
/// Writes a number of bytes into the `IoVecBufferMut` starting at a given offset.
///
/// Attempts to copy the entirety of `buf` into this buffer, beginning at `offset`,
/// writing as many bytes as fit from `offset` onwards.
///
/// # Returns
///
/// `Ok(())` if the entire contents of `buf` could be written to this [`IoVecBufferMut`],
/// `Err(VolatileMemoryError::PartialBuffer)` if only part of `buf` could be transferred, and
/// `Err(VolatileMemoryError::OutOfBounds)` if `offset >= self.len()`.
pub fn write_all_volatile_at(
    &mut self,
    mut buf: &[u8],
    offset: usize,
) -> Result<(), VolatileMemoryError> {
    // Writing may not start at or past the end of the buffer.
    if offset >= self.len() as usize {
        return Err(VolatileMemoryError::OutOfBounds { addr: offset });
    }
    let expected = buf.len();
    let completed = self.write_volatile_at(&mut buf, offset, expected)?;
    if completed == expected {
        Ok(())
    } else {
        Err(VolatileMemoryError::PartialBuffer {
            expected,
            completed,
        })
    }
}
/// Writes up to `len` bytes into the `IoVecBufferMut` starting at the given offset.
///
/// Data is pulled from the given [`ReadVolatile`] `src` and scattered into the
/// guest-memory regions described by this buffer's `iovec`s.
///
/// Returns the number of bytes actually transferred; this can be less than `len`
/// if `src` runs out of data or the buffer ends first.
pub fn write_volatile_at<W: ReadVolatile>(
    &mut self,
    src: &mut W,
    mut offset: usize,
    mut len: usize,
) -> Result<usize, VolatileMemoryError> {
    let mut total_bytes_read = 0;
    for iov in self.vecs.as_slice() {
        if len == 0 {
            break;
        }
        // Skip whole iovecs that lie entirely before `offset`.
        if offset >= iov.iov_len {
            offset -= iov.iov_len;
            continue;
        }
        let mut slice =
            // SAFETY: the constructor IoVecBufferMut::from_descriptor_chain ensures that
            // all iovecs contained point towards valid ranges of guest memory
            unsafe { VolatileSlice::new(iov.iov_base.cast(), iov.iov_len).offset(offset)? };
        // Only the first visited iovec can be entered mid-way; subsequent regions
        // are written from their start.
        offset = 0;
        if slice.len() > len {
            slice = slice.subslice(0, len)?;
        }
        // Retry reads interrupted by EINTR; break out of the loop with any other result.
        match loop {
            match src.read_volatile(&mut slice) {
                Err(VolatileMemoryError::IOError(err))
                    if err.kind() == ErrorKind::Interrupted => {}
                result => break result,
            }
        } {
            Ok(bytes_read) => {
                total_bytes_read += bytes_read;
                // A short read means `src` has no more data for now; stop early.
                if bytes_read < slice.len() {
                    break;
                }
                len -= bytes_read;
            }
            // exit successfully if we previously managed to read some bytes
            Err(_) if total_bytes_read > 0 => break,
            Err(err) => return Err(err),
        }
    }
    Ok(total_bytes_read)
}
}
#[cfg(test)]
#[allow(clippy::cast_possible_truncation)]
mod tests {
    use libc::{c_void, iovec};
    use vm_memory::VolatileMemoryError;

    use super::IoVecBuffer;
    // Redefine `IoVecBufferMut` with specific length. Otherwise
    // Rust will not know what to do.
    type IoVecBufferMutDefault = super::IoVecBufferMut<FIRECRACKER_MAX_QUEUE_SIZE>;
    use crate::devices::virtio::iov_deque::IovDeque;
    use crate::devices::virtio::queue::{
        FIRECRACKER_MAX_QUEUE_SIZE, Queue, VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE,
    };
    use crate::devices::virtio::test_utils::VirtQueue;
    use crate::test_utils::multi_region_mem;
    use crate::vstate::memory::{Bytes, GuestAddress, GuestMemoryMmap};

    // Test-only conversion: wrap a borrowed byte slice in a read-only `IoVecBuffer`.
    impl<'a> From<&'a [u8]> for IoVecBuffer {
        fn from(buf: &'a [u8]) -> Self {
            Self {
                vecs: vec![iovec {
                    iov_base: buf.as_ptr() as *mut c_void,
                    iov_len: buf.len(),
                }],
                len: buf.len().try_into().unwrap(),
            }
        }
    }

    // Test-only conversion: wrap multiple borrowed slices in one `IoVecBuffer`.
    impl<'a> From<Vec<&'a [u8]>> for IoVecBuffer {
        fn from(buffer: Vec<&'a [u8]>) -> Self {
            let mut len = 0_u32;
            let vecs = buffer
                .into_iter()
                .map(|slice| {
                    len += TryInto::<u32>::try_into(slice.len()).unwrap();
                    iovec {
                        iov_base: slice.as_ptr() as *mut c_void,
                        iov_len: slice.len(),
                    }
                })
                .collect();
            Self { vecs, len }
        }
    }

    // Test-only conversion: wrap a mutable byte slice in an `IoVecBufferMut`.
    impl<const L: u16> From<&mut [u8]> for super::IoVecBufferMut<L> {
        fn from(buf: &mut [u8]) -> Self {
            let mut vecs = IovDeque::new().unwrap();
            vecs.push_back(iovec {
                iov_base: buf.as_mut_ptr().cast::<c_void>(),
                iov_len: buf.len(),
            });
            Self {
                vecs,
                len: buf.len() as u32,
            }
        }
    }

    // Test-only conversion: wrap multiple mutable slices in one `IoVecBufferMut`.
    impl<const L: u16> From<Vec<&mut [u8]>> for super::IoVecBufferMut<L> {
        fn from(buffer: Vec<&mut [u8]>) -> Self {
            let mut len = 0;
            let mut vecs = IovDeque::new().unwrap();
            for slice in buffer {
                len += slice.len() as u32;
                vecs.push_back(iovec {
                    iov_base: slice.as_ptr() as *mut c_void,
                    iov_len: slice.len(),
                });
            }
            Self { vecs, len }
        }
    }

    // Guest memory with three disjoint regions (gaps in between).
    fn default_mem() -> GuestMemoryMmap {
        multi_region_mem(&[
            (GuestAddress(0), 0x10000),
            (GuestAddress(0x20000), 0x10000),
            (GuestAddress(0x40000), 0x10000),
        ])
    }

    // Builds a queue with a single 4-descriptor chain of 64 bytes each at 0x20000.
    fn chain(m: &GuestMemoryMmap, is_write_only: bool) -> (Queue, VirtQueue<'_>) {
        let vq = VirtQueue::new(GuestAddress(0), m, 16);
        let mut q = vq.create_queue();
        q.ready = true;
        let flags = if is_write_only {
            VIRTQ_DESC_F_NEXT | VIRTQ_DESC_F_WRITE
        } else {
            VIRTQ_DESC_F_NEXT
        };
        for j in 0..4 {
            vq.dtable[j as usize].set(0x20000 + 64 * u64::from(j), 64, flags, j + 1);
        }
        // one chain: (0, 1, 2, 3)
        vq.dtable[3].flags.set(flags & !VIRTQ_DESC_F_NEXT);
        vq.avail.ring[0].set(0);
        vq.avail.idx.set(1);
        (q, vq)
    }

    // Read-only chain whose backing memory is pre-filled with the bytes 0..=255.
    fn read_only_chain(mem: &GuestMemoryMmap) -> (Queue, VirtQueue<'_>) {
        let v: Vec<u8> = (0..=255).collect();
        mem.write_slice(&v, GuestAddress(0x20000)).unwrap();
        chain(mem, false)
    }

    // Write-only chain whose backing memory is zeroed.
    fn write_only_chain(mem: &GuestMemoryMmap) -> (Queue, VirtQueue<'_>) {
        let v = vec![0; 256];
        mem.write_slice(&v, GuestAddress(0x20000)).unwrap();
        chain(mem, true)
    }

    // `IoVecBuffer` must reject write-only chains; `IoVecBufferMut` must reject
    // read-only ones.
    #[test]
    fn test_access_mode() {
        let mem = default_mem();
        let (mut q, _) = read_only_chain(&mem);
        let head = q.pop().unwrap().unwrap();
        // SAFETY: This descriptor chain is only loaded into one buffer
        unsafe { IoVecBuffer::from_descriptor_chain(&mem, head).unwrap() };
        let (mut q, _) = write_only_chain(&mem);
        let head = q.pop().unwrap().unwrap();
        // SAFETY: This descriptor chain is only loaded into one buffer
        unsafe { IoVecBuffer::from_descriptor_chain(&mem, head).unwrap_err() };
        let (mut q, _) = read_only_chain(&mem);
        let head = q.pop().unwrap().unwrap();
        // SAFETY: This descriptor chain is only loaded into one buffer
        unsafe { IoVecBufferMutDefault::from_descriptor_chain(&mem, head).unwrap_err() };
        let (mut q, _) = write_only_chain(&mem);
        let head = q.pop().unwrap().unwrap();
        // SAFETY: This descriptor chain is only loaded into one buffer
        unsafe { IoVecBufferMutDefault::from_descriptor_chain(&mem, head).unwrap() };
    }

    #[test]
    fn test_iovec_length() {
        let mem = default_mem();
        let (mut q, _) = read_only_chain(&mem);
        let head = q.pop().unwrap().unwrap();
        // SAFETY: This descriptor chain is only loaded once in this test
        let iovec = unsafe { IoVecBuffer::from_descriptor_chain(&mem, head).unwrap() };
        assert_eq!(iovec.len(), 4 * 64);
    }

    #[test]
    fn test_iovec_mut_length() {
        let mem = default_mem();
        let (mut q, _) = write_only_chain(&mem);
        let head = q.pop().unwrap().unwrap();
        // SAFETY: This descriptor chain is only loaded once in this test
        let mut iovec =
            unsafe { IoVecBufferMutDefault::from_descriptor_chain(&mem, head).unwrap() };
        assert_eq!(iovec.len(), 4 * 64);
        // We are creating a new queue where we can get descriptors from. Probably, this is not
        // something that we will ever want to do, as `IoVecBufferMut`s are typically
        // (concpetually) associated with a single `Queue`. We just do this here to be able to test
        // the appending logic.
        let (mut q, _) = write_only_chain(&mem);
        let head = q.pop().unwrap().unwrap();
        // SAFETY: it is actually unsafe, but we just want to check the length of the
        // `IoVecBufferMut` after appending.
        let _ = unsafe { iovec.append_descriptor_chain(&mem, head).unwrap() };
        assert_eq!(iovec.len(), 8 * 64);
    }

    #[test]
    fn test_iovec_read_at() {
        let mem = default_mem();
        let (mut q, _) = read_only_chain(&mem);
        let head = q.pop().unwrap().unwrap();
        // SAFETY: This descriptor chain is only loaded once in this test
        let iovec = unsafe { IoVecBuffer::from_descriptor_chain(&mem, head).unwrap() };
        let mut buf = vec![0u8; 257];
        // Reading past the end is truncated to the available 256 bytes.
        assert_eq!(
            iovec
                .read_volatile_at(&mut buf.as_mut_slice(), 0, 257)
                .unwrap(),
            256
        );
        assert_eq!(buf[0..256], (0..=255).collect::<Vec<_>>());
        assert_eq!(buf[256], 0);
        let mut buf = vec![0; 5];
        iovec.read_exact_volatile_at(&mut buf[..4], 0).unwrap();
        assert_eq!(buf, vec![0u8, 1, 2, 3, 0]);
        iovec.read_exact_volatile_at(&mut buf, 0).unwrap();
        assert_eq!(buf, vec![0u8, 1, 2, 3, 4]);
        iovec.read_exact_volatile_at(&mut buf, 1).unwrap();
        assert_eq!(buf, vec![1u8, 2, 3, 4, 5]);
        // Offset 60 straddles the boundary between the first two descriptors.
        iovec.read_exact_volatile_at(&mut buf, 60).unwrap();
        assert_eq!(buf, vec![60u8, 61, 62, 63, 64]);
        assert_eq!(
            iovec
                .read_volatile_at(&mut buf.as_mut_slice(), 252, 5)
                .unwrap(),
            4
        );
        assert_eq!(buf[0..4], vec![252u8, 253, 254, 255]);
        assert!(matches!(
            iovec.read_exact_volatile_at(&mut buf, 252),
            Err(VolatileMemoryError::PartialBuffer {
                expected: 5,
                completed: 4
            })
        ));
        assert!(matches!(
            iovec.read_exact_volatile_at(&mut buf, 256),
            Err(VolatileMemoryError::OutOfBounds { addr: 256 })
        ));
    }

    #[test]
    fn test_iovec_mut_write_at() {
        let mem = default_mem();
        let (mut q, vq) = write_only_chain(&mem);
        // This is a descriptor chain with 4 elements 64 bytes long each.
        let head = q.pop().unwrap().unwrap();
        // SAFETY: This descriptor chain is only loaded into one buffer
        let mut iovec =
            unsafe { IoVecBufferMutDefault::from_descriptor_chain(&mem, head).unwrap() };
        let buf = vec![0u8, 1, 2, 3, 4];
        // One test vector for each part of the chain
        let mut test_vec1 = vec![0u8; 64];
        let mut test_vec2 = vec![0u8; 64];
        let test_vec3 = vec![0u8; 64];
        let mut test_vec4 = vec![0u8; 64];
        // Control test: Initially all three regions should be zero
        iovec.write_all_volatile_at(&test_vec1, 0).unwrap();
        iovec.write_all_volatile_at(&test_vec2, 64).unwrap();
        iovec.write_all_volatile_at(&test_vec3, 128).unwrap();
        iovec.write_all_volatile_at(&test_vec4, 192).unwrap();
        vq.dtable[0].check_data(&test_vec1);
        vq.dtable[1].check_data(&test_vec2);
        vq.dtable[2].check_data(&test_vec3);
        vq.dtable[3].check_data(&test_vec4);
        // Let's initialize test_vec1 with our buffer.
        test_vec1[..buf.len()].copy_from_slice(&buf);
        // And write just a part of it
        iovec.write_all_volatile_at(&buf[..3], 0).unwrap();
        // Not all 5 bytes from buf should be written in memory,
        // just 3 of them.
        vq.dtable[0].check_data(&[0u8, 1, 2, 0, 0]);
        vq.dtable[1].check_data(&test_vec2);
        vq.dtable[2].check_data(&test_vec3);
        vq.dtable[3].check_data(&test_vec4);
        // But if we write the whole `buf` in memory then all
        // of it should be observable.
        iovec.write_all_volatile_at(&buf, 0).unwrap();
        vq.dtable[0].check_data(&test_vec1);
        vq.dtable[1].check_data(&test_vec2);
        vq.dtable[2].check_data(&test_vec3);
        vq.dtable[3].check_data(&test_vec4);
        // We are now writing with an offset of 1. So, initialize
        // the corresponding part of `test_vec1`
        test_vec1[1..buf.len() + 1].copy_from_slice(&buf);
        iovec.write_all_volatile_at(&buf, 1).unwrap();
        vq.dtable[0].check_data(&test_vec1);
        vq.dtable[1].check_data(&test_vec2);
        vq.dtable[2].check_data(&test_vec3);
        vq.dtable[3].check_data(&test_vec4);
        // Perform a write that traverses two of the underlying
        // regions. Writing at offset 60 should write 4 bytes on the
        // first region and one byte on the second
        test_vec1[60..64].copy_from_slice(&buf[0..4]);
        test_vec2[0] = 4;
        iovec.write_all_volatile_at(&buf, 60).unwrap();
        vq.dtable[0].check_data(&test_vec1);
        vq.dtable[1].check_data(&test_vec2);
        vq.dtable[2].check_data(&test_vec3);
        vq.dtable[3].check_data(&test_vec4);
        test_vec4[63] = 3;
        test_vec4[62] = 2;
        test_vec4[61] = 1;
        // Now perform a write that does not fit in the buffer. Try writing
        // 5 bytes at offset 252 (only 4 bytes left).
        test_vec4[60..64].copy_from_slice(&buf[0..4]);
        assert_eq!(
            iovec.write_volatile_at(&mut &*buf, 252, buf.len()).unwrap(),
            4
        );
        vq.dtable[0].check_data(&test_vec1);
        vq.dtable[1].check_data(&test_vec2);
        vq.dtable[2].check_data(&test_vec3);
        vq.dtable[3].check_data(&test_vec4);
        // Trying to add past the end of the buffer should not write anything
        assert!(matches!(
            iovec.write_all_volatile_at(&buf, 256),
            Err(VolatileMemoryError::OutOfBounds { addr: 256 })
        ));
        vq.dtable[0].check_data(&test_vec1);
        vq.dtable[1].check_data(&test_vec2);
        vq.dtable[2].check_data(&test_vec3);
        vq.dtable[3].check_data(&test_vec4);
    }
}
#[cfg(kani)]
#[allow(dead_code)] // Avoid warning when using stubs
mod verification {
use std::mem::ManuallyDrop;
use libc::{c_void, iovec};
use vm_memory::VolatileSlice;
use vm_memory::bitmap::BitmapSlice;
use super::IoVecBuffer;
use crate::arch::GUEST_PAGE_SIZE;
use crate::devices::virtio::iov_deque::IovDeque;
// Redefine `IoVecBufferMut` and `IovDeque` with specific length. Otherwise
// Rust will not know what to do.
type IoVecBufferMutDefault = super::IoVecBufferMut<FIRECRACKER_MAX_QUEUE_SIZE>;
type IovDequeDefault = IovDeque<FIRECRACKER_MAX_QUEUE_SIZE>;
use crate::devices::virtio::queue::FIRECRACKER_MAX_QUEUE_SIZE;
// Maximum memory size to use for our buffers. For the time being 1KB.
const GUEST_MEMORY_SIZE: usize = 1 << 10;
// Maximum number of descriptors in a chain to use in our proofs. The value is selected upon
// experimenting with the execution time. Typically, in our virtio devices we use queues of up
// to 256 entries which is the theoretical maximum length of a `DescriptorChain`, but in reality
// our code does not make any assumption about the length of the chain, apart from it being
// >= 1.
const MAX_DESC_LENGTH: usize = 4;
mod stubs {
use super::*;
/// This is a stub for the `IovDeque::push_back` method.
///
/// `IovDeque` relies on a special allocation of two pages of virtual memory, where both of
/// these point to the same underlying physical page. This way, the contents of the first
/// page of virtual memory are automatically mirrored in the second virtual page. We do
/// that in order to always have the elements that are currently in the ring buffer in
/// consecutive (virtual) memory.
///
/// To build this particular memory layout we create a new `memfd` object, allocate memory
/// with `mmap` and call `mmap` again to make sure both pages point to the page allocated
/// via the `memfd` object. These ffi calls make kani complain, so here we mock the
/// `IovDeque` object memory with a normal memory allocation of two pages worth of data.
///
/// This stub helps imitate the effect of mirroring without all the elaborate memory
/// allocation trick.
pub fn push_back<const L: u16>(deque: &mut IovDeque<L>, iov: iovec) {
// This should NEVER happen, since our ring buffer is as big as the maximum queue size.
// We also check for the sanity of the VirtIO queues, in queue.rs, which means that if
// we ever try to add something in a full ring buffer, there is an internal
// bug in the device emulation logic. Panic here because the device is
// hopelessly broken.
assert!(
!deque.is_full(),
"The number of `iovec` objects is bigger than the available space"
);
let offset = (deque.start + deque.len) as usize;
let mirror = if offset >= L as usize {
offset - L as usize
} else {
offset + L as usize
};
// SAFETY: self.iov is a valid pointer and `self.start + self.len` is within range (we
// asserted before that the buffer is not full).
unsafe { deque.iov.add(offset).write_volatile(iov) };
unsafe { deque.iov.add(mirror).write_volatile(iov) };
deque.len += 1;
}
}
fn create_iovecs(mem: *mut u8, size: usize, nr_descs: usize) -> (Vec<iovec>, u32) {
let mut vecs: Vec<iovec> = Vec::with_capacity(nr_descs);
let mut len = 0u32;
for _ in 0..nr_descs {
// The `IoVecBuffer` constructors ensure that the memory region described by every
// `Descriptor` in the chain is a valid, i.e. it is memory with then guest's memory
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/vhost_user.rs | src/vmm/src/devices/virtio/vhost_user.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Portions Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::os::fd::AsRawFd;
use std::os::unix::net::UnixStream;
use std::sync::Arc;
use vhost::vhost_user::message::*;
use vhost::vhost_user::{Frontend, VhostUserFrontend};
use vhost::{Error as VhostError, VhostBackend, VhostUserMemoryRegionInfo, VringConfigData};
use vm_memory::{Address, GuestMemory, GuestMemoryError, GuestMemoryRegion};
use vmm_sys_util::eventfd::EventFd;
use crate::devices::virtio::queue::Queue;
use crate::devices::virtio::transport::{VirtioInterrupt, VirtioInterruptType};
use crate::vstate::memory::GuestMemoryMmap;
/// vhost-user error.
///
/// NOTE: with `displaydoc::Display`, the doc comment of each variant below IS its
/// user-visible `Display` message — do not edit them cosmetically.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum VhostUserError {
    /// Invalid available address
    AvailAddress(GuestMemoryError),
    /// Failed to connect to UDS Unix stream: {0}
    Connect(#[from] std::io::Error),
    /// Invalid descriptor table address
    DescriptorTableAddress(GuestMemoryError),
    /// Get features failed: {0}
    VhostUserGetFeatures(VhostError),
    /// Get protocol features failed: {0}
    VhostUserGetProtocolFeatures(VhostError),
    /// Set owner failed: {0}
    VhostUserSetOwner(VhostError),
    /// Set features failed: {0}
    VhostUserSetFeatures(VhostError),
    /// Set protocol features failed: {0}
    VhostUserSetProtocolFeatures(VhostError),
    /// Set mem table failed: {0}
    VhostUserSetMemTable(VhostError),
    /// Set vring num failed: {0}
    VhostUserSetVringNum(VhostError),
    /// Set vring addr failed: {0}
    VhostUserSetVringAddr(VhostError),
    /// Set vring base failed: {0}
    VhostUserSetVringBase(VhostError),
    /// Set vring call failed: {0}
    VhostUserSetVringCall(VhostError),
    /// Set vring kick failed: {0}
    VhostUserSetVringKick(VhostError),
    /// Set vring enable failed: {0}
    VhostUserSetVringEnable(VhostError),
    /// Failed to read vhost eventfd: No memory region found
    VhostUserNoMemoryRegion,
    /// Invalid used address
    UsedAddress(GuestMemoryError),
}
// Trait with all methods we use from `Frontend` from vhost crate.
// It allows us to create a mock implementation of the `Frontend`
// to verify calls to the backend.
// All methods have default impl in order to simplify mock impls.
pub trait VhostUserHandleBackend: Sized {
    /// Constructor of `Frontend`
    fn from_stream(_sock: UnixStream, _max_queue_num: u64) -> Self {
        unimplemented!()
    }
    /// Set the vhost-user message header flags used for subsequent requests.
    fn set_hdr_flags(&self, _flags: VhostUserHeaderFlag) {
        unimplemented!()
    }
    /// Get from the underlying vhost implementation the feature bitmask.
    fn get_features(&self) -> Result<u64, vhost::Error> {
        unimplemented!()
    }
    /// Enable features in the underlying vhost implementation using a bitmask.
    fn set_features(&self, _features: u64) -> Result<(), vhost::Error> {
        unimplemented!()
    }
    /// Set the current Frontend as an owner of the session.
    fn set_owner(&self) -> Result<(), vhost::Error> {
        unimplemented!()
    }
    /// Set the memory map regions on the slave so it can translate the vring
    /// addresses. In the ancillary data there is an array of file descriptors
    fn set_mem_table(&self, _regions: &[VhostUserMemoryRegionInfo]) -> Result<(), vhost::Error> {
        unimplemented!()
    }
    /// Set the size of the queue.
    fn set_vring_num(&self, _queue_index: usize, _num: u16) -> Result<(), vhost::Error> {
        unimplemented!()
    }
    /// Sets the addresses of the different aspects of the vring.
    fn set_vring_addr(
        &self,
        _queue_index: usize,
        _config_data: &VringConfigData,
    ) -> Result<(), vhost::Error> {
        unimplemented!()
    }
    /// Sets the base offset in the available vring.
    fn set_vring_base(&self, _queue_index: usize, _base: u16) -> Result<(), vhost::Error> {
        unimplemented!()
    }
    /// Set the event file descriptor to signal when buffers are used.
    /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
    /// is set when there is no file descriptor in the ancillary data. This signals that polling
    /// will be used instead of waiting for the call.
    fn set_vring_call(&self, _queue_index: usize, _fd: &EventFd) -> Result<(), vhost::Error> {
        unimplemented!()
    }
    /// Set the event file descriptor for adding buffers to the vring.
    /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
    /// is set when there is no file descriptor in the ancillary data. This signals that polling
    /// should be used instead of waiting for a kick.
    fn set_vring_kick(&self, _queue_index: usize, _fd: &EventFd) -> Result<(), vhost::Error> {
        unimplemented!()
    }
    /// Query the protocol feature bitmask supported by the backend.
    fn get_protocol_features(&mut self) -> Result<VhostUserProtocolFeatures, vhost::Error> {
        unimplemented!()
    }
    /// Acknowledge a set of protocol features to the backend.
    fn set_protocol_features(
        &mut self,
        _features: VhostUserProtocolFeatures,
    ) -> Result<(), vhost::Error> {
        unimplemented!()
    }
    /// Enable or disable a vring.
    fn set_vring_enable(&mut self, _queue_index: usize, _enable: bool) -> Result<(), vhost::Error> {
        unimplemented!()
    }
    /// Read a slice of the device configuration space.
    fn get_config(
        &mut self,
        _offset: u32,
        _size: u32,
        _flags: VhostUserConfigFlags,
        _buf: &[u8],
    ) -> Result<(VhostUserConfig, VhostUserConfigPayload), vhost::Error> {
        unimplemented!()
    }
    /// Write a slice of the device configuration space.
    fn set_config(
        &mut self,
        _offset: u32,
        _flags: VhostUserConfigFlags,
        _buf: &[u8],
    ) -> Result<(), vhost::Error> {
        unimplemented!()
    }
}
// Production implementation: every method delegates 1:1 to the `vhost` crate's
// `Frontend`, disambiguating between its `VhostBackend` and `VhostUserFrontend`
// trait methods with fully-qualified syntax.
impl VhostUserHandleBackend for Frontend {
    fn from_stream(sock: UnixStream, max_queue_num: u64) -> Self {
        Frontend::from_stream(sock, max_queue_num)
    }
    fn set_hdr_flags(&self, flags: VhostUserHeaderFlag) {
        self.set_hdr_flags(flags)
    }
    /// Get from the underlying vhost implementation the feature bitmask.
    fn get_features(&self) -> Result<u64, vhost::Error> {
        <Frontend as VhostBackend>::get_features(self)
    }
    /// Enable features in the underlying vhost implementation using a bitmask.
    fn set_features(&self, features: u64) -> Result<(), vhost::Error> {
        <Frontend as VhostBackend>::set_features(self, features)
    }
    /// Set the current Frontend as an owner of the session.
    fn set_owner(&self) -> Result<(), vhost::Error> {
        <Frontend as VhostBackend>::set_owner(self)
    }
    /// Set the memory map regions on the slave so it can translate the vring
    /// addresses. In the ancillary data there is an array of file descriptors
    fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<(), vhost::Error> {
        <Frontend as VhostBackend>::set_mem_table(self, regions)
    }
    /// Set the size of the queue.
    fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<(), vhost::Error> {
        <Frontend as VhostBackend>::set_vring_num(self, queue_index, num)
    }
    /// Sets the addresses of the different aspects of the vring.
    fn set_vring_addr(
        &self,
        queue_index: usize,
        config_data: &VringConfigData,
    ) -> Result<(), vhost::Error> {
        <Frontend as VhostBackend>::set_vring_addr(self, queue_index, config_data)
    }
    /// Sets the base offset in the available vring.
    fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<(), vhost::Error> {
        <Frontend as VhostBackend>::set_vring_base(self, queue_index, base)
    }
    /// Set the event file descriptor to signal when buffers are used.
    /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
    /// is set when there is no file descriptor in the ancillary data. This signals that polling
    /// will be used instead of waiting for the call.
    fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<(), vhost::Error> {
        <Frontend as VhostBackend>::set_vring_call(self, queue_index, fd)
    }
    /// Set the event file descriptor for adding buffers to the vring.
    /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
    /// is set when there is no file descriptor in the ancillary data. This signals that polling
    /// should be used instead of waiting for a kick.
    fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<(), vhost::Error> {
        <Frontend as VhostBackend>::set_vring_kick(self, queue_index, fd)
    }
    fn get_protocol_features(&mut self) -> Result<VhostUserProtocolFeatures, vhost::Error> {
        <Frontend as VhostUserFrontend>::get_protocol_features(self)
    }
    fn set_protocol_features(
        &mut self,
        features: VhostUserProtocolFeatures,
    ) -> Result<(), vhost::Error> {
        <Frontend as VhostUserFrontend>::set_protocol_features(self, features)
    }
    fn set_vring_enable(&mut self, queue_index: usize, enable: bool) -> Result<(), vhost::Error> {
        <Frontend as VhostUserFrontend>::set_vring_enable(self, queue_index, enable)
    }
    fn get_config(
        &mut self,
        offset: u32,
        size: u32,
        flags: VhostUserConfigFlags,
        buf: &[u8],
    ) -> Result<(VhostUserConfig, VhostUserConfigPayload), vhost::Error> {
        <Frontend as VhostUserFrontend>::get_config(self, offset, size, flags, buf)
    }
    fn set_config(
        &mut self,
        offset: u32,
        flags: VhostUserConfigFlags,
        buf: &[u8],
    ) -> Result<(), vhost::Error> {
        <Frontend as VhostUserFrontend>::set_config(self, offset, flags, buf)
    }
}
/// Production vhost-user handle, backed by the real `Frontend` from the `vhost` crate.
pub type VhostUserHandle = VhostUserHandleImpl<Frontend>;
/// vhost-user socket handle
#[derive(Clone)]
pub struct VhostUserHandleImpl<T: VhostUserHandleBackend> {
    /// Backend connection (the real `Frontend` in production, mocks in tests).
    pub vu: T,
    /// Path of the Unix domain socket this handle connected to.
    pub socket_path: String,
}
impl<T: VhostUserHandleBackend> std::fmt::Debug for VhostUserHandleImpl<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // The backend `vu` is deliberately omitted from the output: the generic
        // backend type is not required to implement `Debug`.
        let mut builder = f.debug_struct("VhostUserHandle");
        builder.field("socket_path", &self.socket_path);
        builder.finish()
    }
}
impl<T: VhostUserHandleBackend> VhostUserHandleImpl<T> {
    /// Connect to the vhost-user backend socket and mark self as an
    /// owner of the session.
    pub fn new(socket_path: &str, num_queues: u64) -> Result<Self, VhostUserError> {
        let stream = UnixStream::connect(socket_path).map_err(VhostUserError::Connect)?;
        let vu = T::from_stream(stream, num_queues);
        vu.set_owner().map_err(VhostUserError::VhostUserSetOwner)?;
        Ok(Self {
            vu,
            socket_path: socket_path.to_string(),
        })
    }
    /// Set vhost-user features to the backend.
    pub fn set_features(&self, features: u64) -> Result<(), VhostUserError> {
        self.vu
            .set_features(features)
            .map_err(VhostUserError::VhostUserSetFeatures)
    }
    /// Set vhost-user protocol features to the backend.
    ///
    /// A no-op unless `acked_features` contains `PROTOCOL_FEATURES` and
    /// `acked_protocol_features` forms a valid protocol-feature bitmask.
    pub fn set_protocol_features(
        &mut self,
        acked_features: u64,
        acked_protocol_features: u64,
    ) -> Result<(), VhostUserError> {
        if acked_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() != 0
            && let Some(acked_protocol_features) =
                VhostUserProtocolFeatures::from_bits(acked_protocol_features)
        {
            self.vu
                .set_protocol_features(acked_protocol_features)
                .map_err(VhostUserError::VhostUserSetProtocolFeatures)?;
            // With REPLY_ACK negotiated, ask the backend to acknowledge every request.
            if acked_protocol_features.contains(VhostUserProtocolFeatures::REPLY_ACK) {
                self.vu.set_hdr_flags(VhostUserHeaderFlag::NEED_REPLY);
            }
        }
        Ok(())
    }
    /// Negotiate virtio and protocol features with the backend.
    ///
    /// Returns the `(acked_features, acked_protocol_features)` bitmasks, i.e. the
    /// intersection of what we offer and what the backend supports.
    pub fn negotiate_features(
        &mut self,
        avail_features: u64,
        avail_protocol_features: VhostUserProtocolFeatures,
    ) -> Result<(u64, u64), VhostUserError> {
        // Get features from backend, do negotiation to get a feature collection which
        // both VMM and backend support.
        let backend_features = self
            .vu
            .get_features()
            .map_err(VhostUserError::VhostUserGetFeatures)?;
        let acked_features = avail_features & backend_features;
        let acked_protocol_features =
            // If frontend can negotiate protocol features.
            if acked_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() != 0 {
                let backend_protocol_features = self
                    .vu
                    .get_protocol_features()
                    .map_err(VhostUserError::VhostUserGetProtocolFeatures)?;
                let acked_protocol_features = avail_protocol_features & backend_protocol_features;
                self.vu
                    .set_protocol_features(acked_protocol_features)
                    .map_err(VhostUserError::VhostUserSetProtocolFeatures)?;
                acked_protocol_features
            } else {
                VhostUserProtocolFeatures::empty()
            };
        // With REPLY_ACK negotiated, ask the backend to acknowledge every request.
        if acked_protocol_features.contains(VhostUserProtocolFeatures::REPLY_ACK) {
            self.vu.set_hdr_flags(VhostUserHeaderFlag::NEED_REPLY);
        }
        Ok((acked_features, acked_protocol_features.bits()))
    }
    /// Update guest memory table to the backend.
    ///
    /// Every region must be file-backed (so its fd can be passed over the socket);
    /// otherwise this fails with `VhostUserNoMemoryRegion`.
    fn update_mem_table(&self, mem: &GuestMemoryMmap) -> Result<(), VhostUserError> {
        let mut regions: Vec<VhostUserMemoryRegionInfo> = Vec::new();
        for region in mem.iter() {
            let (mmap_handle, mmap_offset) = match region.file_offset() {
                Some(_file_offset) => (_file_offset.file().as_raw_fd(), _file_offset.start()),
                None => {
                    return Err(VhostUserError::VhostUserNoMemoryRegion);
                }
            };
            let vhost_user_net_reg = VhostUserMemoryRegionInfo {
                guest_phys_addr: region.start_addr().raw_value(),
                memory_size: region.len(),
                userspace_addr: region.inner.as_ptr() as u64,
                mmap_offset,
                mmap_handle,
            };
            regions.push(vhost_user_net_reg);
        }
        self.vu
            .set_mem_table(regions.as_slice())
            .map_err(VhostUserError::VhostUserSetMemTable)?;
        Ok(())
    }
    /// Set up vhost-user backend. This includes updating memory table,
    /// sending information about virtio rings and enabling them.
    ///
    /// NOTE: the order of the protocol messages below is deliberate; do not
    /// reorder without checking backend expectations (e.g. SPDK, see below).
    pub fn setup_backend(
        &mut self,
        mem: &GuestMemoryMmap,
        queues: &[(usize, &Queue, &EventFd)],
        interrupt: Arc<dyn VirtioInterrupt>,
    ) -> Result<(), VhostUserError> {
        // Provide the memory table to the backend.
        self.update_mem_table(mem)?;
        // Send set_vring_num here, since it could tell backends, like SPDK,
        // how many virt queues to be handled, which backend required to know
        // at early stage.
        for (queue_index, queue, _) in queues.iter() {
            self.vu
                .set_vring_num(*queue_index, queue.size)
                .map_err(VhostUserError::VhostUserSetVringNum)?;
        }
        for (queue_index, queue, queue_evt) in queues.iter() {
            // Translate the guest-physical ring addresses to host virtual addresses
            // before handing them to the backend.
            let config_data = VringConfigData {
                queue_max_size: queue.max_size,
                queue_size: queue.size,
                flags: 0u32,
                desc_table_addr: mem
                    .get_host_address(queue.desc_table_address)
                    .map_err(VhostUserError::DescriptorTableAddress)?
                    as u64,
                used_ring_addr: mem
                    .get_host_address(queue.used_ring_address)
                    .map_err(VhostUserError::UsedAddress)? as u64,
                avail_ring_addr: mem
                    .get_host_address(queue.avail_ring_address)
                    .map_err(VhostUserError::AvailAddress)? as u64,
                log_addr: None,
            };
            self.vu
                .set_vring_addr(*queue_index, &config_data)
                .map_err(VhostUserError::VhostUserSetVringAddr)?;
            self.vu
                .set_vring_base(*queue_index, queue.avail_ring_idx_get())
                .map_err(VhostUserError::VhostUserSetVringBase)?;
            // No matter the queue, we set irq_evt for signaling the guest that buffers were
            // consumed.
            self.vu
                .set_vring_call(
                    *queue_index,
                    interrupt
                        .notifier(VirtioInterruptType::Queue(
                            (*queue_index).try_into().unwrap_or_else(|_| {
                                panic!("vhost-user: invalid queue index: {}", *queue_index)
                            }),
                        ))
                        .as_ref()
                        .unwrap(),
                )
                .map_err(VhostUserError::VhostUserSetVringCall)?;
            self.vu
                .set_vring_kick(*queue_index, queue_evt)
                .map_err(VhostUserError::VhostUserSetVringKick)?;
            self.vu
                .set_vring_enable(*queue_index, true)
                .map_err(VhostUserError::VhostUserSetVringEnable)?;
        }
        Ok(())
    }
}
#[cfg(test)]
pub(crate) mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use std::fs::File;
use vmm_sys_util::tempfile::TempFile;
use super::*;
use crate::devices::virtio::test_utils::default_interrupt;
use crate::test_utils::create_tmp_socket;
use crate::vstate::memory;
use crate::vstate::memory::{GuestAddress, GuestRegionMmapExt};
/// Builds a `GuestMemoryMmap` for tests, backed by `file`, with one private
/// (MAP_PRIVATE) DRAM region per `(start address, size)` entry in `regions`.
pub(crate) fn create_mem(file: File, regions: &[(GuestAddress, usize)]) -> GuestMemoryMmap {
    GuestMemoryMmap::from_regions(
        memory::create(
            regions.iter().copied(),
            libc::MAP_PRIVATE,
            Some(file),
            false,
        )
        .unwrap()
        .into_iter()
        .map(|region| GuestRegionMmapExt::dram_from_mmap_region(region, 0))
        .collect(),
    )
    .unwrap()
}
#[test]
fn test_new() {
    // `Cell<bool>` provides all the interior mutability this mock needs;
    // using it instead of `UnsafeCell` removes the `unsafe` blocks entirely.
    struct MockFrontend {
        sock: UnixStream,
        max_queue_num: u64,
        // Records whether `set_owner` was invoked by `VhostUserHandleImpl::new`.
        is_owner: std::cell::Cell<bool>,
    }
    impl VhostUserHandleBackend for MockFrontend {
        fn from_stream(sock: UnixStream, max_queue_num: u64) -> Self {
            Self {
                sock,
                max_queue_num,
                is_owner: std::cell::Cell::new(false),
            }
        }
        fn set_owner(&self) -> Result<(), vhost::Error> {
            self.is_owner.set(true);
            Ok(())
        }
    }
    let max_queue_num = 69;
    let (_tmp_dir, tmp_socket_path) = create_tmp_socket();
    // Creation of the VhostUserHandleImpl correctly connects to the socket, sets the maximum
    // number of queues and sets itself as an owner of the session.
    let vuh =
        VhostUserHandleImpl::<MockFrontend>::new(&tmp_socket_path, max_queue_num).unwrap();
    assert_eq!(
        vuh.vu
            .sock
            .peer_addr()
            .unwrap()
            .as_pathname()
            .unwrap()
            .to_str()
            .unwrap(),
        &tmp_socket_path,
    );
    assert_eq!(vuh.vu.max_queue_num, max_queue_num);
    assert!(vuh.vu.is_owner.get());
}
#[test]
fn test_set_features() {
    // `Cell<u64>` is sufficient here (u64 is `Copy`), so the mock needs no
    // `UnsafeCell` and no `unsafe` blocks.
    struct MockFrontend {
        // Last feature bitmap passed to `set_features`.
        features: std::cell::Cell<u64>,
    }
    impl VhostUserHandleBackend for MockFrontend {
        fn set_features(&self, features: u64) -> Result<(), vhost::Error> {
            self.features.set(features);
            Ok(())
        }
    }
    // VhostUserHandleImpl can correctly set backend features.
    let vuh = VhostUserHandleImpl {
        vu: MockFrontend { features: 0.into() },
        socket_path: "".to_string(),
    };
    vuh.set_features(0x69).unwrap();
    assert_eq!(vuh.vu.features.get(), 0x69);
}
#[test]
// Verifies that `set_protocol_features` forwards protocol features and the
// NEED_REPLY header flag to the backend only when the right acked bits are set.
fn test_set_protocol_features() {
    struct MockFrontend {
        // Last protocol-feature set forwarded to the backend.
        protocol_features: VhostUserProtocolFeatures,
        // Last header flags set on the backend connection.
        hdr_flags: std::cell::UnsafeCell<VhostUserHeaderFlag>,
    }
    impl VhostUserHandleBackend for MockFrontend {
        fn set_hdr_flags(&self, flags: VhostUserHeaderFlag) {
            unsafe { *self.hdr_flags.get() = flags };
        }
        fn set_protocol_features(
            &mut self,
            features: VhostUserProtocolFeatures,
        ) -> Result<(), vhost::Error> {
            self.protocol_features = features;
            Ok(())
        }
    }
    let mut vuh = VhostUserHandleImpl {
        vu: MockFrontend {
            protocol_features: VhostUserProtocolFeatures::empty(),
            hdr_flags: std::cell::UnsafeCell::new(VhostUserHeaderFlag::empty()),
        },
        socket_path: "".to_string(),
    };
    // No protocol features are set if acked_features do not have PROTOCOL_FEATURES bit
    let acked_features = 0;
    let acked_protocol_features = VhostUserProtocolFeatures::empty();
    vuh.set_protocol_features(acked_features, acked_protocol_features.bits())
        .unwrap();
    assert_eq!(vuh.vu.protocol_features, VhostUserProtocolFeatures::empty());
    assert_eq!(
        unsafe { &*vuh.vu.hdr_flags.get() }.bits(),
        VhostUserHeaderFlag::empty().bits()
    );
    // Still no protocol features are set without the PROTOCOL_FEATURES bit,
    // even if every protocol feature was acked.
    let acked_features = 0;
    let acked_protocol_features = VhostUserProtocolFeatures::all();
    vuh.set_protocol_features(acked_features, acked_protocol_features.bits())
        .unwrap();
    assert_eq!(vuh.vu.protocol_features, VhostUserProtocolFeatures::empty());
    assert_eq!(
        unsafe { &*vuh.vu.hdr_flags.get() }.bits(),
        VhostUserHeaderFlag::empty().bits()
    );
    // If not REPLY_ACK present, no header is set
    let acked_features = VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
    let mut acked_protocol_features = VhostUserProtocolFeatures::all();
    acked_protocol_features.set(VhostUserProtocolFeatures::REPLY_ACK, false);
    vuh.set_protocol_features(acked_features, acked_protocol_features.bits())
        .unwrap();
    assert_eq!(vuh.vu.protocol_features, acked_protocol_features);
    assert_eq!(
        unsafe { &*vuh.vu.hdr_flags.get() }.bits(),
        VhostUserHeaderFlag::empty().bits()
    );
    // If REPLY_ACK present, header is set
    let acked_features = VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
    let acked_protocol_features = VhostUserProtocolFeatures::all();
    vuh.set_protocol_features(acked_features, acked_protocol_features.bits())
        .unwrap();
    assert_eq!(vuh.vu.protocol_features, acked_protocol_features);
    assert_eq!(
        unsafe { &*vuh.vu.hdr_flags.get() }.bits(),
        VhostUserHeaderFlag::NEED_REPLY.bits()
    );
}
#[test]
// Verifies feature negotiation: the acked sets must be the intersection of
// frontend and backend capabilities, and NEED_REPLY is only turned on when
// both PROTOCOL_FEATURES and REPLY_ACK survive the negotiation.
fn test_negotiate_features() {
    struct MockFrontend {
        // Features the mocked backend advertises.
        features: u64,
        // Protocol features the mocked backend advertises / last set on it.
        protocol_features: VhostUserProtocolFeatures,
        // Last header flags set on the backend connection.
        hdr_flags: std::cell::UnsafeCell<VhostUserHeaderFlag>,
    }
    impl VhostUserHandleBackend for MockFrontend {
        fn set_hdr_flags(&self, flags: VhostUserHeaderFlag) {
            unsafe { *self.hdr_flags.get() = flags };
        }
        fn get_features(&self) -> Result<u64, vhost::Error> {
            Ok(self.features)
        }
        fn get_protocol_features(&mut self) -> Result<VhostUserProtocolFeatures, vhost::Error> {
            Ok(self.protocol_features)
        }
        fn set_protocol_features(
            &mut self,
            features: VhostUserProtocolFeatures,
        ) -> Result<(), vhost::Error> {
            self.protocol_features = features;
            Ok(())
        }
    }
    let mut vuh = VhostUserHandleImpl {
        vu: MockFrontend {
            features: 0,
            protocol_features: VhostUserProtocolFeatures::empty(),
            hdr_flags: std::cell::UnsafeCell::new(VhostUserHeaderFlag::empty()),
        },
        socket_path: "".to_string(),
    };
    // If nothing is available, nothing is negotiated
    let avail_features = 0;
    let avail_protocol_features = VhostUserProtocolFeatures::empty();
    let (acked_features, acked_protocol_features) = vuh
        .negotiate_features(avail_features, avail_protocol_features)
        .unwrap();
    assert_eq!(acked_features, avail_features);
    assert_eq!(acked_protocol_features, avail_protocol_features.bits());
    assert_eq!(vuh.vu.protocol_features, VhostUserProtocolFeatures::empty());
    assert_eq!(
        unsafe { &*vuh.vu.hdr_flags.get() }.bits(),
        VhostUserHeaderFlag::empty().bits()
    );
    // If neither frontend avail_features nor backend avail_features contain PROTOCOL_FEATURES
    // bit, only features are negotiated
    let mut avail_features = VhostUserVirtioFeatures::all();
    avail_features.set(VhostUserVirtioFeatures::PROTOCOL_FEATURES, false);
    // Pretend backend has same features as frontend
    vuh.vu.features = avail_features.bits();
    let avail_protocol_features = VhostUserProtocolFeatures::empty();
    let (acked_features, acked_protocol_features) = vuh
        .negotiate_features(avail_features.bits(), avail_protocol_features)
        .unwrap();
    assert_eq!(acked_features, avail_features.bits());
    assert_eq!(acked_protocol_features, avail_protocol_features.bits());
    assert_eq!(vuh.vu.protocol_features, VhostUserProtocolFeatures::empty());
    assert_eq!(
        unsafe { &*vuh.vu.hdr_flags.get() }.bits(),
        VhostUserHeaderFlag::empty().bits()
    );
    // If PROTOCOL_FEATURES is negotiated, but REPLY_ACK is not, headers are not set
    let avail_features = VhostUserVirtioFeatures::all();
    // Pretend backend has same features as frontend
    vuh.vu.features = avail_features.bits();
    let mut avail_protocol_features = VhostUserProtocolFeatures::empty();
    avail_protocol_features.set(VhostUserProtocolFeatures::CONFIG, true);
    // Backend advertises a superset (CONFIG + PAGEFAULT); only the
    // intersection with the frontend's set (CONFIG) must be acked.
    let mut backend_protocol_features = VhostUserProtocolFeatures::empty();
    backend_protocol_features.set(VhostUserProtocolFeatures::CONFIG, true);
    backend_protocol_features.set(VhostUserProtocolFeatures::PAGEFAULT, true);
    vuh.vu.protocol_features = backend_protocol_features;
    let (acked_features, acked_protocol_features) = vuh
        .negotiate_features(avail_features.bits(), avail_protocol_features)
        .unwrap();
    assert_eq!(acked_features, avail_features.bits());
    assert_eq!(acked_protocol_features, avail_protocol_features.bits());
    assert_eq!(vuh.vu.protocol_features, avail_protocol_features);
    assert_eq!(
        unsafe { &*vuh.vu.hdr_flags.get() }.bits(),
        VhostUserHeaderFlag::empty().bits()
    );
    // If PROTOCOL_FEATURES and REPLY_ACK are negotiated
    let avail_features = VhostUserVirtioFeatures::all();
    // Pretend backend has same features as frontend
    vuh.vu.features = avail_features.bits();
    let mut avail_protocol_features = VhostUserProtocolFeatures::empty();
    avail_protocol_features.set(VhostUserProtocolFeatures::REPLY_ACK, true);
    // Pretend backend has same features as frontend
    vuh.vu.protocol_features = avail_protocol_features;
    let (acked_features, acked_protocol_features) = vuh
        .negotiate_features(avail_features.bits(), avail_protocol_features)
        .unwrap();
    assert_eq!(acked_features, avail_features.bits());
    assert_eq!(acked_protocol_features, avail_protocol_features.bits());
    assert_eq!(vuh.vu.protocol_features, avail_protocol_features);
    assert_eq!(
        unsafe { &*vuh.vu.hdr_flags.get() }.bits(),
        VhostUserHeaderFlag::NEED_REPLY.bits(),
    );
}
#[test]
fn test_update_mem_table() {
    // `RefCell` gives us safe interior mutability for the recorded regions,
    // replacing the `UnsafeCell` + raw-pointer accesses the mock used before.
    struct MockFrontend {
        // Every region slice passed to `set_mem_table`, accumulated in order.
        regions: std::cell::RefCell<Vec<VhostUserMemoryRegionInfo>>,
    }
    impl VhostUserHandleBackend for MockFrontend {
        fn set_mem_table(
            &self,
            regions: &[VhostUserMemoryRegionInfo],
        ) -> Result<(), vhost::Error> {
            self.regions.borrow_mut().extend_from_slice(regions);
            Ok(())
        }
    }
    let vuh = VhostUserHandleImpl {
        vu: MockFrontend {
            regions: std::cell::RefCell::new(vec![]),
        },
        socket_path: "".to_string(),
    };
    let region_size = 0x10000;
    let file = TempFile::new().unwrap().into_file();
    let file_size = 2 * region_size;
    file.set_len(file_size as u64).unwrap();
    let regions = vec![
        (GuestAddress(0x0), region_size),
        (GuestAddress(0x10000), region_size),
    ];
    let guest_memory = create_mem(file, &regions);
    vuh.update_mem_table(&guest_memory).unwrap();
    // VhostUserMemoryRegionInfo should be correctly set by the VhostUserHandleImpl
    let expected_regions = guest_memory
        .iter()
        .map(|region| VhostUserMemoryRegionInfo {
            guest_phys_addr: region.start_addr().raw_value(),
            memory_size: region.len(),
            userspace_addr: region.inner.as_ptr() as u64,
            mmap_offset: region.file_offset().unwrap().start(),
            mmap_handle: region.file_offset().unwrap().file().as_raw_fd(),
        })
        .collect::<Vec<_>>();
    let recorded = vuh.vu.regions.borrow();
    for (region, expected) in recorded.iter().zip(expected_regions) {
        // VhostUserMemoryRegionInfo does not implement Eq.
        assert_eq!(region.guest_phys_addr, expected.guest_phys_addr);
        assert_eq!(region.memory_size, expected.memory_size);
        assert_eq!(region.userspace_addr, expected.userspace_addr);
        assert_eq!(region.mmap_offset, expected.mmap_offset);
        assert_eq!(region.mmap_handle, expected.mmap_handle);
    }
}
#[test]
fn test_setup_backend() {
#[derive(Default)]
struct VringData {
index: usize,
size: u16,
config: VringConfigData,
base: u16,
call: i32,
kick: i32,
enable: bool,
}
struct MockFrontend {
vrings: std::cell::UnsafeCell<Vec<VringData>>,
}
impl VhostUserHandleBackend for MockFrontend {
fn set_mem_table(
&self,
_regions: &[VhostUserMemoryRegionInfo],
) -> Result<(), vhost::Error> {
Ok(())
}
fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<(), vhost::Error> {
unsafe {
(*self.vrings.get()).push(VringData {
index: queue_index,
size: num,
..Default::default()
})
};
Ok(())
}
fn set_vring_addr(
&self,
queue_index: usize,
config_data: &VringConfigData,
) -> Result<(), vhost::Error> {
unsafe { (&mut (*self.vrings.get()))[queue_index].config = *config_data };
Ok(())
}
fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<(), vhost::Error> {
unsafe { (&mut (*self.vrings.get()))[queue_index].base = base };
Ok(())
}
fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<(), vhost::Error> {
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/queue.rs | src/vmm/src/devices/virtio/queue.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::num::Wrapping;
use std::sync::atomic::{Ordering, fence};
use crate::logger::error;
use crate::utils::u64_to_usize;
use crate::vstate::memory::{Bitmap, ByteValued, GuestAddress, GuestMemory};
pub const VIRTQ_DESC_F_NEXT: u16 = 0x1;
pub const VIRTQ_DESC_F_WRITE: u16 = 0x2;
/// Max size of virtio queues offered by firecracker's virtio devices.
pub(super) const FIRECRACKER_MAX_QUEUE_SIZE: u16 = 256;
// GuestMemoryMmap::read_obj_from_addr() will be used to fetch the descriptor,
// which has an explicit constraint that the entire descriptor doesn't
// cross the page boundary. Otherwise the descriptor may be split into
// two mmap regions which causes failure of GuestMemoryMmap::read_obj_from_addr().
//
// The Virtio Spec 1.0 defines the alignment of VirtIO descriptor is 16 bytes,
// which fulfills the explicit constraint of GuestMemoryMmap::read_obj_from_addr().
#[derive(Debug, thiserror::Error, displaydoc::Display)]
// NOTE: the `///` comments on the variants below are not plain documentation;
// `displaydoc` turns each of them into that variant's `Display` message, so
// changing them changes user-visible error text.
pub enum QueueError {
    /// Descriptor index out of bounds: {0}.
    DescIndexOutOfBounds(u16),
    /// Failed to write value into the virtio queue used ring: {0}
    MemoryError(#[from] vm_memory::GuestMemoryError),
    /// Pointer is not aligned properly: {0:#x} not {1}-byte aligned.
    PointerNotAligned(usize, usize),
    /// Attempt to use virtio queue that is not marked ready
    NotReady,
    /// Virtio queue with invalid size: {0}
    InvalidSize(u16),
}
/// Error type indicating the guest configured a virtio queue such that the avail_idx field would
/// indicate there are more descriptors to process than the queue actually has space for.
///
/// Should this error bubble up to the event loop, we exit Firecracker, since this could be a
/// potential malicious driver scenario. This way we also eliminate the risk of repeatedly
/// logging and potentially clogging the microVM through the log system.
#[derive(Debug, thiserror::Error, PartialEq, Eq)]
#[error(
    "The number of available virtio descriptors {reported_len} is greater than queue size: \
     {queue_size}!"
)]
pub struct InvalidAvailIdx {
    // The configured size of the offending queue.
    queue_size: u16,
    // The number of pending descriptors the driver claimed (via avail_idx).
    reported_len: u16,
}
/// A virtio descriptor constraints with C representative.
/// Taken from Virtio spec:
/// https://docs.oasis-open.org/virtio/virtio/v1.1/csprd01/virtio-v1.1-csprd01.html#x1-430008
/// 2.6.5 The Virtqueue Descriptor Table
#[repr(C)]
#[derive(Debug, Default, Clone, Copy)]
pub struct Descriptor {
    /// Guest physical address of the buffer.
    pub addr: u64,
    /// Length of the buffer in bytes.
    pub len: u32,
    /// VIRTQ_DESC_F_* flags (NEXT, WRITE, ...).
    pub flags: u16,
    /// Index of the next descriptor, valid only when the NEXT flag is set.
    pub next: u16,
}
// SAFETY: `Descriptor` is a POD and contains no padding.
unsafe impl ByteValued for Descriptor {}
/// A virtio used element in the used ring.
/// Taken from Virtio spec:
/// https://docs.oasis-open.org/virtio/virtio/v1.1/csprd01/virtio-v1.1-csprd01.html#x1-430008
/// 2.6.8 The Virtqueue Used Ring
#[repr(C)]
#[derive(Debug, Default, Clone, Copy)]
pub struct UsedElement {
    /// Index of the head of the consumed descriptor chain.
    pub id: u32,
    /// Total number of bytes the device wrote into the chain's buffers.
    pub len: u32,
}
// SAFETY: `UsedElement` is a POD and contains no padding.
unsafe impl ByteValued for UsedElement {}
/// A virtio descriptor chain.
#[derive(Debug, Copy, Clone)]
pub struct DescriptorChain {
    // Host pointer to the start of the descriptor table this chain lives in.
    desc_table_ptr: *const Descriptor,
    // Number of entries in the descriptor table; used for bounds checks.
    queue_size: u16,
    ttl: u16, // used to prevent infinite chain cycles
    /// Index into the descriptor table
    pub index: u16,
    /// Guest physical address of device specific data
    pub addr: GuestAddress,
    /// Length of device specific data
    pub len: u32,
    /// Includes next, write, and indirect bits
    pub flags: u16,
    /// Index into the descriptor table of the next descriptor if flags has
    /// the next bit set
    pub next: u16,
}
impl DescriptorChain {
/// Creates a new `DescriptorChain` from the given memory and descriptor table.
///
/// Note that the desc_table and queue_size are assumed to be validated by the caller.
/// Returns `None` when `index` is out of bounds or the descriptor's `next`
/// link points outside the table.
fn checked_new(desc_table_ptr: *const Descriptor, queue_size: u16, index: u16) -> Option<Self> {
    if queue_size <= index {
        return None;
    }
    // SAFETY:
    // index is in 0..queue_size bounds
    let desc = unsafe { desc_table_ptr.add(usize::from(index)).read_volatile() };
    let chain = DescriptorChain {
        desc_table_ptr,
        queue_size,
        // Starting ttl at queue_size caps chain length at the table size,
        // so a cyclic `next` chain cannot loop forever.
        ttl: queue_size,
        index,
        addr: GuestAddress(desc.addr),
        len: desc.len,
        flags: desc.flags,
        next: desc.next,
    };
    if chain.is_valid() { Some(chain) } else { None }
}
/// A chain element is well-formed when it either terminates here, or its
/// `next` link still points inside the descriptor table.
fn is_valid(&self) -> bool {
    if self.has_next() {
        self.next < self.queue_size
    } else {
        true
    }
}

/// Gets if this descriptor chain has another descriptor chain linked after it.
pub fn has_next(&self) -> bool {
    let next_flag_set = self.flags & VIRTQ_DESC_F_NEXT != 0;
    next_flag_set && self.ttl > 1
}

/// If the driver designated this as a write only descriptor.
///
/// If this is false, this descriptor is read only.
/// Write only means the emulated device can write and the driver can read.
pub fn is_write_only(&self) -> bool {
    self.flags & VIRTQ_DESC_F_WRITE == VIRTQ_DESC_F_WRITE
}
/// Gets the next descriptor in this descriptor chain, if there is one.
///
/// Note that this is distinct from the next descriptor chain returned by `AvailIter`, which is
/// the head of the next _available_ descriptor chain.
pub fn next_descriptor(&self) -> Option<Self> {
    if !self.has_next() {
        return None;
    }
    let mut next_chain =
        DescriptorChain::checked_new(self.desc_table_ptr, self.queue_size, self.next)?;
    // Decrement the remaining hop budget so cyclic chains terminate.
    next_chain.ttl = self.ttl - 1;
    Some(next_chain)
}
}
/// Iterator that walks a descriptor chain from head to tail.
#[derive(Debug)]
pub struct DescriptorIterator(Option<DescriptorChain>);

impl IntoIterator for DescriptorChain {
    type Item = DescriptorChain;
    type IntoIter = DescriptorIterator;

    fn into_iter(self) -> Self::IntoIter {
        DescriptorIterator(Some(self))
    }
}

impl Iterator for DescriptorIterator {
    type Item = DescriptorChain;

    fn next(&mut self) -> Option<Self::Item> {
        // Yield the current element and pre-compute its successor.
        let current = self.0.take()?;
        self.0 = current.next_descriptor();
        Some(current)
    }
}
#[derive(Clone, Debug, PartialEq, Eq)]
/// A virtio queue's parameters.
pub struct Queue {
    /// The maximal size in elements offered by the device
    pub max_size: u16,
    /// The queue size in elements the driver selected
    pub size: u16,
    /// Indicates if the queue is finished with configuration
    pub ready: bool,
    /// Guest physical address of the descriptor table
    pub desc_table_address: GuestAddress,
    /// Guest physical address of the available ring
    pub avail_ring_address: GuestAddress,
    /// Guest physical address of the used ring
    pub used_ring_address: GuestAddress,
    /// Host virtual address pointer to the descriptor table
    /// in the guest memory.
    /// Getting access to the underlying
    /// data structure should only occur after the
    /// struct is initialized with `new`.
    /// Representation of in memory struct layout.
    /// struct DescriptorTable = [Descriptor; <queue_size>]
    pub desc_table_ptr: *const Descriptor,
    /// Host virtual address pointer to the available ring
    /// in the guest memory.
    /// Getting access to the underlying
    /// data structure should only occur after the
    /// struct is initialized with `new`.
    ///
    /// Representation of in memory struct layout.
    /// struct AvailRing {
    ///     flags: u16,
    ///     idx: u16,
    ///     ring: [u16; <queue size>],
    ///     used_event: u16,
    /// }
    ///
    /// Because all types in the AvailRing are u16,
    /// we store pointer as *mut u16 for simplicity.
    pub avail_ring_ptr: *mut u16,
    /// Host virtual address pointer to the used ring
    /// in the guest memory.
    /// Getting access to the underlying
    /// data structure should only occur after the
    /// struct is initialized with `new`.
    ///
    /// Representation of in memory struct layout.
    // struct UsedRing {
    //     flags: u16,
    //     idx: u16,
    //     ring: [UsedElement; <queue size>],
    //     avail_event: u16,
    // }
    /// Because types in the UsedRing are different (u16 and u32)
    /// store pointer as *mut u8.
    pub used_ring_ptr: *mut u8,
    /// Free-running index of the next avail-ring entry to process;
    /// only its value modulo `size` is used to index the ring.
    pub next_avail: Wrapping<u16>,
    /// Free-running index of the next used-ring slot to fill.
    pub next_used: Wrapping<u16>,
    /// VIRTIO_F_RING_EVENT_IDX negotiated (notification suppression enabled)
    pub uses_notif_suppression: bool,
    /// The number of added used buffers since last guest kick
    pub num_added: Wrapping<u16>,
}

/// SAFETY: Queue is Send, because we use volatile memory accesses when
/// working with pointers. These pointers are not copied or store anywhere
/// else. We assume guest will not give different queues same guest memory
/// addresses.
unsafe impl Send for Queue {}
#[allow(clippy::len_without_is_empty)]
impl Queue {
/// Constructs an empty virtio queue with the given `max_size`.
///
/// Guest addresses start at zero and host pointers are null; the queue is
/// only usable after the driver configures it and `initialize` is called.
pub fn new(max_size: u16) -> Queue {
    Queue {
        max_size,
        // Until the driver selects a size, default to the maximum offered.
        size: max_size,
        ready: false,
        desc_table_address: GuestAddress(0),
        avail_ring_address: GuestAddress(0),
        used_ring_address: GuestAddress(0),
        desc_table_ptr: std::ptr::null(),
        avail_ring_ptr: std::ptr::null_mut(),
        used_ring_ptr: std::ptr::null_mut(),
        next_avail: Wrapping(0),
        next_used: Wrapping(0),
        uses_notif_suppression: false,
        num_added: Wrapping(0),
    }
}
/// Byte size of the descriptor table: one `Descriptor` per queue entry.
fn desc_table_size(&self) -> usize {
    usize::from(self.size) * std::mem::size_of::<Descriptor>()
}

/// Byte size of the avail ring: `flags`, `idx` and `used_event` plus one
/// `u16` ring slot per queue entry — all fields are `u16`.
fn avail_ring_size(&self) -> usize {
    (3 + usize::from(self.size)) * std::mem::size_of::<u16>()
}

/// Byte size of the used ring: `flags`, `idx` and `avail_event` (`u16` each)
/// plus one `UsedElement` ring slot per queue entry.
fn used_ring_size(&self) -> usize {
    3 * std::mem::size_of::<u16>() + usize::from(self.size) * std::mem::size_of::<UsedElement>()
}
/// Validates the alignment of `addr`, marks `len` bytes at `addr` dirty and
/// returns the host virtual address backing that guest-physical range.
///
/// `alignment` is assumed to be a power of two (callers pass 16, 2 or 4),
/// which is what makes the `& (alignment - 1)` mask test below correct.
fn get_aligned_slice_ptr<T, M: GuestMemory>(
    &self,
    mem: &M,
    addr: GuestAddress,
    len: usize,
    alignment: usize,
) -> Result<*mut T, QueueError> {
    // Guest memory base address is page aligned, so as long as alignment divides page size,
    // It suffices to check that the GPA is properly aligned (e.g. we don't need to recheck
    // the HVA).
    if addr.0 & (alignment as u64 - 1) != 0 {
        return Err(QueueError::PointerNotAligned(
            u64_to_usize(addr.0),
            alignment,
        ));
    }
    let slice = mem.get_slice(addr, len).map_err(QueueError::MemoryError)?;
    // The device will write to this memory through the raw pointer, bypassing
    // the dirty-bitmap tracking, so mark the whole range dirty up front.
    slice.bitmap().mark_dirty(0, len);
    Ok(slice.ptr_guard_mut().as_ptr().cast())
}
/// Set up pointers to the queue objects in the guest memory
/// and mark memory dirty for those objects
///
/// # Errors
/// Returns `NotReady` if the driver has not finished configuration,
/// `InvalidSize` if `size` is zero, not a power of two, or exceeds `max_size`,
/// and `MemoryError`/`PointerNotAligned` if any ring address is bad.
pub fn initialize<M: GuestMemory>(&mut self, mem: &M) -> Result<(), QueueError> {
    if !self.ready {
        return Err(QueueError::NotReady);
    }
    // `size & (size - 1) != 0` rejects sizes that are not powers of two,
    // as required by the virtio spec.
    if self.size > self.max_size || self.size == 0 || (self.size & (self.size - 1)) != 0 {
        return Err(QueueError::InvalidSize(self.size));
    }
    // All the below pointers are verified to be aligned properly; otherwise some methods (e.g.
    // `read_volatile()`) will panic. Such an unalignment is possible when restored from a
    // broken/fuzzed snapshot.
    //
    // Specification of those pointers' alignments
    // https://docs.oasis-open.org/virtio/virtio/v1.2/csd01/virtio-v1.2-csd01.html#x1-350007
    // > ================ ==========
    // > Virtqueue Part   Alignment
    // > ================ ==========
    // > Descriptor Table 16
    // > Available Ring   2
    // > Used Ring        4
    // > ================ ==========
    self.desc_table_ptr =
        self.get_aligned_slice_ptr(mem, self.desc_table_address, self.desc_table_size(), 16)?;
    self.avail_ring_ptr =
        self.get_aligned_slice_ptr(mem, self.avail_ring_address, self.avail_ring_size(), 2)?;
    self.used_ring_ptr =
        self.get_aligned_slice_ptr(mem, self.used_ring_address, self.used_ring_size(), 4)?;
    Ok(())
}
/// Get AvailRing.idx
///
/// Note: `avail_ring_ptr` is a `*mut u16`, so `.add(n)` below advances by
/// `n` u16 elements, not bytes.
#[inline(always)]
pub fn avail_ring_idx_get(&self) -> u16 {
    // SAFETY: `idx` is 1 u16 away from the start
    unsafe { self.avail_ring_ptr.add(1).read_volatile() }
}

/// Get element from AvailRing.ring at index
/// # Safety
/// The `index` parameter should be in 0..queue_size bounds
#[inline(always)]
unsafe fn avail_ring_ring_get(&self, index: usize) -> u16 {
    // SAFETY: `ring` is 2 u16 away from the start
    unsafe { self.avail_ring_ptr.add(2).add(index).read_volatile() }
}

/// Get AvailRing.used_event
#[inline(always)]
pub fn avail_ring_used_event_get(&self) -> u16 {
    // SAFETY: `used_event` is 2 + self.len u16 away from the start.
    // The unchecked_add cannot overflow: 2 + u16::MAX fits in usize.
    unsafe {
        self.avail_ring_ptr
            .add(2_usize.unchecked_add(usize::from(self.size)))
            .read_volatile()
    }
}

/// Set UsedRing.idx
///
/// Note: unlike `avail_ring_ptr`, `used_ring_ptr` is a `*mut u8`, so all
/// offsets below are expressed in bytes.
#[inline(always)]
pub fn used_ring_idx_set(&mut self, val: u16) {
    // SAFETY: `idx` is 1 u16 away from the start
    unsafe {
        self.used_ring_ptr
            .add(std::mem::size_of::<u16>())
            .cast::<u16>()
            .write_volatile(val)
    }
}

/// Set element in UsedRing.ring at index
/// # Safety
/// The `index` parameter should be in 0..queue_size bounds
#[inline(always)]
unsafe fn used_ring_ring_set(&mut self, index: usize, val: UsedElement) {
    // SAFETY: `ring` is 2 u16 away from the start
    unsafe {
        self.used_ring_ptr
            .add(std::mem::size_of::<u16>().unchecked_mul(2))
            .cast::<UsedElement>()
            .add(index)
            .write_volatile(val)
    }
}

// Test/verification-only read of UsedRing.avail_event.
#[cfg(any(test, kani))]
#[inline(always)]
pub fn used_ring_avail_event_get(&mut self) -> u16 {
    // SAFETY: `avail_event` is 2 * u16 and self.len * UsedElement away from the start
    unsafe {
        self.used_ring_ptr
            .add(
                std::mem::size_of::<u16>().unchecked_mul(2)
                    + std::mem::size_of::<UsedElement>().unchecked_mul(usize::from(self.size)),
            )
            .cast::<u16>()
            .read_volatile()
    }
}

/// Set UsedRing.avail_event
#[inline(always)]
pub fn used_ring_avail_event_set(&mut self, val: u16) {
    // SAFETY: `avail_event` is 2 * u16 and self.len * UsedElement away from the start
    unsafe {
        self.used_ring_ptr
            .add(
                std::mem::size_of::<u16>().unchecked_mul(2)
                    + std::mem::size_of::<UsedElement>().unchecked_mul(usize::from(self.size)),
            )
            .cast::<u16>()
            .write_volatile(val)
    }
}
/// Returns the number of yet-to-be-popped descriptor chains in the avail ring.
///
/// Both indices are free-running u16 counters, so a wrapping subtraction
/// yields the correct distance even across the 65536 boundary.
pub fn len(&self) -> u16 {
    self.avail_ring_idx_get().wrapping_sub(self.next_avail.0)
}

/// Checks if the driver has made any descriptor chains available in the avail ring.
pub fn is_empty(&self) -> bool {
    self.len() == 0
}
/// Pop the first available descriptor chain from the avail ring.
///
/// If this function returns an error at runtime, then the guest has requested Firecracker
/// to process more virtio descriptors than there can possibly be given the queue's size.
/// This can be a malicious guest driver scenario, and hence a DoS attempt. If encountered
/// at runtime, correct handling is to panic!
///
/// This function however is also called on paths that can (and should) just report
/// the error to the user (e.g. loading a corrupt snapshot file), and hence cannot panic on its
/// own.
pub fn pop(&mut self) -> Result<Option<DescriptorChain>, InvalidAvailIdx> {
    let pending = self.len();
    // A well-behaved driver never advertises more chain heads than the ring
    // can hold; a larger count means the avail_idx field is bogus, which we
    // surface as an error instead of processing garbage entries.
    if pending > self.size {
        return Err(InvalidAvailIdx {
            queue_size: self.size,
            reported_len: pending,
        });
    }
    if pending == 0 {
        Ok(None)
    } else {
        Ok(self.pop_unchecked())
    }
}
/// Try to pop the first available descriptor chain from the avail ring.
/// If no descriptor is available, enable notifications.
///
/// If this function returns an error at runtime, then the guest has requested Firecracker
/// to process more virtio descriptors than there can possibly be given the queue's size.
/// This can be a malicious guest driver scenario, and hence a DoS attempt. If encountered
/// at runtime, correct handling is to panic!
///
/// This function however is also called on paths that can (and should) just report
/// the error to the user (e.g. loading a corrupt snapshot file), and hence cannot panic on its
/// own.
pub fn pop_or_enable_notification(
    &mut self,
) -> Result<Option<DescriptorChain>, InvalidAvailIdx> {
    if !self.uses_notif_suppression {
        // Without EVENT_IDX there is nothing to re-arm; a plain pop suffices.
        self.pop()
    } else if self.try_enable_notification()? {
        // Ring drained and notifications re-armed: nothing left to process.
        Ok(None)
    } else {
        // At least one descriptor raced in; consume it.
        Ok(self.pop_unchecked())
    }
}
/// Pop the first available descriptor chain from the avail ring.
///
/// # Important
/// This is an internal method that ASSUMES THAT THERE ARE AVAILABLE DESCRIPTORS. Otherwise it
/// will retrieve a descriptor that contains garbage data (obsolete/empty).
fn pop_unchecked(&mut self) -> Option<DescriptorChain> {
    // This fence ensures all subsequent reads see the updated driver writes.
    fence(Ordering::Acquire);
    // We'll need to find the first available descriptor, that we haven't yet popped.
    // In a naive notation, that would be:
    // `descriptor_table[avail_ring[next_avail]]`.
    //
    // We use `self.next_avail` to store the position, in `ring`, of the next available
    // descriptor index, with a twist: we always only increment `self.next_avail`, so the
    // actual position will be `self.next_avail % self.size`.
    let idx = self.next_avail.0 % self.size;
    // SAFETY:
    // index is bound by the queue size
    let desc_index = unsafe { self.avail_ring_ring_get(usize::from(idx)) };
    // `next_avail` is only advanced when the head index was valid; an invalid
    // head leaves the cursor in place and yields `None`.
    DescriptorChain::checked_new(self.desc_table_ptr, self.size, desc_index).inspect(|_| {
        self.next_avail += Wrapping(1);
    })
}
/// Undo the effects of the last `self.pop()` call.
/// The caller can use this, if it was unable to consume the last popped descriptor chain.
pub fn undo_pop(&mut self) {
    // Step the avail-ring cursor back by one; wrapping arithmetic handles
    // the 0 -> u16::MAX transition.
    self.next_avail = self.next_avail - Wrapping(1);
}
/// Write used element into used_ring ring.
/// - [`ring_index_offset`] is an offset added to the current [`self.next_used`] to obtain
///   actual index into used_ring.
///
/// # Errors
/// Returns `DescIndexOutOfBounds` when `desc_index` does not fit in the
/// descriptor table.
pub fn write_used_element(
    &mut self,
    ring_index_offset: u16,
    desc_index: u16,
    len: u32,
) -> Result<(), QueueError> {
    if desc_index >= self.size {
        error!(
            "attempted to add out of bounds descriptor to used ring: {}",
            desc_index
        );
        return Err(QueueError::DescIndexOutOfBounds(desc_index));
    }
    // `next_used` is free-running; reduce modulo the ring size to get the slot.
    let slot = (self.next_used + Wrapping(ring_index_offset)).0 % self.size;
    // SAFETY:
    // `slot` is bound by the queue size
    unsafe {
        self.used_ring_ring_set(
            usize::from(slot),
            UsedElement {
                id: u32::from(desc_index),
                len,
            },
        );
    }
    Ok(())
}
/// Advance queue and used ring by `n` elements.
pub fn advance_next_used(&mut self, n: u16) {
    let step = Wrapping(n);
    self.next_used += step;
    // Track how many elements were published since the last guest kick,
    // which `prepare_kick` uses for the EVENT_IDX window check.
    self.num_added += step;
}

/// Set the used ring index to the current `next_used` value.
/// Should be called once after a number of `add_used` calls.
pub fn advance_used_ring_idx(&mut self) {
    // This fence ensures all descriptor writes are visible before the index update is.
    fence(Ordering::Release);
    self.used_ring_idx_set(self.next_used.0);
}

/// Puts an available descriptor head into the used ring for use by the guest.
pub fn add_used(&mut self, desc_index: u16, len: u32) -> Result<(), QueueError> {
    self.write_used_element(0, desc_index, len)?;
    self.advance_next_used(1);
    Ok(())
}
/// Try to enable notification events from the guest driver. Returns true if notifications were
/// successfully enabled. Otherwise it means that one or more descriptors can still be consumed
/// from the available ring and we can't guarantee that there will be a notification. In this
/// case the caller might want to consume the mentioned descriptors and call this method again.
fn try_enable_notification(&mut self) -> Result<bool, InvalidAvailIdx> {
    // If the device doesn't use notification suppression, we'll continue to get notifications
    // no matter what.
    if !self.uses_notif_suppression {
        return Ok(true);
    }
    let len = self.len();
    if len != 0 {
        // The number of descriptor chain heads to process should always
        // be smaller or equal to the queue size.
        if len > self.size {
            return Err(InvalidAvailIdx {
                queue_size: self.size,
                reported_len: len,
            });
        }
        return Ok(false);
    }
    // Set the next expected avail_idx as avail_event.
    self.used_ring_avail_event_set(self.next_avail.0);
    // Make sure all subsequent reads are performed after we set avail_event.
    fence(Ordering::SeqCst);
    // If the actual avail_idx is different than next_avail one or more descriptors can still
    // be consumed from the available ring. This re-check closes the race where
    // the driver adds a descriptor between our emptiness check and the
    // avail_event update.
    Ok(self.next_avail.0 == self.avail_ring_idx_get())
}
/// Enable notification suppression.
pub fn enable_notif_suppression(&mut self) {
    self.uses_notif_suppression = true;
}

/// Check if we need to kick the guest.
///
/// Please note this method has side effects: once it returns `true`, it considers the
/// driver will actually be notified, and won't return `true` again until the driver
/// updates `used_event` and/or the notification conditions hold once more.
///
/// This is similar to the `vring_need_event()` method implemented by the Linux kernel.
pub fn prepare_kick(&mut self) -> bool {
    // If the device doesn't use notification suppression, always return true
    if !self.uses_notif_suppression {
        return true;
    }
    // We need to expose used array entries before checking the used_event.
    fence(Ordering::SeqCst);
    let new = self.next_used;
    let old = self.next_used - self.num_added;
    let used_event = Wrapping(self.avail_ring_used_event_get());
    // Side effect: the batch counter is consumed here, so a second call
    // without intervening `advance_next_used` sees an empty (old, new] window.
    self.num_added = Wrapping(0);
    // Wrapping comparison: kick only when `used_event` falls inside the
    // half-open interval (old, new] of indices published since the last kick.
    new - used_event - Wrapping(1) < new - old
}
/// Resets the Virtio Queue
///
/// Returns the queue to its pristine, pre-activation state: not ready, maximum
/// size, zeroed ring addresses and indices, and notification suppression off.
pub(crate) fn reset(&mut self) {
    self.ready = false;
    self.size = self.max_size;
    self.num_added = Wrapping(0);
    self.next_used = Wrapping(0);
    self.next_avail = Wrapping(0);
    self.uses_notif_suppression = false;
    self.desc_table_address = GuestAddress(0);
    self.avail_ring_address = GuestAddress(0);
    self.used_ring_address = GuestAddress(0);
}
}
#[cfg(kani)]
#[allow(dead_code)]
mod verification {
use std::mem::ManuallyDrop;
use std::num::Wrapping;
use vm_memory::{GuestMemoryRegion, MemoryRegionAddress};
use super::*;
use crate::vstate::memory::{Bytes, FileOffset, GuestAddress, GuestMemory, MmapRegion};
/// A made-for-kani version of `vm_memory::GuestMemoryMmap`. Unlike the real
/// `GuestMemoryMmap`, which manages a list of regions and then does a binary
/// search to determine which region a specific read or write request goes to,
/// this only uses a single region. Eliminating this binary search significantly
/// speeds up all queue proofs, because it eliminates the only loop contained herein,
/// meaning we can use `kani::unwind(0)` instead of `kani::unwind(2)`. Functionally,
/// it works identically to `GuestMemoryMmap` with only a single contained region.
pub struct ProofGuestMemory {
    /// The single region backing all guest memory accesses in proofs.
    the_region: vm_memory::GuestRegionMmap,
}
impl GuestMemory for ProofGuestMemory {
    type R = vm_memory::GuestRegionMmap;

    // Exactly one region by construction (see `ProofGuestMemory` docs).
    fn num_regions(&self) -> usize {
        1
    }

    // Return the single region iff `addr` falls inside it.
    fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> {
        self.the_region
            .to_region_addr(addr)
            .map(|_| &self.the_region)
    }

    fn iter(&self) -> impl Iterator<Item = &Self::R> {
        std::iter::once(&self.the_region)
    }

    fn try_access<F>(
        &self,
        count: usize,
        addr: GuestAddress,
        mut f: F,
    ) -> vm_memory::guest_memory::Result<usize>
    where
        F: FnMut(
            usize,
            usize,
            MemoryRegionAddress,
            &Self::R,
        ) -> vm_memory::guest_memory::Result<usize>,
    {
        // We only have a single region, meaning a lot of the complications of the default
        // try_access implementation for dealing with reads/writes across multiple
        // regions does not apply.
        let region_addr = self
            .the_region
            .to_region_addr(addr)
            .ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
        // Reject accesses that would run past the end of the region.
        self.the_region
            .checked_offset(region_addr, count)
            .ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
        f(0, count, region_addr, &self.the_region)
    }
}
/// Bundle of an arbitrary queue plus the guest memory it lives in, used as the
/// starting state of the kani proofs.
pub struct ProofContext(pub Queue, pub ProofGuestMemory);

/// Field-for-field stand-in for `vm_memory::MmapRegion`, used to construct a region
/// via `transmute` (see `guest_memory` below for why this hack is needed).
pub struct MmapRegionStub {
    addr: *mut u8,
    size: usize,
    bitmap: (),
    file_offset: Option<FileOffset>,
    prot: i32,
    flags: i32,
    owned: bool,
    hugetlbfs: Option<bool>,
}
/// We start the first guest memory region at an offset so that harnesses using
/// Queue::any() will be exposed to queue segments both before and after valid guest memory.
const GUEST_MEMORY_BASE: u64 = 512;

// We size our guest memory to fit a properly aligned queue, plus some wiggle bytes
// to make sure we do not only test queues where all segments are consecutively aligned
// (at least for those proofs that use a completely arbitrary queue structure).
// We need to give at least 16 bytes of buffer space for the descriptor table to be
// able to change its address, as it is 16-byte aligned.
const GUEST_MEMORY_SIZE: usize = (QUEUE_END - QUEUE_BASE_ADDRESS) as usize + 30;
/// Wrap the raw allocation at `memory` (of `GUEST_MEMORY_SIZE` bytes) into a
/// [`ProofGuestMemory`] whose single region starts at `GUEST_MEMORY_BASE`.
fn guest_memory(memory: *mut u8) -> ProofGuestMemory {
    // Ideally, we'd want to do
    // let region = unsafe {MmapRegionBuilder::new(GUEST_MEMORY_SIZE)
    //    .with_raw_mmap_pointer(bytes.as_mut_ptr())
    //    .build()
    //    .unwrap()};
    // However, .build() calls to .build_raw(), which contains a call to libc::sysconf.
    // Since kani 0.34.0, stubbing out foreign functions is supported, but due to the rust
    // standard library using a special version of the libc crate, it runs into some problems
    // [1] Even if we work around those problems, we run into performance problems [2].
    // Therefore, for now we stick to this ugly transmute hack (which only works because
    // the kani compiler will never re-order fields, so we can treat repr(Rust) as repr(C)).
    //
    // [1]: https://github.com/model-checking/kani/issues/2673
    // [2]: https://github.com/model-checking/kani/issues/2538
    let region_stub = MmapRegionStub {
        addr: memory,
        size: GUEST_MEMORY_SIZE,
        bitmap: Default::default(),
        file_offset: None,
        prot: 0,
        flags: libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
        owned: false,
        hugetlbfs: None,
    };

    // SAFETY-adjacent: relies on MmapRegionStub matching MmapRegion's layout — see above.
    let region: MmapRegion<()> = unsafe { std::mem::transmute(region_stub) };

    let guest_region =
        vm_memory::GuestRegionMmap::new(region, GuestAddress(GUEST_MEMORY_BASE)).unwrap();

    // Use a single memory region, just as firecracker does for guests of size < 2GB
    // For largest guests, firecracker uses two regions (due to the MMIO gap being
    // at the top of 32-bit address space)
    ProofGuestMemory {
        the_region: guest_region,
    }
}
// can't implement kani::Arbitrary for the relevant types due to orphan rules
fn setup_kani_guest_memory() -> ProofGuestMemory {
    // Non-deterministic Vec that will be used as the guest memory. We use `exact_vec` for now
    // as `any_vec` will likely result in worse performance. We do not lose much from
    // `exact_vec`, as our proofs do not make any assumptions about "filling" guest
    // memory: Since everything is placed at non-deterministic addresses with
    // non-deterministic lengths, we still cover all scenarios that would be covered by
    // smaller guest memory closely. We leak the memory allocated here, so that it
    // doesn't get deallocated at the end of this function. We do not explicitly
    // de-allocate, but since this is a kani proof, that does not matter.
    guest_memory(
        ManuallyDrop::new(kani::vec::exact_vec::<u8, GUEST_MEMORY_SIZE>()).as_mut_ptr(),
    )
}
// Constants describing the in-memory layout of a queue of size FIRECRACKER_MAX_SIZE starting
// at the beginning of guest memory. These are based on Section 2.6 of the VirtIO 1.1
// specification.
const QUEUE_BASE_ADDRESS: u64 = GUEST_MEMORY_BASE;

/// descriptor table has 16 bytes per entry, avail ring starts right after
const AVAIL_RING_BASE_ADDRESS: u64 =
    QUEUE_BASE_ADDRESS + FIRECRACKER_MAX_QUEUE_SIZE as u64 * 16;

/// Used ring starts after avail ring (which has size 6 + 2 * FIRECRACKER_MAX_QUEUE_SIZE),
/// and needs 2 bytes of padding
const USED_RING_BASE_ADDRESS: u64 =
    AVAIL_RING_BASE_ADDRESS + 6 + 2 * FIRECRACKER_MAX_QUEUE_SIZE as u64 + 2;

/// The address of the first byte after the queue (which starts at QUEUE_BASE_ADDRESS).
/// Note that the used ring structure has size 6 + 8 * FIRECRACKER_MAX_QUEUE_SIZE
const QUEUE_END: u64 = USED_RING_BASE_ADDRESS + 6 + 8 * FIRECRACKER_MAX_QUEUE_SIZE as u64;
/// Build a `Queue` with a fixed, well-aligned in-guest layout (see the layout
/// constants above) but non-deterministic indices and suppression flag.
fn less_arbitrary_queue() -> Queue {
    let mut queue = Queue::new(FIRECRACKER_MAX_QUEUE_SIZE);

    queue.size = FIRECRACKER_MAX_QUEUE_SIZE;
    queue.ready = true;
    queue.desc_table_address = GuestAddress(QUEUE_BASE_ADDRESS);
    queue.avail_ring_address = GuestAddress(AVAIL_RING_BASE_ADDRESS);
    queue.used_ring_address = GuestAddress(USED_RING_BASE_ADDRESS);
    // Indices and suppression state are left fully arbitrary for the proofs.
    queue.next_avail = Wrapping(kani::any());
    queue.next_used = Wrapping(kani::any());
    queue.uses_notif_suppression = kani::any();
    queue.num_added = Wrapping(kani::any());

    queue
}
impl ProofContext {
/// Creates a [`ProofContext`] where the queue layout is not arbitrary and instead
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/iov_deque.rs | src/vmm/src/devices/virtio/iov_deque.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::os::fd::AsRawFd;
use libc::{c_int, c_void, iovec, off_t, size_t};
use memfd;
use crate::arch::host_page_size;
#[derive(Debug, thiserror::Error, displaydoc::Display)]
/// Errors that can occur while creating an `IovDeque`.
///
/// Variant doc comments double as the `Display` messages (via `displaydoc`).
pub enum IovDequeError {
    /// Error with memfd: {0}
    Memfd(#[from] memfd::Error),
    /// Error while resizing memfd: {0}
    MemfdResize(std::io::Error),
    /// Error calling mmap: {0}
    Mmap(std::io::Error),
}
/// ['IovDeque'] is a ring buffer tailored for `struct iovec` objects.
///
/// From the point of view of API, [`IovDeque`] is a typical ring buffer that allows us to push
/// `struct iovec` objects at the end of the buffer and pop them from its beginning.
///
/// It is tailored to store `struct iovec` objects that describe memory that was passed to us from
/// the guest via a VirtIO queue. This allows us to assume the maximum size of a ring buffer (the
/// negotiated size of the queue).
// An important feature of the data structure is that it can give us a slice of all `struct iovec`
// objects in the queue, so that we can use this `&mut [iovec]` to perform operations such as
// `readv`. A typical implementation of a ring buffer allows for entries to wrap around the end of
// the underlying buffer. For example, a ring buffer with a capacity of 10 elements which
// currently holds 4 elements can look like this:
//
// tail head
// | |
// v v
// +---+---+---+---+---+---+---+---+---+---+
// ring buffer: | C | D | | | | | | | A | B |
// +---+---+---+---+---+---+---+---+---+---+
//
// When getting a slice for this data we should get something like that: &[A, B, C, D], which
// would require copies in order to make the elements continuous in memory.
//
// In order to avoid that and make the operation of getting a slice more efficient, we implement
// the optimization described in the "Optimization" section of the "Circular buffer" wikipedia
// entry: https://en.wikipedia.org/wiki/Circular_buffer. The optimization consists of allocating
// double the size of the virtual memory required for the buffer and map both parts on the same
// physical address. Looking at the same example as before, we should get, this picture:
//
// head | tail
// | | |
// v | v
// +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
// | C | D | | | | | | | A | B | C | D | | | | | | | A | B |
// +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
// First virtual page | Second virtual page
// |
// |
//
// Virtual memory
// ---------------------------------------------------------------------------------------
// Physical memory
//
// +---+---+---+---+---+---+---+---+---+---+
// | C | D | | | | | | | A | B |
// +---+---+---+---+---+---+---+---+---+---+
//
// Like that, the elements stored in the buffer are always laid out in contiguous virtual memory,
// so making a slice out of them does not require any copies.
//
// The `L` const generic determines the maximum number of `iovec` elements the queue should hold
// at any point in time. The actual capacity of the queue may differ and will depend on the host
// page size.
//
// ```Rust
// pub struct iovec {
// pub iov_base: *mut ::c_void,
// pub iov_len: ::size_t,
// }
// ```
#[derive(Debug)]
pub struct IovDeque<const L: u16> {
    /// Pointer to the start of the doubly-mapped ring buffer memory.
    pub iov: *mut libc::iovec,
    /// Index of the first live element; wraps so it stays below `capacity`.
    pub start: u16,
    /// Number of live elements, at most `L` (enforced in `push_back`).
    pub len: u16,
    /// Number of `iovec` slots in one page-aligned mapping; at least `L` (checked in `new`).
    pub capacity: u16,
}

// SAFETY: This is `Send`. We hold sole ownership of the underlying buffer.
unsafe impl<const L: u16> Send for IovDeque<L> {}
impl<const L: u16> IovDeque<L> {
/// Create a [`memfd`] object that represents a single physical page
///
/// The memfd is sized to `pages_bytes` and then sealed against any further
/// resizing, so the mappings created on top of it cannot be invalidated.
fn create_memfd(pages_bytes: usize) -> Result<memfd::Memfd, IovDequeError> {
    // Create a sealable memfd.
    let opts = memfd::MemfdOptions::default().allow_sealing(true);
    let mfd = opts.create("iov_deque")?;

    // Resize to system page size.
    mfd.as_file()
        .set_len(pages_bytes.try_into().unwrap())
        .map_err(IovDequeError::MemfdResize)?;

    // Add seals to prevent further resizing.
    mfd.add_seals(&[memfd::FileSeal::SealShrink, memfd::FileSeal::SealGrow])?;

    // Prevent further sealing changes.
    mfd.add_seal(memfd::FileSeal::SealSeal)?;

    Ok(mfd)
}
/// A thin wrapper on top of libc's `mmap` system call that converts the
/// `MAP_FAILED` sentinel into an [`IovDequeError::Mmap`].
///
/// # Safety
///
/// Callers need to make sure that the arguments to `mmap` are valid.
unsafe fn mmap(
    addr: *mut c_void,
    len: size_t,
    prot: c_int,
    flags: c_int,
    fd: c_int,
    offset: off_t,
) -> Result<*mut c_void, IovDequeError> {
    // SAFETY: caller should ensure the parameters are valid
    let ptr = unsafe { libc::mmap(addr, len, prot, flags, fd, offset) };
    if ptr == libc::MAP_FAILED {
        return Err(IovDequeError::Mmap(std::io::Error::last_os_error()));
    }
    Ok(ptr)
}
/// Allocate memory for our ring buffer
///
/// This will allocate 2 * `pages_bytes` bytes of virtual memory. The range is
/// reserved with `PROT_NONE`; `new` later remaps both halves onto the same
/// memfd page with `MAP_FIXED`, which makes them accessible.
fn allocate_ring_buffer_memory(pages_bytes: usize) -> Result<*mut c_void, IovDequeError> {
    // SAFETY: We are calling the system call with valid arguments
    unsafe {
        Self::mmap(
            std::ptr::null_mut(),
            pages_bytes * 2,
            libc::PROT_NONE,
            libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
            -1,
            0,
        )
    }
}
/// Calculate a number of bytes in full pages required for
/// the type to operate.
///
/// Rounds the space needed for `L` `iovec` entries up to a whole number of
/// host pages.
fn pages_bytes() -> usize {
    let page = host_page_size();
    let needed = std::mem::size_of::<iovec>() * L as usize;
    needed.div_ceil(page) * page
}
/// Create a new [`IovDeque`] that can hold memory described by a single VirtIO queue.
///
/// Sets up the "magic ring buffer" layout: one memfd-backed physical page range,
/// mapped twice back-to-back in virtual memory, so that any window of up to
/// `capacity` elements is contiguous.
pub fn new() -> Result<Self, IovDequeError> {
    let pages_bytes = Self::pages_bytes();

    let capacity = pages_bytes / std::mem::size_of::<iovec>();
    let capacity: u16 = capacity.try_into().unwrap();
    // The page-rounded mapping must fit at least the requested `L` elements.
    assert!(
        L <= capacity,
        "Actual capacity {} is smaller than requested capacity {}",
        capacity,
        L
    );

    let memfd = Self::create_memfd(pages_bytes)?;
    let raw_memfd = memfd.as_file().as_raw_fd();
    let buffer = Self::allocate_ring_buffer_memory(pages_bytes)?;

    // Map the first page of virtual memory to the physical page described by the memfd object
    // SAFETY: We are calling the system call with valid arguments
    let _ = unsafe {
        Self::mmap(
            buffer,
            pages_bytes,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_SHARED | libc::MAP_FIXED,
            raw_memfd,
            0,
        )
    }?;

    // Map the second page of virtual memory to the physical page described by the memfd object
    //
    // SAFETY: This is safe because:
    // * Both `buffer` and the result of `buffer.add(pages_bytes)` are within bounds of the
    //   allocation we got from `Self::allocate_ring_buffer_memory`.
    // * The resulting pointer is the beginning of the second page of our allocation, so it
    //   doesn't wrap around the address space.
    let next_page = unsafe { buffer.add(pages_bytes) };

    // SAFETY: We are calling the system call with valid arguments
    let _ = unsafe {
        Self::mmap(
            next_page,
            pages_bytes,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_SHARED | libc::MAP_FIXED,
            raw_memfd,
            0,
        )
    }?;

    Ok(Self {
        iov: buffer.cast(),
        start: 0,
        len: 0,
        capacity,
    })
}
/// Returns the number of `iovec` objects currently in the [`IovDeque`]
#[inline(always)]
pub fn len(&self) -> u16 {
    self.len
}

/// Returns `true` if the [`IovDeque`] is full, `false` otherwise
///
/// "Full" is measured against the requested capacity `L`, not the
/// (possibly larger) page-rounded `capacity`.
#[inline(always)]
pub fn is_full(&self) -> bool {
    self.len() == L
}

/// Resets the queue, dropping all its elements.
///
/// Only the bookkeeping is reset; the underlying mapping is left as-is.
#[inline(always)]
pub fn clear(&mut self) {
    self.start = 0;
    self.len = 0;
}
/// Adds an `iovec` in the ring buffer.
///
/// Panics if the buffer already holds `L` elements (see below).
pub fn push_back(&mut self, iov: iovec) {
    // This should NEVER happen, since our ring buffer is as big as the maximum queue size.
    // We also check for the sanity of the VirtIO queues, in queue.rs, which means that if we
    // ever try to add something in a full ring buffer, there is an internal bug in the device
    // emulation logic. Panic here because the device is hopelessly broken.
    assert!(
        !self.is_full(),
        "The number of `iovec` objects is bigger than the available space"
    );

    // SAFETY: self.iov is a valid pointer and `self.start + self.len` is within range (we
    // asserted before that the buffer is not full). Note that the target slot may land in
    // the second virtual page, which aliases the first one by construction (see `new`).
    unsafe {
        self.iov
            .add((self.start + self.len) as usize)
            .write_volatile(iov)
    };
    self.len += 1;
}
/// Pops the first `nr_iovecs` iovecs from the front of the buffer.
///
/// This will panic if we are asked
/// to pop more iovecs than what is currently available in the buffer.
pub fn pop_front(&mut self, nr_iovecs: u16) {
    assert!(
        self.len() >= nr_iovecs,
        "Internal bug! Trying to drop more iovec objects than what is available"
    );

    self.start += nr_iovecs;
    self.len -= nr_iovecs;
    // Wrap `start` back into the first virtual page once it passes `capacity`;
    // thanks to the double mapping the live elements stay contiguous either way.
    if self.capacity <= self.start {
        self.start -= self.capacity;
    }
}
/// Pops the first `nr_iovecs` iovecs from the back of the buffer.
///
/// This will panic if we are asked
/// to pop more iovecs than what is currently available in the buffer.
pub fn pop_back(&mut self, nr_iovecs: u16) {
    assert!(
        self.len() >= nr_iovecs,
        "Internal bug! Trying to drop more iovec objects than what is available"
    );
    // Shrinking from the back never moves `start`, so no wraparound handling is needed.
    self.len -= nr_iovecs;
}
/// Get a slice of the iovec objects currently in the buffer.
///
/// The double mapping guarantees the live elements are virtually contiguous.
pub fn as_slice(&self) -> &[iovec] {
    // SAFETY: Here we create a slice out of the existing elements in the buffer (not the whole
    // allocated memory). That means that we can:
    // * We can read `self.len * mem::size_of::<iovec>()` bytes out of the memory range we are
    //   returning.
    // * `self.iov.add(self.start.into())` is a non-null pointer and aligned.
    // * The underlying memory comes from a single allocation.
    // * The returning pointer points to `self.len` consecutive initialized `iovec` objects.
    // * We are only accessing the underlying memory through the returned slice. Since we are
    //   returning a slice of only the existing pushed elements the slice does not contain any
    //   aliasing references.
    // * The slice can be up to 1 page long which is smaller than `isize::MAX`.
    unsafe {
        let slice_start = self.iov.add(self.start.into());
        std::slice::from_raw_parts(slice_start, self.len.into())
    }
}
/// Get a mutable slice of the iovec objects currently in the buffer.
///
/// The double mapping guarantees the live elements are virtually contiguous.
pub fn as_mut_slice(&mut self) -> &mut [iovec] {
    // SAFETY: Here we create a slice out of the existing elements in the buffer (not the whole
    // allocated memory). That means that we can:
    // * We can read/write `self.len * mem::size_of::<iovec>()` bytes out of the memory range we
    //   are returning.
    // * The underlying memory comes from a single allocation.
    // * `self.iov.add(self.start.into())` is a non-null pointer and aligned
    // * The returning pointer points to `self.len` consecutive initialized `iovec` objects.
    // * We are only accessing the underlying memory through the returned slice. Since we are
    //   returning a slice of only the existing pushed elements the slice does not contain any
    //   aliasing references.
    // * The slice can be up to 1 page long which is smaller than `isize::MAX`.
    unsafe {
        let slice_start = self.iov.add(self.start.into());
        std::slice::from_raw_parts_mut(slice_start, self.len.into())
    }
}
}
impl<const L: u16> Drop for IovDeque<L> {
    /// Unmaps the doubly-mapped ring buffer. The memfd itself was dropped at the
    /// end of `new`; the kernel keeps the pages alive while mappings exist.
    fn drop(&mut self) {
        let pages_bytes = Self::pages_bytes();
        // SAFETY: We are passing an address that we got from a previous allocation of `2 *
        // pages_bytes` by calling mmap
        let _ = unsafe { libc::munmap(self.iov.cast(), 2 * pages_bytes) };
    }
}
#[cfg(test)]
mod tests {
    use libc::iovec;

    // Redefine `IovDeque` with specific length. Otherwise
    // Rust will not know what to do.
    type IovDeque = super::IovDeque<256>;

    #[test]
    fn test_new() {
        let deque = IovDeque::new().unwrap();
        assert_eq!(deque.len(), 0);
    }

    #[test]
    fn test_new_less_than_page() {
        let deque = super::IovDeque::<128>::new().unwrap();
        assert_eq!(deque.len(), 0);
    }

    #[test]
    fn test_new_more_than_page() {
        let deque = super::IovDeque::<512>::new().unwrap();
        assert_eq!(deque.len(), 0);
    }

    /// Build an `iovec` with a fake base pointer (`id`) so elements are distinguishable.
    fn make_iovec(id: u16, len: u16) -> iovec {
        iovec {
            iov_base: id as *mut libc::c_void,
            iov_len: len as usize,
        }
    }

    #[test]
    #[should_panic]
    fn test_push_back_too_many() {
        let mut deque = IovDeque::new().unwrap();
        assert_eq!(deque.len(), 0);

        for i in 0u16..256 {
            deque.push_back(make_iovec(i, i));
            assert_eq!(deque.len(), i + 1);
        }
        // 257th element must panic: the deque is sized for exactly 256.
        deque.push_back(make_iovec(0, 0));
    }

    #[test]
    #[should_panic]
    fn test_pop_front_from_empty() {
        let mut deque = IovDeque::new().unwrap();
        deque.pop_front(1);
    }

    #[test]
    #[should_panic]
    fn test_pop_front_too_many() {
        let mut deque = IovDeque::new().unwrap();
        deque.push_back(make_iovec(42, 42));
        deque.pop_front(2);
    }

    // Fixed typo in the test name ("font" -> "front").
    #[test]
    fn test_pop_front() {
        let mut deque = IovDeque::new().unwrap();
        assert_eq!(deque.len(), 0);
        assert!(!deque.is_full());
        // Popping 0 elements from an empty deque is a no-op, not a panic.
        deque.pop_front(0);

        let iovs: Vec<_> = (0..4).map(|i| make_iovec(i, i)).collect();
        for iov in iovs.iter() {
            deque.push_back(*iov);
        }
        assert_eq!(deque.as_slice(), &iovs);
        assert_eq!(deque.as_mut_slice(), &iovs);

        deque.pop_front(1);
        assert_eq!(deque.as_slice(), &iovs[1..]);
        assert_eq!(deque.as_mut_slice(), &iovs[1..]);
        deque.pop_front(1);
        assert_eq!(deque.as_slice(), &iovs[2..]);
        assert_eq!(deque.as_mut_slice(), &iovs[2..]);
        deque.pop_front(1);
        assert_eq!(deque.as_slice(), &iovs[3..]);
        assert_eq!(deque.as_mut_slice(), &iovs[3..]);
        deque.pop_front(1);
        assert_eq!(deque.as_slice(), &iovs[4..]);
        assert_eq!(deque.as_mut_slice(), &iovs[4..]);

        // Fill to capacity and drain one-by-one.
        for i in 0u16..256 {
            deque.push_back(make_iovec(i, i));
            assert_eq!(deque.len(), i + 1);
        }
        assert!(deque.is_full());
        assert!(deque.len() != 0);

        for i in 0u16..256 {
            deque.pop_front(1);
            assert_eq!(deque.len(), 256 - i - 1);
        }
    }

    #[test]
    fn test_pop_back() {
        let mut deque = IovDeque::new().unwrap();
        assert_eq!(deque.len(), 0);
        assert!(!deque.is_full());
        // Popping 0 elements from an empty deque is a no-op, not a panic.
        deque.pop_back(0);

        let iovs: Vec<_> = (0..4).map(|i| make_iovec(i, i)).collect();
        for iov in iovs.iter() {
            deque.push_back(*iov);
        }
        assert_eq!(deque.as_slice(), &iovs);
        assert_eq!(deque.as_mut_slice(), &iovs);

        deque.pop_back(1);
        assert_eq!(deque.as_slice(), &iovs[..iovs.len() - 1]);
        assert_eq!(deque.as_mut_slice(), &iovs[..iovs.len() - 1]);
        deque.pop_back(1);
        assert_eq!(deque.as_slice(), &iovs[..iovs.len() - 2]);
        assert_eq!(deque.as_mut_slice(), &iovs[..iovs.len() - 2]);
        deque.pop_back(1);
        assert_eq!(deque.as_slice(), &iovs[..iovs.len() - 3]);
        assert_eq!(deque.as_mut_slice(), &iovs[..iovs.len() - 3]);
        deque.pop_back(1);
        assert_eq!(deque.as_slice(), &iovs[..iovs.len() - 4]);
        assert_eq!(deque.as_mut_slice(), &iovs[..iovs.len() - 4]);

        // Fill to capacity and drain one-by-one.
        for i in 0u16..256 {
            deque.push_back(make_iovec(i, i));
            assert_eq!(deque.len(), i + 1);
        }
        assert!(deque.is_full());
        assert!(deque.len() != 0);

        for i in 0u16..256 {
            deque.pop_back(1);
            assert_eq!(deque.len(), 256 - i - 1);
        }
    }

    #[test]
    fn test_pop_many() {
        let mut deque = IovDeque::new().unwrap();

        for i in 0u16..256 {
            deque.push_back(make_iovec(i, i));
        }

        deque.pop_front(1);
        assert_eq!(deque.len(), 255);
        deque.pop_front(2);
        assert_eq!(deque.len(), 253);
        deque.pop_front(4);
        assert_eq!(deque.len(), 249);
        deque.pop_front(8);
        assert_eq!(deque.len(), 241);
        deque.pop_front(16);
        assert_eq!(deque.len(), 225);
        deque.pop_front(32);
        assert_eq!(deque.len(), 193);
        deque.pop_front(64);
        assert_eq!(deque.len(), 129);
        deque.pop_front(128);
        assert_eq!(deque.len(), 1);
    }

    #[test]
    fn test_as_slice() {
        let mut deque = IovDeque::new().unwrap();
        assert!(deque.as_slice().is_empty());

        for i in 0..256 {
            deque.push_back(make_iovec(i, 100));
            assert_eq!(deque.as_slice().len(), (i + 1) as usize);
        }

        let copy: Vec<iovec> = deque.as_slice().to_vec();
        assert_eq!(copy.len(), deque.len() as usize);
        for (i, iov) in deque.as_slice().iter().enumerate() {
            assert_eq!(iov.iov_len, copy[i].iov_len);
        }
    }

    #[test]
    fn test_as_mut_slice() {
        let mut deque = IovDeque::new().unwrap();
        assert!(deque.as_mut_slice().is_empty());

        for i in 0..256 {
            deque.push_back(make_iovec(i, 100));
            assert_eq!(deque.as_mut_slice().len(), (i + 1) as usize);
        }

        let copy: Vec<iovec> = deque.as_mut_slice().to_vec();
        deque
            .as_mut_slice()
            .iter_mut()
            .for_each(|iov| iov.iov_len *= 2);
        assert_eq!(copy.len(), deque.len() as usize);
        for (i, iov) in deque.as_slice().iter().enumerate() {
            assert_eq!(iov.iov_len, 2 * copy[i].iov_len);
        }
    }

    #[test]
    fn test_size_less_than_capacity() {
        // Usually we have a queue size of 256 which is a perfect fit
        // for 4K pages. But with 16K or bigger pages the `perfect fit`
        // is not perfect anymore. Need to ensure the wraparound logic
        // remains valid in such cases.
        const L: u16 = 16;

        let mut deque = super::IovDeque::<L>::new().unwrap();
        assert!(deque.as_mut_slice().is_empty());

        // Number of times need to fill/empty the queue to reach the
        // wraparound point.
        let fills = deque.capacity / L;

        // Almost reach the wraparound.
        for _ in 0..(fills - 1) {
            for _ in 0..L {
                deque.push_back(make_iovec(0, 100));
            }
            deque.pop_front(L);
        }

        // 1 element away from the wraparound
        for _ in 0..(L - 1) {
            deque.push_back(make_iovec(0, 100));
        }
        deque.pop_front(L - 1);

        // Start filling the 'second' page
        // First element will be put at the end of the
        // first page, while the rest will be in `second`
        // page.
        for _ in 0..L {
            deque.push_back(make_iovec(1, 100));
        }

        // Pop one element to trigger the wraparound.
        deque.pop_front(1);

        // Now the slice should be pointing to the memory of the `first` page
        // which should have the same content as the `second` page.
        assert_eq!(deque.as_slice(), vec![make_iovec(1, 100); L as usize - 1]);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/pmem/persist.rs | src/vmm/src/devices/virtio/pmem/persist.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use serde::{Deserialize, Serialize};
use vm_memory::GuestAddress;
use super::device::{ConfigSpace, Pmem, PmemError};
use crate::Vm;
use crate::devices::virtio::device::DeviceState;
use crate::devices::virtio::generated::virtio_ids::VIRTIO_ID_PMEM;
use crate::devices::virtio::persist::{PersistError as VirtioStateError, VirtioDeviceState};
use crate::devices::virtio::pmem::{PMEM_NUM_QUEUES, PMEM_QUEUE_SIZE};
use crate::snapshot::Persist;
use crate::vmm_config::pmem::PmemConfig;
use crate::vstate::memory::{GuestMemoryMmap, GuestRegionMmap};
use crate::vstate::vm::VmError;
#[derive(Debug, Clone, Serialize, Deserialize)]
/// Serializable snapshot state of a [`Pmem`] device.
pub struct PmemState {
    /// Generic VirtIO device state (queues, features, etc.).
    pub virtio_state: VirtioDeviceState,
    /// The device's virtio config space (region start/size).
    pub config_space: ConfigSpace,
    /// User-supplied device configuration.
    pub config: PmemConfig,
}
#[derive(Debug)]
/// Auxiliary handles needed to restore a [`Pmem`] device from a snapshot.
pub struct PmemConstructorArgs<'a> {
    /// Guest memory, used to validate the restored queues.
    pub mem: &'a GuestMemoryMmap,
    /// The VM, used to re-register the device's memory region.
    pub vm: &'a Vm,
}
#[derive(Debug, thiserror::Error, displaydoc::Display)]
/// Errors that can occur while saving/restoring a pmem device.
///
/// Variant doc comments double as the `Display` messages (via `displaydoc`).
pub enum PmemPersistError {
    /// Error resetting VirtIO state: {0}
    VirtioState(#[from] VirtioStateError),
    // Fixed typo in user-visible message: "devie" -> "device".
    /// Error creating Pmem device: {0}
    Pmem(#[from] PmemError),
    /// Error registering memory region: {0}
    Vm(#[from] VmError),
}
impl<'a> Persist<'a> for Pmem {
    type State = PmemState;
    type ConstructorArgs = PmemConstructorArgs<'a>;
    type Error = PmemPersistError;

    /// Capture the device's snapshot state (VirtIO state, config space, config).
    fn save(&self) -> Self::State {
        PmemState {
            virtio_state: VirtioDeviceState::from_device(self),
            config_space: self.config_space,
            config: self.config.clone(),
        }
    }

    /// Rebuild a `Pmem` device from snapshot state: validate the queues against
    /// guest memory, re-create the device (re-mmaps the backing file), restore
    /// the negotiated features, and re-register the memory region with the VM.
    fn restore(
        constructor_args: Self::ConstructorArgs,
        state: &Self::State,
    ) -> Result<Self, Self::Error> {
        let queues = state.virtio_state.build_queues_checked(
            constructor_args.mem,
            VIRTIO_ID_PMEM,
            PMEM_NUM_QUEUES,
            PMEM_QUEUE_SIZE,
        )?;
        let mut pmem = Pmem::new_with_queues(state.config.clone(), queues)?;
        pmem.config_space = state.config_space;
        pmem.avail_features = state.virtio_state.avail_features;
        pmem.acked_features = state.virtio_state.acked_features;
        pmem.set_mem_region(constructor_args.vm)?;
        Ok(pmem)
    }
}
#[cfg(test)]
mod tests {
    use vmm_sys_util::tempfile::TempFile;

    use super::*;
    use crate::arch::Kvm;
    use crate::devices::virtio::device::VirtioDevice;
    use crate::devices::virtio::test_utils::default_mem;
    use crate::snapshot::Snapshot;

    #[test]
    fn test_persistence() {
        // We create the backing file here so that it exists for the whole lifetime of the test.
        let dummy_file = TempFile::new().unwrap();
        // `set_len` returns a `Result` that was previously ignored; unwrap so a
        // failed resize fails the test instead of silently producing a 0-byte file.
        dummy_file.as_file().set_len(0x20_0000).unwrap();
        let dummy_path = dummy_file.as_path().to_str().unwrap().to_string();

        let config = PmemConfig {
            id: "1".into(),
            path_on_host: dummy_path,
            root_device: true,
            read_only: false,
        };
        let pmem = Pmem::new(config).unwrap();
        let guest_mem = default_mem();
        let kvm = Kvm::new(vec![]).unwrap();
        let vm = Vm::new(&kvm).unwrap();

        // Save the pmem device.
        let mut mem = vec![0; 4096];
        Snapshot::new(pmem.save())
            .save(&mut mem.as_mut_slice())
            .unwrap();

        // Restore the pmem device.
        let restored_pmem = Pmem::restore(
            PmemConstructorArgs {
                mem: &guest_mem,
                vm: &vm,
            },
            &Snapshot::load_without_crc_check(mem.as_slice())
                .unwrap()
                .data,
        )
        .unwrap();

        // Test that virtio specific fields are the same.
        assert_eq!(restored_pmem.device_type(), VIRTIO_ID_PMEM);
        assert_eq!(restored_pmem.avail_features(), pmem.avail_features());
        assert_eq!(restored_pmem.acked_features(), pmem.acked_features());
        assert_eq!(restored_pmem.queues(), pmem.queues());
        assert!(!pmem.is_activated());
        assert!(!restored_pmem.is_activated());
        assert_eq!(restored_pmem.config, pmem.config);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/pmem/device.rs | src/vmm/src/devices/virtio/pmem/device.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fs::{File, OpenOptions};
use std::ops::{Deref, DerefMut};
use std::os::fd::AsRawFd;
use std::sync::{Arc, Mutex};
use kvm_bindings::{KVM_MEM_READONLY, kvm_userspace_memory_region};
use kvm_ioctls::VmFd;
use serde::{Deserialize, Serialize};
use vm_allocator::AllocPolicy;
use vm_memory::mmap::{MmapRegionBuilder, MmapRegionError};
use vm_memory::{GuestAddress, GuestMemoryError};
use vmm_sys_util::eventfd::EventFd;
use crate::devices::virtio::ActivateError;
use crate::devices::virtio::device::{ActiveState, DeviceState, VirtioDevice};
use crate::devices::virtio::generated::virtio_config::VIRTIO_F_VERSION_1;
use crate::devices::virtio::generated::virtio_ids::VIRTIO_ID_PMEM;
use crate::devices::virtio::pmem::PMEM_QUEUE_SIZE;
use crate::devices::virtio::pmem::metrics::{PmemMetrics, PmemMetricsPerDevice};
use crate::devices::virtio::queue::{DescriptorChain, InvalidAvailIdx, Queue, QueueError};
use crate::devices::virtio::transport::{VirtioInterrupt, VirtioInterruptType};
use crate::logger::{IncMetric, error, info};
use crate::utils::{align_up, u64_to_usize};
use crate::vmm_config::pmem::PmemConfig;
use crate::vstate::memory::{ByteValued, Bytes, GuestMemoryMmap, GuestMmapRegion};
use crate::vstate::vm::VmError;
use crate::{Vm, impl_device_type};
#[derive(Debug, thiserror::Error, displaydoc::Display)]
/// Errors the pmem device can encounter.
///
/// Variant doc comments double as the `Display` messages (via `displaydoc`).
pub enum PmemError {
    /// Cannot set the memory regions: {0}
    SetUserMemoryRegion(VmError),
    // Fixed typo in user-visible message: "Unablet" -> "Unable".
    /// Unable to allocate a KVM slot for the device
    NoKvmSlotAvailable,
    /// Error accessing backing file: {0}
    BackingFile(std::io::Error),
    /// Error backing file size is 0
    BackingFileZeroSize,
    /// Error with EventFd: {0}
    EventFd(std::io::Error),
    /// Unexpected read-only descriptor
    ReadOnlyDescriptor,
    /// Unexpected write-only descriptor
    WriteOnlyDescriptor,
    /// UnknownRequestType: {0}
    UnknownRequestType(u32),
    /// Descriptor chain too short
    DescriptorChainTooShort,
    /// Guest memory error: {0}
    GuestMemory(#[from] GuestMemoryError),
    /// Error handling the VirtIO queue: {0}
    Queue(#[from] QueueError),
    /// Error during obtaining the descriptor from the queue: {0}
    QueuePop(#[from] InvalidAvailIdx),
}
/// Request type identifier for a flush request from the guest.
const VIRTIO_PMEM_REQ_TYPE_FLUSH: u32 = 0;
/// Status code returned to the guest on a successful request.
const SUCCESS: i32 = 0;
/// Status code returned to the guest on a failed request.
const FAILURE: i32 = -1;
#[derive(Debug, Default, Copy, Clone, Serialize, Deserialize)]
#[repr(C)]
/// Virtio-pmem device config space as exposed to the guest.
pub struct ConfigSpace {
    // Physical address of the first byte of the persistent memory region.
    pub start: u64,
    // Length of the address range
    pub size: u64,
}

// SAFETY: `ConfigSpace` contains only PODs in `repr(c)`, without padding.
unsafe impl ByteValued for ConfigSpace {}
#[derive(Debug)]
/// A virtio-pmem device backed by a host file mapped into guest physical memory.
pub struct Pmem {
    // VirtIO fields
    /// Features the device offers to the guest.
    pub avail_features: u64,
    /// Features the guest acknowledged.
    pub acked_features: u64,
    /// Signalled when the device is activated by the guest.
    pub activate_event: EventFd,
    // Transport fields
    pub device_state: DeviceState,
    pub queues: Vec<Queue>,
    pub queue_events: Vec<EventFd>,
    // Pmem specific fields
    /// Config space exposed to the guest (region start/size).
    pub config_space: ConfigSpace,
    /// Backing file on the host.
    pub file: File,
    /// Length of the backing file in bytes (the mapping is this value rounded
    /// up to `Self::ALIGNMENT`).
    pub file_len: u64,
    /// Host address of the backing-file mmap; unmapped in `Drop`.
    pub mmap_ptr: u64,
    pub metrics: Arc<PmemMetrics>,
    /// User-supplied device configuration.
    pub config: PmemConfig,
}
impl Drop for Pmem {
fn drop(&mut self) {
let mmap_len = align_up(self.file_len, Self::ALIGNMENT);
// SAFETY: `mmap_ptr` is a valid pointer since Pmem can only be created with `new*` methods.
// Mapping size calculation is same for original mmap call.
unsafe {
_ = libc::munmap(self.mmap_ptr as *mut libc::c_void, u64_to_usize(mmap_len));
}
}
}
impl Pmem {
// Pmem devices need to have address and size to be
// a multiple of 2MB
pub const ALIGNMENT: u64 = 2 * 1024 * 1024;
/// Create a new Pmem device with a backing file at `disk_image_path` path.
pub fn new(config: PmemConfig) -> Result<Self, PmemError> {
Self::new_with_queues(config, vec![Queue::new(PMEM_QUEUE_SIZE)])
}
/// Create a new Pmem device with a backing file at `disk_image_path` path using a pre-created
/// set of queues.
pub fn new_with_queues(config: PmemConfig, queues: Vec<Queue>) -> Result<Self, PmemError> {
let (file, file_len, mmap_ptr, mmap_len) =
Self::mmap_backing_file(&config.path_on_host, config.read_only)?;
Ok(Self {
avail_features: 1u64 << VIRTIO_F_VERSION_1,
acked_features: 0u64,
activate_event: EventFd::new(libc::EFD_NONBLOCK).map_err(PmemError::EventFd)?,
device_state: DeviceState::Inactive,
queues,
queue_events: vec![EventFd::new(libc::EFD_NONBLOCK).map_err(PmemError::EventFd)?],
config_space: ConfigSpace {
start: 0,
size: mmap_len,
},
file,
file_len,
mmap_ptr,
metrics: PmemMetricsPerDevice::alloc(config.id.clone()),
config,
})
}
fn mmap_backing_file(path: &str, read_only: bool) -> Result<(File, u64, u64, u64), PmemError> {
let file = OpenOptions::new()
.read(true)
.write(!read_only)
.open(path)
.map_err(PmemError::BackingFile)?;
let file_len = file.metadata().unwrap().len();
if (file_len == 0) {
return Err(PmemError::BackingFileZeroSize);
}
let mut prot = libc::PROT_READ;
if !read_only {
prot |= libc::PROT_WRITE;
}
let mmap_len = align_up(file_len, Self::ALIGNMENT);
let mmap_ptr = if (mmap_len == file_len) {
// SAFETY: We are calling the system call with valid arguments and checking the returned
// value
unsafe {
let r = libc::mmap(
std::ptr::null_mut(),
u64_to_usize(file_len),
prot,
libc::MAP_SHARED | libc::MAP_NORESERVE,
file.as_raw_fd(),
0,
);
if r == libc::MAP_FAILED {
return Err(PmemError::BackingFile(std::io::Error::last_os_error()));
}
r
}
} else {
// SAFETY: We are calling system calls with valid arguments and checking returned
// values
//
// The double mapping is done to ensure the underlying memory has the size of
// `mmap_len` (wich is 2MB aligned as per `virtio-pmem` specification)
// First mmap creates a mapping of `mmap_len` while second mmaps the actual
// file on top. The remaining gap between the end of the mmaped file and
// the actual end of the memory region is backed by PRIVATE | ANONYMOUS memory.
unsafe {
let mmap_ptr = libc::mmap(
std::ptr::null_mut(),
u64_to_usize(mmap_len),
prot,
libc::MAP_PRIVATE | libc::MAP_NORESERVE | libc::MAP_ANONYMOUS,
-1,
0,
);
if mmap_ptr == libc::MAP_FAILED {
return Err(PmemError::BackingFile(std::io::Error::last_os_error()));
}
let r = libc::mmap(
mmap_ptr,
u64_to_usize(file_len),
prot,
libc::MAP_SHARED | libc::MAP_NORESERVE | libc::MAP_FIXED,
file.as_raw_fd(),
0,
);
if r == libc::MAP_FAILED {
return Err(PmemError::BackingFile(std::io::Error::last_os_error()));
}
mmap_ptr
}
};
Ok((file, file_len, mmap_ptr as u64, mmap_len))
}
/// Allocate memory in past_mmio64 memory region
pub fn alloc_region(&mut self, vm: &Vm) {
let mut resource_allocator_lock = vm.resource_allocator();
let resource_allocator = resource_allocator_lock.deref_mut();
let addr = resource_allocator
.past_mmio64_memory
.allocate(
self.config_space.size,
Pmem::ALIGNMENT,
AllocPolicy::FirstMatch,
)
.unwrap();
self.config_space.start = addr.start();
}
/// Set user memory region in KVM
pub fn set_mem_region(&mut self, vm: &Vm) -> Result<(), PmemError> {
let next_slot = vm.next_kvm_slot(1).ok_or(PmemError::NoKvmSlotAvailable)?;
let memory_region = kvm_userspace_memory_region {
slot: next_slot,
guest_phys_addr: self.config_space.start,
memory_size: self.config_space.size,
userspace_addr: self.mmap_ptr,
flags: if self.config.read_only {
KVM_MEM_READONLY
} else {
0
},
};
vm.set_user_memory_region(memory_region)
.map_err(PmemError::SetUserMemoryRegion)
}
pub fn handle_queue(&mut self) -> Result<(), PmemError> {
// This is safe since we checked in the event handler that the device is activated.
let active_state = self.device_state.active_state().unwrap();
while let Some(head) = self.queues[0].pop()? {
let add_result = match self.process_chain(head) {
Ok(()) => self.queues[0].add_used(head.index, 4),
Err(err) => {
error!("pmem: {err}");
self.metrics.event_fails.inc();
self.queues[0].add_used(head.index, 0)
}
};
if let Err(err) = add_result {
error!("pmem: {err}");
self.metrics.event_fails.inc();
break;
}
}
self.queues[0].advance_used_ring_idx();
if self.queues[0].prepare_kick() {
active_state
.interrupt
.trigger(VirtioInterruptType::Queue(0))
.unwrap_or_else(|err| {
error!("pmem: {err}");
self.metrics.event_fails.inc();
});
}
Ok(())
}
fn process_chain(&self, head: DescriptorChain) -> Result<(), PmemError> {
// This is safe since we checked in the event handler that the device is activated.
let active_state = self.device_state.active_state().unwrap();
if head.is_write_only() {
return Err(PmemError::WriteOnlyDescriptor);
}
let request: u32 = active_state.mem.read_obj(head.addr)?;
if request != VIRTIO_PMEM_REQ_TYPE_FLUSH {
return Err(PmemError::UnknownRequestType(request));
}
let Some(status_descriptor) = head.next_descriptor() else {
return Err(PmemError::DescriptorChainTooShort);
};
if !status_descriptor.is_write_only() {
return Err(PmemError::ReadOnlyDescriptor);
}
let mut result = SUCCESS;
// SAFETY: We are calling the system call with valid arguments and checking the returned
// value
unsafe {
let ret = libc::msync(
self.mmap_ptr as *mut libc::c_void,
u64_to_usize(self.file_len),
libc::MS_SYNC,
);
if ret < 0 {
error!("pmem: Unable to msync the file. Error: {}", ret);
result = FAILURE;
}
}
active_state.mem.write_obj(result, status_descriptor.addr)?;
Ok(())
}
pub fn process_queue(&mut self) {
self.metrics.queue_event_count.inc();
if let Err(err) = self.queue_events[0].read() {
error!("pmem: Failed to get queue event: {err:?}");
self.metrics.event_fails.inc();
return;
}
self.handle_queue().unwrap_or_else(|err| {
error!("pmem: {err:?}");
self.metrics.event_fails.inc();
});
}
}
impl VirtioDevice for Pmem {
impl_device_type!(VIRTIO_ID_PMEM);
fn avail_features(&self) -> u64 {
self.avail_features
}
fn acked_features(&self) -> u64 {
self.acked_features
}
fn set_acked_features(&mut self, acked_features: u64) {
self.acked_features = acked_features;
}
fn queues(&self) -> &[Queue] {
&self.queues
}
fn queues_mut(&mut self) -> &mut [Queue] {
&mut self.queues
}
fn queue_events(&self) -> &[EventFd] {
&self.queue_events
}
fn interrupt_trigger(&self) -> &dyn VirtioInterrupt {
self.device_state
.active_state()
.expect("Device not activated")
.interrupt
.deref()
}
fn read_config(&self, offset: u64, data: &mut [u8]) {
if let Some(config_space_bytes) = self.config_space.as_slice().get(u64_to_usize(offset)..) {
let len = config_space_bytes.len().min(data.len());
data[..len].copy_from_slice(&config_space_bytes[..len]);
} else {
error!("Failed to read config space");
self.metrics.cfg_fails.inc();
}
}
fn write_config(&mut self, _offset: u64, _data: &[u8]) {}
fn activate(
&mut self,
mem: GuestMemoryMmap,
interrupt: Arc<dyn VirtioInterrupt>,
) -> Result<(), ActivateError> {
for q in self.queues.iter_mut() {
q.initialize(&mem)
.map_err(ActivateError::QueueMemoryError)?;
}
if self.activate_event.write(1).is_err() {
self.metrics.activate_fails.inc();
return Err(ActivateError::EventFd);
}
self.device_state = DeviceState::Activated(ActiveState { mem, interrupt });
Ok(())
}
fn is_activated(&self) -> bool {
self.device_state.is_activated()
}
fn kick(&mut self) {
if self.is_activated() {
info!("kick pmem {}.", self.config.id);
self.handle_queue();
}
}
}
#[cfg(test)]
mod tests {
use vmm_sys_util::tempfile::TempFile;
use super::*;
use crate::devices::virtio::queue::{VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE};
use crate::devices::virtio::test_utils::{VirtQueue, default_interrupt, default_mem};
#[test]
fn test_from_config() {
let config = PmemConfig {
id: "1".into(),
path_on_host: "not_a_path".into(),
root_device: true,
read_only: false,
};
assert!(matches!(
Pmem::new(config).unwrap_err(),
PmemError::BackingFile(_),
));
let dummy_file = TempFile::new().unwrap();
let dummy_path = dummy_file.as_path().to_str().unwrap().to_string();
let config = PmemConfig {
id: "1".into(),
path_on_host: dummy_path.clone(),
root_device: true,
read_only: false,
};
assert!(matches!(
Pmem::new(config).unwrap_err(),
PmemError::BackingFileZeroSize,
));
dummy_file.as_file().set_len(0x20_0000);
let config = PmemConfig {
id: "1".into(),
path_on_host: dummy_path,
root_device: true,
read_only: false,
};
Pmem::new(config).unwrap();
}
#[test]
fn test_process_chain() {
let dummy_file = TempFile::new().unwrap();
dummy_file.as_file().set_len(0x20_0000);
let dummy_path = dummy_file.as_path().to_str().unwrap().to_string();
let config = PmemConfig {
id: "1".into(),
path_on_host: dummy_path,
root_device: true,
read_only: false,
};
let mut pmem = Pmem::new(config).unwrap();
let mem = default_mem();
let interrupt = default_interrupt();
let vq = VirtQueue::new(GuestAddress(0), &mem, 16);
pmem.queues[0] = vq.create_queue();
pmem.activate(mem.clone(), interrupt).unwrap();
// Valid request
{
vq.avail.ring[0].set(0);
vq.dtable[0].set(0x1000, 4, VIRTQ_DESC_F_NEXT, 1);
vq.avail.ring[1].set(1);
vq.dtable[1].set(0x2000, 4, VIRTQ_DESC_F_WRITE, 0);
mem.write_obj::<u32>(0, GuestAddress(0x1000)).unwrap();
mem.write_obj::<u32>(0x69, GuestAddress(0x2000)).unwrap();
vq.used.idx.set(0);
vq.avail.idx.set(1);
let head = pmem.queues[0].pop().unwrap().unwrap();
pmem.process_chain(head).unwrap();
assert_eq!(mem.read_obj::<u32>(GuestAddress(0x2000)).unwrap(), 0);
}
// Invalid request type
{
vq.avail.ring[0].set(0);
vq.dtable[0].set(0x1000, 4, VIRTQ_DESC_F_NEXT, 1);
mem.write_obj::<u32>(0x69, GuestAddress(0x1000)).unwrap();
pmem.queues[0] = vq.create_queue();
vq.used.idx.set(0);
vq.avail.idx.set(1);
let head = pmem.queues[0].pop().unwrap().unwrap();
assert!(matches!(
pmem.process_chain(head).unwrap_err(),
PmemError::UnknownRequestType(0x69),
));
}
// Short chain request
{
vq.avail.ring[0].set(0);
vq.dtable[0].set(0x1000, 4, 0, 1);
mem.write_obj::<u32>(0, GuestAddress(0x1000)).unwrap();
pmem.queues[0] = vq.create_queue();
vq.used.idx.set(0);
vq.avail.idx.set(1);
let head = pmem.queues[0].pop().unwrap().unwrap();
assert!(matches!(
pmem.process_chain(head).unwrap_err(),
PmemError::DescriptorChainTooShort,
));
}
// Write only first descriptor
{
vq.avail.ring[0].set(0);
vq.dtable[0].set(0x1000, 4, VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_NEXT, 1);
vq.avail.ring[1].set(1);
vq.dtable[1].set(0x2000, 4, VIRTQ_DESC_F_WRITE, 0);
mem.write_obj::<u32>(0, GuestAddress(0x1000)).unwrap();
pmem.queues[0] = vq.create_queue();
vq.used.idx.set(0);
vq.avail.idx.set(1);
let head = pmem.queues[0].pop().unwrap().unwrap();
assert!(matches!(
pmem.process_chain(head).unwrap_err(),
PmemError::WriteOnlyDescriptor,
));
}
// Read only second descriptor
{
vq.avail.ring[0].set(0);
vq.dtable[0].set(0x1000, 4, VIRTQ_DESC_F_NEXT, 1);
vq.avail.ring[1].set(1);
vq.dtable[1].set(0x2000, 4, 0, 0);
mem.write_obj::<u32>(0, GuestAddress(0x1000)).unwrap();
pmem.queues[0] = vq.create_queue();
vq.used.idx.set(0);
vq.avail.idx.set(1);
let head = pmem.queues[0].pop().unwrap().unwrap();
assert!(matches!(
pmem.process_chain(head).unwrap_err(),
PmemError::ReadOnlyDescriptor,
));
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/pmem/mod.rs | src/vmm/src/devices/virtio/pmem/mod.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
pub mod device;
pub mod event_handler;
pub mod metrics;
pub mod persist;
pub const PMEM_NUM_QUEUES: usize = 1;
pub const PMEM_QUEUE_SIZE: u16 = 256;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/pmem/event_handler.rs | src/vmm/src/devices/virtio/pmem/event_handler.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use event_manager::{EventOps, EventSet, Events, MutEventSubscriber};
use log::{error, warn};
use super::device::Pmem;
use crate::devices::virtio::device::VirtioDevice;
impl Pmem {
const PROCESS_ACTIVATE: u32 = 0;
const PROCESS_PMEM_QUEUE: u32 = 1;
fn register_runtime_events(&self, ops: &mut EventOps) {
if let Err(err) = ops.add(Events::with_data(
&self.queue_events[0],
Self::PROCESS_PMEM_QUEUE,
EventSet::IN,
)) {
error!("pmem: Failed to register queue event: {err}");
}
}
fn register_activate_event(&self, ops: &mut EventOps) {
if let Err(err) = ops.add(Events::with_data(
&self.activate_event,
Self::PROCESS_ACTIVATE,
EventSet::IN,
)) {
error!("pmem: Failed to register activate event: {err}");
}
}
fn process_activate_event(&self, ops: &mut EventOps) {
if let Err(err) = self.activate_event.read() {
error!("pmem: Failed to consume activate event: {err}");
}
// Register runtime events
self.register_runtime_events(ops);
// Remove activate event
if let Err(err) = ops.remove(Events::with_data(
&self.activate_event,
Self::PROCESS_ACTIVATE,
EventSet::IN,
)) {
error!("pmem: Failed to unregister activate event: {err}");
}
}
}
impl MutEventSubscriber for Pmem {
fn init(&mut self, ops: &mut EventOps) {
if self.is_activated() {
self.register_runtime_events(ops)
} else {
self.register_activate_event(ops)
}
}
fn process(&mut self, events: Events, ops: &mut EventOps) {
let event_set = events.event_set();
let source = events.data();
if !event_set.contains(EventSet::IN) {
warn!("pmem: Received unknown event: {event_set:#?} from source {source}");
return;
}
if !self.is_activated() {
warn!("pmem: The device is not activated yet. Spurious event received from {source}");
return;
}
match source {
Self::PROCESS_ACTIVATE => self.process_activate_event(ops),
Self::PROCESS_PMEM_QUEUE => self.process_queue(),
_ => {
warn!("pmem: Unknown event received: {source}");
}
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/pmem/metrics.rs | src/vmm/src/devices/virtio/pmem/metrics.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the metrics system for pmem devices.
//!
//! # Metrics format
//! The metrics are flushed in JSON when requested by vmm::logger::metrics::METRICS.write().
//!
//! ## JSON example with metrics:
//! ```json
//! {
//! "pmem_drv0": {
//! "activate_fails": "SharedIncMetric",
//! "cfg_fails": "SharedIncMetric",
//! "no_avail_buffer": "SharedIncMetric",
//! "event_fails": "SharedIncMetric",
//! "execute_fails": "SharedIncMetric",
//! ...
//! }
//! "pmem_drv1": {
//! "activate_fails": "SharedIncMetric",
//! "cfg_fails": "SharedIncMetric",
//! "no_avail_buffer": "SharedIncMetric",
//! "event_fails": "SharedIncMetric",
//! "execute_fails": "SharedIncMetric",
//! ...
//! }
//! ...
//! "pmem_drive_id": {
//! "activate_fails": "SharedIncMetric",
//! "cfg_fails": "SharedIncMetric",
//! "no_avail_buffer": "SharedIncMetric",
//! "event_fails": "SharedIncMetric",
//! "execute_fails": "SharedIncMetric",
//! ...
//! }
//! "pmem": {
//! "activate_fails": "SharedIncMetric",
//! "cfg_fails": "SharedIncMetric",
//! "no_avail_buffer": "SharedIncMetric",
//! "event_fails": "SharedIncMetric",
//! "execute_fails": "SharedIncMetric",
//! ...
//! }
//! }
//! ```
//! Each `pmem` field in the example above is a serializable `PmemDeviceMetrics` structure
//! collecting metrics such as `activate_fails`, `cfg_fails`, etc. for the pmem device.
//! `pmem_drv0` represent metrics for the endpoint "/pmem/drv0",
//! `pmem_drv1` represent metrics for the endpoint "/pmem/drv1", and
//! `pmem_drive_id` represent metrics for the endpoint "/pmem/{drive_id}"
//! pmem device respectively and `pmem` is the aggregate of all the per device metrics.
//!
//! # Limitations
//! pmem device currently do not have `vmm::logger::metrics::StoreMetrics` so aggregate
//! doesn't consider them.
//!
//! # Design
//! The main design goals of this system are:
//! * To improve pmem device metrics by logging them at per device granularity.
//! * Continue to provide aggregate pmem metrics to maintain backward compatibility.
//! * Move PmemDeviceMetrics out of from logger and decouple it.
//! * Rely on `serde` to provide the actual serialization for writing the metrics.
//! * Since all metrics start at 0, we implement the `Default` trait via derive for all of them, to
//! avoid having to initialize everything by hand.
//!
//! * Devices could be created in any order i.e. the first device created could either be drv0 or
//! drv1 so if we use a vector for PmemDeviceMetrics and call 1st device as pmem0, then pmem0
//! could sometimes point to drv0 and sometimes to drv1 which doesn't help with analysing the
//! metrics. So, use Map instead of Vec to help understand which drive the metrics actually
//! belongs to.
//!
//! The system implements 1 type of metrics:
//! * Shared Incremental Metrics (SharedIncMetrics) - dedicated for the metrics which need a counter
//! (i.e the number of times an API request failed). These metrics are reset upon flush.
//!
//! We add PmemDeviceMetrics entries from pmem::metrics::METRICS into Pmem device instead of
//! Pmem device having individual separate PmemDeviceMetrics entries because Pmem device is not
//! accessible from signal handlers to flush metrics and pmem::metrics::METRICS is.
use std::collections::BTreeMap;
use std::sync::{Arc, RwLock};
use serde::ser::SerializeMap;
use serde::{Serialize, Serializer};
use crate::logger::{IncMetric, LatencyAggregateMetrics, SharedIncMetric};
/// map of pmem drive id and metrics
/// this should be protected by a lock before accessing.
#[derive(Debug)]
pub struct PmemMetricsPerDevice {
/// used to access per pmem device metrics
pub metrics: BTreeMap<String, Arc<PmemMetrics>>,
}
impl PmemMetricsPerDevice {
/// Allocate `PmemDeviceMetrics` for pmem device having
/// id `drive_id`. Also, allocate only if it doesn't
/// exist to avoid overwriting previously allocated data.
/// lock is always initialized so it is safe the unwrap
/// the lock without a check.
pub fn alloc(drive_id: String) -> Arc<PmemMetrics> {
Arc::clone(
METRICS
.write()
.unwrap()
.metrics
.entry(drive_id)
.or_insert_with(|| Arc::new(PmemMetrics::default())),
)
}
}
/// Pool of pmem-related metrics per device behind a lock to
/// keep things thread safe. Since the lock is initialized here
/// it is safe to unwrap it without any check.
static METRICS: RwLock<PmemMetricsPerDevice> = RwLock::new(PmemMetricsPerDevice {
metrics: BTreeMap::new(),
});
/// This function facilitates aggregation and serialization of
/// per pmem device metrics.
pub fn flush_metrics<S: Serializer>(serializer: S) -> Result<S::Ok, S::Error> {
let pmem_metrics = METRICS.read().unwrap();
let metrics_len = pmem_metrics.metrics.len();
// +1 to accommodate aggregate pmem metrics
let mut seq = serializer.serialize_map(Some(1 + metrics_len))?;
let mut pmem_aggregated: PmemMetrics = PmemMetrics::default();
for (name, metrics) in pmem_metrics.metrics.iter() {
let devn = format!("pmem_{}", name);
// serialization will flush the metrics so aggregate before it.
let m: &PmemMetrics = metrics;
pmem_aggregated.aggregate(m);
seq.serialize_entry(&devn, m)?;
}
seq.serialize_entry("pmem", &pmem_aggregated)?;
seq.end()
}
/// Pmem Device associated metrics.
#[derive(Debug, Default, Serialize)]
pub struct PmemMetrics {
/// Number of times when activate failed on a pmem device.
pub activate_fails: SharedIncMetric,
/// Number of times when interacting with the space config of a pmem device failed.
pub cfg_fails: SharedIncMetric,
/// Number of times when handling events on a pmem device failed.
pub event_fails: SharedIncMetric,
/// Number of events triggered on the queue of this pmem device.
pub queue_event_count: SharedIncMetric,
}
impl PmemMetrics {
/// Const default construction.
pub fn new() -> Self {
Self {
..Default::default()
}
}
/// pmem metrics are SharedIncMetric where the diff of current vs
/// old is serialized i.e. serialize_u64(current-old).
/// So to have the aggregate serialized in same way we need to
/// fetch the diff of current vs old metrics and add it to the
/// aggregate.
pub fn aggregate(&mut self, other: &Self) {
self.activate_fails.add(other.activate_fails.fetch_diff());
self.cfg_fails.add(other.cfg_fails.fetch_diff());
self.event_fails.add(other.event_fails.fetch_diff());
self.queue_event_count
.add(other.queue_event_count.fetch_diff());
}
}
#[cfg(test)]
pub mod tests {
use super::*;
#[test]
fn test_max_pmem_dev_metrics() {
// Note: this test has nothing to do with
// pmem structure or IRQs, this is just to allocate
// metrics for max number of devices that system can have.
// We have 5-23 IRQ for pmem devices on x86_64 so, there
// are 19 pmem devices at max. And, even though we have more
// devices on aarch64 but we stick to 19 to keep test common.
const MAX_PMEM_DEVICES: usize = 19;
// This is to make sure that RwLock for pmem::metrics::METRICS is good.
drop(METRICS.read().unwrap());
drop(METRICS.write().unwrap());
// pmem::metrics::METRICS is in short RwLock on Vec of PmemDeviceMetrics.
// Normally, pointer to unique entries of pmem::metrics::METRICS are stored
// in Pmem device so that Pmem device can do self.metrics.* to
// update a metric. We try to do something similar here without
// using Pmem device by allocating max number of
// PmemDeviceMetrics in pmem::metrics::METRICS and store pointer to
// each entry in the local `metrics` vec.
// We then update 1 IncMetric and 2 SharedMetric for each metrics
// and validate if the metrics for per device was updated as
// expected.
let mut metrics: Vec<Arc<PmemMetrics>> = Vec::new();
for i in 0..MAX_PMEM_DEVICES {
let pmem_name: String = format!("pmem{}", i);
metrics.push(PmemMetricsPerDevice::alloc(pmem_name.clone()));
// update IncMetric
metrics[i].activate_fails.inc();
if i == 0 {
// Unit tests run in parallel and we have
// `test_single_pmem_dev_metrics` that also increases
// the IncMetric count of drv0 by 1 (intentional to check
// thread safety) so we check if the count is >=1.
assert!(metrics[i].activate_fails.count() >= 1);
} else {
assert!(metrics[i].activate_fails.count() == 1);
}
}
}
#[test]
fn test_single_pmem_dev_metrics() {
let test_metrics = PmemMetricsPerDevice::alloc(String::from("pmem0"));
// Test to update IncMetrics
test_metrics.activate_fails.inc();
assert!(
test_metrics.activate_fails.count() > 0,
"{}",
test_metrics.activate_fails.count()
);
// We expect only 2 tests (this and test_max_pmem_dev_metrics)
// to update activate_fails count for pmem0.
assert!(
test_metrics.activate_fails.count() <= 2,
"{}",
test_metrics.activate_fails.count()
);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/rng/persist.rs | src/vmm/src/devices/virtio/rng/persist.rs | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the structures needed for saving/restoring entropy devices.
use std::sync::Arc;
use serde::{Deserialize, Serialize};
use crate::devices::virtio::generated::virtio_ids::VIRTIO_ID_RNG;
use crate::devices::virtio::persist::{PersistError as VirtioStateError, VirtioDeviceState};
use crate::devices::virtio::queue::FIRECRACKER_MAX_QUEUE_SIZE;
use crate::devices::virtio::rng::{Entropy, EntropyError, RNG_NUM_QUEUES};
use crate::devices::virtio::transport::VirtioInterrupt;
use crate::rate_limiter::RateLimiter;
use crate::rate_limiter::persist::RateLimiterState;
use crate::snapshot::Persist;
use crate::vstate::memory::GuestMemoryMmap;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EntropyState {
pub virtio_state: VirtioDeviceState,
rate_limiter_state: RateLimiterState,
}
#[derive(Debug)]
pub struct EntropyConstructorArgs {
pub mem: GuestMemoryMmap,
}
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum EntropyPersistError {
/// Create entropy: {0}
CreateEntropy(#[from] EntropyError),
/// Virtio state: {0}
VirtioState(#[from] VirtioStateError),
/// Restore rate limiter: {0}
RestoreRateLimiter(#[from] std::io::Error),
}
impl Persist<'_> for Entropy {
type State = EntropyState;
type ConstructorArgs = EntropyConstructorArgs;
type Error = EntropyPersistError;
fn save(&self) -> Self::State {
EntropyState {
virtio_state: VirtioDeviceState::from_device(self),
rate_limiter_state: self.rate_limiter().save(),
}
}
fn restore(
constructor_args: Self::ConstructorArgs,
state: &Self::State,
) -> Result<Self, Self::Error> {
let queues = state.virtio_state.build_queues_checked(
&constructor_args.mem,
VIRTIO_ID_RNG,
RNG_NUM_QUEUES,
FIRECRACKER_MAX_QUEUE_SIZE,
)?;
let rate_limiter = RateLimiter::restore((), &state.rate_limiter_state)?;
let mut entropy = Entropy::new_with_queues(queues, rate_limiter)?;
entropy.set_avail_features(state.virtio_state.avail_features);
entropy.set_acked_features(state.virtio_state.acked_features);
Ok(entropy)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::rng::device::ENTROPY_DEV_ID;
use crate::devices::virtio::test_utils::default_interrupt;
use crate::devices::virtio::test_utils::test::create_virtio_mem;
use crate::snapshot::Snapshot;
#[test]
fn test_persistence() {
let mut mem = vec![0u8; 4096];
let entropy = Entropy::new(RateLimiter::default()).unwrap();
Snapshot::new(entropy.save())
.save(&mut mem.as_mut_slice())
.unwrap();
let guest_mem = create_virtio_mem();
let restored = Entropy::restore(
EntropyConstructorArgs { mem: guest_mem },
&Snapshot::load_without_crc_check(mem.as_slice())
.unwrap()
.data,
)
.unwrap();
assert_eq!(restored.device_type(), VIRTIO_ID_RNG);
assert_eq!(restored.id(), ENTROPY_DEV_ID);
assert!(!restored.is_activated());
assert!(!entropy.is_activated());
assert_eq!(restored.avail_features(), entropy.avail_features());
assert_eq!(restored.acked_features(), entropy.acked_features());
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/rng/device.rs | src/vmm/src/devices/virtio/rng/device.rs | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::io;
use std::ops::Deref;
use std::sync::Arc;
use aws_lc_rs::rand;
use log::info;
use vm_memory::GuestMemoryError;
use vmm_sys_util::eventfd::EventFd;
use super::metrics::METRICS;
use super::{RNG_NUM_QUEUES, RNG_QUEUE};
use crate::devices::DeviceError;
use crate::devices::virtio::ActivateError;
use crate::devices::virtio::device::{ActiveState, DeviceState, VirtioDevice};
use crate::devices::virtio::generated::virtio_config::VIRTIO_F_VERSION_1;
use crate::devices::virtio::generated::virtio_ids::VIRTIO_ID_RNG;
use crate::devices::virtio::iov_deque::IovDequeError;
use crate::devices::virtio::iovec::IoVecBufferMut;
use crate::devices::virtio::queue::{FIRECRACKER_MAX_QUEUE_SIZE, InvalidAvailIdx, Queue};
use crate::devices::virtio::transport::{VirtioInterrupt, VirtioInterruptType};
use crate::impl_device_type;
use crate::logger::{IncMetric, debug, error};
use crate::rate_limiter::{RateLimiter, TokenType};
use crate::vstate::memory::GuestMemoryMmap;
pub const ENTROPY_DEV_ID: &str = "rng";
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum EntropyError {
/// Error while handling an Event file descriptor: {0}
EventFd(#[from] io::Error),
/// Bad guest memory buffer: {0}
GuestMemory(#[from] GuestMemoryError),
/// Could not get random bytes: {0}
Random(#[from] aws_lc_rs::error::Unspecified),
/// Underlying IovDeque error: {0}
IovDeque(#[from] IovDequeError),
}
#[derive(Debug)]
pub struct Entropy {
// VirtIO fields
avail_features: u64,
acked_features: u64,
activate_event: EventFd,
// Transport fields
device_state: DeviceState,
pub(crate) queues: Vec<Queue>,
queue_events: Vec<EventFd>,
// Device specific fields
rate_limiter: RateLimiter,
buffer: IoVecBufferMut,
}
impl Entropy {
pub fn new(rate_limiter: RateLimiter) -> Result<Self, EntropyError> {
let queues = vec![Queue::new(FIRECRACKER_MAX_QUEUE_SIZE); RNG_NUM_QUEUES];
Self::new_with_queues(queues, rate_limiter)
}
pub fn new_with_queues(
queues: Vec<Queue>,
rate_limiter: RateLimiter,
) -> Result<Self, EntropyError> {
let activate_event = EventFd::new(libc::EFD_NONBLOCK)?;
let queue_events = (0..RNG_NUM_QUEUES)
.map(|_| EventFd::new(libc::EFD_NONBLOCK))
.collect::<Result<Vec<EventFd>, io::Error>>()?;
Ok(Self {
avail_features: 1 << VIRTIO_F_VERSION_1,
acked_features: 0u64,
activate_event,
device_state: DeviceState::Inactive,
queues,
queue_events,
rate_limiter,
buffer: IoVecBufferMut::new()?,
})
}
pub fn id(&self) -> &str {
ENTROPY_DEV_ID
}
fn signal_used_queue(&self) -> Result<(), DeviceError> {
self.interrupt_trigger()
.trigger(VirtioInterruptType::Queue(RNG_QUEUE.try_into().unwrap()))
.map_err(DeviceError::FailedSignalingIrq)
}
fn rate_limit_request(&mut self, bytes: u64) -> bool {
if !self.rate_limiter.consume(1, TokenType::Ops) {
return false;
}
if !self.rate_limiter.consume(bytes, TokenType::Bytes) {
self.rate_limiter.manual_replenish(1, TokenType::Ops);
return false;
}
true
}
fn rate_limit_replenish_request(rate_limiter: &mut RateLimiter, bytes: u64) {
rate_limiter.manual_replenish(1, TokenType::Ops);
rate_limiter.manual_replenish(bytes, TokenType::Bytes);
}
fn handle_one(&mut self) -> Result<u32, EntropyError> {
// If guest provided us with an empty buffer just return directly
if self.buffer.is_empty() {
return Ok(0);
}
let mut rand_bytes = vec![0; self.buffer.len() as usize];
rand::fill(&mut rand_bytes).inspect_err(|_| {
METRICS.host_rng_fails.inc();
})?;
// It is ok to unwrap here. We are writing `iovec.len()` bytes at offset 0.
self.buffer.write_all_volatile_at(&rand_bytes, 0).unwrap();
Ok(self.buffer.len())
}
fn process_entropy_queue(&mut self) -> Result<(), InvalidAvailIdx> {
let mut used_any = false;
while let Some(desc) = self.queues[RNG_QUEUE].pop()? {
// This is safe since we checked in the event handler that the device is activated.
let mem = &self.device_state.active_state().unwrap().mem;
let index = desc.index;
METRICS.entropy_event_count.inc();
// SAFETY: This descriptor chain points to a single `DescriptorChain` memory buffer,
// no other `IoVecBufferMut` object points to the same `DescriptorChain` at the same
// time and we clear the `iovec` after we process the request.
let bytes = match unsafe { self.buffer.load_descriptor_chain(mem, desc) } {
Ok(()) => {
debug!(
"entropy: guest request for {} bytes of entropy",
self.buffer.len()
);
// Check for available rate limiting budget.
// If not enough budget is available, leave the request descriptor in the queue
// to handle once we do have budget.
if !self.rate_limit_request(u64::from(self.buffer.len())) {
debug!("entropy: throttling entropy queue");
METRICS.entropy_rate_limiter_throttled.inc();
self.queues[RNG_QUEUE].undo_pop();
break;
}
self.handle_one().unwrap_or_else(|err| {
error!("entropy: {err}");
METRICS.entropy_event_fails.inc();
0
})
}
Err(err) => {
error!("entropy: Could not parse descriptor chain: {err}");
METRICS.entropy_event_fails.inc();
0
}
};
match self.queues[RNG_QUEUE].add_used(index, bytes) {
Ok(_) => {
used_any = true;
METRICS.entropy_bytes.add(bytes.into());
}
Err(err) => {
error!("entropy: Could not add used descriptor to queue: {err}");
Self::rate_limit_replenish_request(&mut self.rate_limiter, bytes.into());
METRICS.entropy_event_fails.inc();
// If we are not able to add a buffer to the used queue, something
// is probably seriously wrong, so just stop processing additional
// buffers
break;
}
}
}
self.queues[RNG_QUEUE].advance_used_ring_idx();
if used_any {
self.signal_used_queue().unwrap_or_else(|err| {
error!("entropy: {err:?}");
METRICS.entropy_event_fails.inc()
});
}
Ok(())
}
pub(crate) fn process_entropy_queue_event(&mut self) {
if let Err(err) = self.queue_events[RNG_QUEUE].read() {
error!("Failed to read entropy queue event: {err}");
METRICS.entropy_event_fails.inc();
} else if !self.rate_limiter.is_blocked() {
// We are not throttled, handle the entropy queue
self.process_entropy_queue().unwrap()
} else {
METRICS.rate_limiter_event_count.inc();
}
}
pub(crate) fn process_rate_limiter_event(&mut self) {
METRICS.rate_limiter_event_count.inc();
match self.rate_limiter.event_handler() {
Ok(_) => {
// There might be enough budget now to process entropy requests.
self.process_entropy_queue().unwrap()
}
Err(err) => {
error!("entropy: Failed to handle rate-limiter event: {err:?}");
METRICS.entropy_event_fails.inc();
}
}
}
pub fn process_virtio_queues(&mut self) -> Result<(), InvalidAvailIdx> {
self.process_entropy_queue()
}
pub fn rate_limiter(&self) -> &RateLimiter {
&self.rate_limiter
}
pub(crate) fn set_avail_features(&mut self, features: u64) {
self.avail_features = features;
}
pub(crate) fn set_acked_features(&mut self, features: u64) {
self.acked_features = features;
}
pub(crate) fn set_activated(
&mut self,
mem: GuestMemoryMmap,
interrupt: Arc<dyn VirtioInterrupt>,
) {
self.device_state = DeviceState::Activated(ActiveState { mem, interrupt });
}
pub(crate) fn activate_event(&self) -> &EventFd {
&self.activate_event
}
}
impl VirtioDevice for Entropy {
impl_device_type!(VIRTIO_ID_RNG);
fn queues(&self) -> &[Queue] {
&self.queues
}
fn queues_mut(&mut self) -> &mut [Queue] {
&mut self.queues
}
fn queue_events(&self) -> &[EventFd] {
&self.queue_events
}
fn interrupt_trigger(&self) -> &dyn VirtioInterrupt {
self.device_state
.active_state()
.expect("Device is not initialized")
.interrupt
.deref()
}
fn avail_features(&self) -> u64 {
self.avail_features
}
fn acked_features(&self) -> u64 {
self.acked_features
}
fn set_acked_features(&mut self, acked_features: u64) {
self.acked_features = acked_features;
}
fn read_config(&self, _offset: u64, mut _data: &mut [u8]) {}
fn write_config(&mut self, _offset: u64, _data: &[u8]) {}
fn is_activated(&self) -> bool {
self.device_state.is_activated()
}
fn activate(
&mut self,
mem: GuestMemoryMmap,
interrupt: Arc<dyn VirtioInterrupt>,
) -> Result<(), ActivateError> {
for q in self.queues.iter_mut() {
q.initialize(&mem)
.map_err(ActivateError::QueueMemoryError)?;
}
self.activate_event.write(1).map_err(|_| {
METRICS.activate_fails.inc();
ActivateError::EventFd
})?;
self.device_state = DeviceState::Activated(ActiveState { mem, interrupt });
Ok(())
}
fn kick(&mut self) {
if self.is_activated() {
info!("kick entropy {}.", self.id());
self.process_virtio_queues();
}
}
}
#[cfg(test)]
mod tests {
use std::time::Duration;
use super::*;
use crate::check_metric_after_block;
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::queue::VIRTQ_DESC_F_WRITE;
use crate::devices::virtio::test_utils::test::{
VirtioTestDevice, VirtioTestHelper, create_virtio_mem,
};
impl VirtioTestDevice for Entropy {
fn set_queues(&mut self, queues: Vec<Queue>) {
self.queues = queues;
}
fn num_queues(&self) -> usize {
RNG_NUM_QUEUES
}
}
fn default_entropy() -> Entropy {
Entropy::new(RateLimiter::default()).unwrap()
}
#[test]
fn test_new() {
let entropy_dev = default_entropy();
assert_eq!(entropy_dev.avail_features(), 1 << VIRTIO_F_VERSION_1);
assert_eq!(entropy_dev.acked_features(), 0);
assert!(!entropy_dev.is_activated());
}
#[test]
fn test_id() {
let entropy_dev = default_entropy();
assert_eq!(entropy_dev.id(), ENTROPY_DEV_ID);
}
#[test]
fn test_device_type() {
let entropy_dev = default_entropy();
assert_eq!(entropy_dev.device_type(), VIRTIO_ID_RNG);
}
#[test]
fn test_read_config() {
let entropy_dev = default_entropy();
let mut config = vec![0; 10];
entropy_dev.read_config(0, &mut config);
assert_eq!(config, vec![0; 10]);
entropy_dev.read_config(1, &mut config);
assert_eq!(config, vec![0; 10]);
entropy_dev.read_config(2, &mut config);
assert_eq!(config, vec![0; 10]);
entropy_dev.read_config(1024, &mut config);
assert_eq!(config, vec![0; 10]);
}
#[test]
fn test_write_config() {
let mut entropy_dev = default_entropy();
let mut read_config = vec![0; 10];
let write_config = vec![42; 10];
entropy_dev.write_config(0, &write_config);
entropy_dev.read_config(0, &mut read_config);
assert_eq!(read_config, vec![0; 10]);
entropy_dev.write_config(1, &write_config);
entropy_dev.read_config(1, &mut read_config);
assert_eq!(read_config, vec![0; 10]);
entropy_dev.write_config(2, &write_config);
entropy_dev.read_config(2, &mut read_config);
assert_eq!(read_config, vec![0; 10]);
entropy_dev.write_config(1024, &write_config);
entropy_dev.read_config(1024, &mut read_config);
assert_eq!(read_config, vec![0; 10]);
}
#[test]
fn test_handle_one() {
let mem = create_virtio_mem();
let mut th = VirtioTestHelper::<Entropy>::new(&mem, default_entropy());
// Checks that device activation works
th.activate_device(&mem);
// Add a read-only descriptor (this should fail)
th.add_desc_chain(RNG_QUEUE, 0, &[(0, 64, 0)]);
// Add a write-only descriptor with 10 bytes
th.add_desc_chain(RNG_QUEUE, 0, &[(1, 10, VIRTQ_DESC_F_WRITE)]);
// Add a write-only descriptor with 0 bytes. This should not fail.
th.add_desc_chain(RNG_QUEUE, 0, &[(2, 0, VIRTQ_DESC_F_WRITE)]);
let mut entropy_dev = th.device();
// This should succeed, we just added two descriptors
let desc = entropy_dev.queues_mut()[RNG_QUEUE].pop().unwrap().unwrap();
assert!(matches!(
// SAFETY: This descriptor chain is only loaded into one buffer
unsafe { IoVecBufferMut::<256>::from_descriptor_chain(&mem, desc) },
Err(crate::devices::virtio::iovec::IoVecError::ReadOnlyDescriptor)
));
// This should succeed, we should have one more descriptor
let desc = entropy_dev.queues_mut()[RNG_QUEUE].pop().unwrap().unwrap();
// SAFETY: This descriptor chain is only loaded into one buffer
entropy_dev.buffer = unsafe { IoVecBufferMut::from_descriptor_chain(&mem, desc).unwrap() };
entropy_dev.handle_one().unwrap();
}
#[test]
fn test_entropy_event() {
let mem = create_virtio_mem();
let mut th = VirtioTestHelper::<Entropy>::new(&mem, default_entropy());
th.activate_device(&mem);
// Add a read-only descriptor (this should fail)
th.add_desc_chain(RNG_QUEUE, 0, &[(0, 64, 0)]);
let entropy_event_fails = METRICS.entropy_event_fails.count();
let entropy_event_count = METRICS.entropy_event_count.count();
let entropy_bytes = METRICS.entropy_bytes.count();
let host_rng_fails = METRICS.host_rng_fails.count();
assert_eq!(th.emulate_for_msec(100).unwrap(), 1);
assert_eq!(METRICS.entropy_event_fails.count(), entropy_event_fails + 1);
assert_eq!(METRICS.entropy_event_count.count(), entropy_event_count + 1);
assert_eq!(METRICS.entropy_bytes.count(), entropy_bytes);
assert_eq!(METRICS.host_rng_fails.count(), host_rng_fails);
// Add two good descriptors
th.add_desc_chain(RNG_QUEUE, 0, &[(1, 10, VIRTQ_DESC_F_WRITE)]);
th.add_desc_chain(RNG_QUEUE, 100, &[(2, 20, VIRTQ_DESC_F_WRITE)]);
let entropy_event_fails = METRICS.entropy_event_fails.count();
let entropy_event_count = METRICS.entropy_event_count.count();
let entropy_bytes = METRICS.entropy_bytes.count();
let host_rng_fails = METRICS.host_rng_fails.count();
assert_eq!(th.emulate_for_msec(100).unwrap(), 1);
assert_eq!(METRICS.entropy_event_fails.count(), entropy_event_fails);
assert_eq!(METRICS.entropy_event_count.count(), entropy_event_count + 2);
assert_eq!(METRICS.entropy_bytes.count(), entropy_bytes + 30);
assert_eq!(METRICS.host_rng_fails.count(), host_rng_fails);
th.add_desc_chain(
RNG_QUEUE,
0,
&[
(3, 128, VIRTQ_DESC_F_WRITE),
(4, 128, VIRTQ_DESC_F_WRITE),
(5, 256, VIRTQ_DESC_F_WRITE),
],
);
let entropy_event_fails = METRICS.entropy_event_fails.count();
let entropy_event_count = METRICS.entropy_event_count.count();
let entropy_bytes = METRICS.entropy_bytes.count();
let host_rng_fails = METRICS.host_rng_fails.count();
assert_eq!(th.emulate_for_msec(100).unwrap(), 1);
assert_eq!(METRICS.entropy_event_fails.count(), entropy_event_fails);
assert_eq!(METRICS.entropy_event_count.count(), entropy_event_count + 1);
assert_eq!(METRICS.entropy_bytes.count(), entropy_bytes + 512);
assert_eq!(METRICS.host_rng_fails.count(), host_rng_fails);
}
#[test]
fn test_bad_rate_limiter_event() {
let mem = create_virtio_mem();
let mut th = VirtioTestHelper::<Entropy>::new(&mem, default_entropy());
th.activate_device(&mem);
let mut dev = th.device();
check_metric_after_block!(
&METRICS.entropy_event_fails,
1,
dev.process_rate_limiter_event()
);
}
#[test]
fn test_bandwidth_rate_limiter() {
let mem = create_virtio_mem();
// Rate Limiter with 4000 bytes / sec allowance and no initial burst allowance
let device = Entropy::new(RateLimiter::new(4000, 0, 1000, 0, 0, 0).unwrap()).unwrap();
let mut th = VirtioTestHelper::<Entropy>::new(&mem, device);
th.activate_device(&mem);
// We are asking for 4000 bytes which should be available, so the
// buffer should be processed normally
th.add_desc_chain(RNG_QUEUE, 0, &[(0, 4000, VIRTQ_DESC_F_WRITE)]);
check_metric_after_block!(
METRICS.entropy_bytes,
4000,
th.device().process_entropy_queue()
);
assert!(!th.device().rate_limiter.is_blocked());
// Completely replenish the rate limiter
th.device()
.rate_limiter
.manual_replenish(4000, TokenType::Bytes);
// Add two descriptors. The first one should drain the available budget,
// so the next one should be throttled.
th.add_desc_chain(RNG_QUEUE, 0, &[(0, 4000, VIRTQ_DESC_F_WRITE)]);
th.add_desc_chain(RNG_QUEUE, 1, &[(1, 1000, VIRTQ_DESC_F_WRITE)]);
check_metric_after_block!(
METRICS.entropy_bytes,
4000,
th.device().process_entropy_queue()
);
check_metric_after_block!(
METRICS.entropy_rate_limiter_throttled,
1,
th.device().process_entropy_queue()
);
assert!(th.device().rate_limiter().is_blocked());
// 250 msec should give enough time for replenishing 1000 bytes worth of tokens.
// Give it an extra 100 ms just to be sure the timer event reaches us from the kernel.
std::thread::sleep(Duration::from_millis(350));
check_metric_after_block!(METRICS.entropy_bytes, 1000, th.emulate_for_msec(100));
assert!(!th.device().rate_limiter().is_blocked());
}
#[test]
fn test_ops_rate_limiter() {
let mem = create_virtio_mem();
// Rate Limiter with unlimited bandwidth and allowance for 1 operation every 100 msec,
// (10 ops/sec), without initial burst.
let device = Entropy::new(RateLimiter::new(0, 0, 0, 1, 0, 100).unwrap()).unwrap();
let mut th = VirtioTestHelper::<Entropy>::new(&mem, device);
th.activate_device(&mem);
// We don't have a bandwidth limit and we can do 10 requests per sec
// so this should succeed.
th.add_desc_chain(RNG_QUEUE, 0, &[(0, 4000, VIRTQ_DESC_F_WRITE)]);
check_metric_after_block!(
METRICS.entropy_bytes,
4000,
th.device().process_entropy_queue()
);
assert!(!th.device().rate_limiter.is_blocked());
// Sleep for 1 second to completely replenish the rate limiter
std::thread::sleep(Duration::from_millis(1000));
// First one should succeed
let entropy_bytes = METRICS.entropy_bytes.count();
th.add_desc_chain(RNG_QUEUE, 0, &[(0, 64, VIRTQ_DESC_F_WRITE)]);
check_metric_after_block!(METRICS.entropy_bytes, 64, th.emulate_for_msec(100));
assert_eq!(METRICS.entropy_bytes.count(), entropy_bytes + 64);
// The rate limiter is not blocked yet.
assert!(!th.device().rate_limiter().is_blocked());
// But immediately asking another operation should block it because we have 1 op every 100
// msec.
th.add_desc_chain(RNG_QUEUE, 0, &[(0, 64, VIRTQ_DESC_F_WRITE)]);
check_metric_after_block!(
METRICS.entropy_rate_limiter_throttled,
1,
th.emulate_for_msec(50)
);
// Entropy bytes count should not have increased.
assert_eq!(METRICS.entropy_bytes.count(), entropy_bytes + 64);
// After 100 msec (plus 50 msec for ensuring the event reaches us from the kernel), the
// timer of the rate limiter should fire saying that there's now more tokens available
check_metric_after_block!(
METRICS.rate_limiter_event_count,
1,
th.emulate_for_msec(150)
);
// The rate limiter event should have processed the pending buffer as well
assert_eq!(METRICS.entropy_bytes.count(), entropy_bytes + 128);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/rng/mod.rs | src/vmm/src/devices/virtio/rng/mod.rs | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
pub mod device;
mod event_handler;
pub mod metrics;
pub mod persist;
pub use self::device::{Entropy, EntropyError};
pub(crate) const RNG_NUM_QUEUES: usize = 1;
pub(crate) const RNG_QUEUE: usize = 0;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/rng/event_handler.rs | src/vmm/src/devices/virtio/rng/event_handler.rs | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use event_manager::{EventOps, Events, MutEventSubscriber};
use vmm_sys_util::epoll::EventSet;
use super::{Entropy, RNG_QUEUE};
use crate::devices::virtio::device::VirtioDevice;
use crate::logger::{error, warn};
impl Entropy {
const PROCESS_ACTIVATE: u32 = 0;
const PROCESS_ENTROPY_QUEUE: u32 = 1;
const PROCESS_RATE_LIMITER: u32 = 2;
fn register_runtime_events(&self, ops: &mut EventOps) {
if let Err(err) = ops.add(Events::with_data(
&self.queue_events()[RNG_QUEUE],
Self::PROCESS_ENTROPY_QUEUE,
EventSet::IN,
)) {
error!("entropy: Failed to register queue event: {err}");
}
if let Err(err) = ops.add(Events::with_data(
self.rate_limiter(),
Self::PROCESS_RATE_LIMITER,
EventSet::IN,
)) {
error!("entropy: Failed to register rate-limiter event: {err}");
}
}
fn register_activate_event(&self, ops: &mut EventOps) {
if let Err(err) = ops.add(Events::with_data(
self.activate_event(),
Self::PROCESS_ACTIVATE,
EventSet::IN,
)) {
error!("entropy: Failed to register activate event: {err}");
}
}
fn process_activate_event(&self, ops: &mut EventOps) {
if let Err(err) = self.activate_event().read() {
error!("entropy: Failed to consume activate event: {err}");
}
// Register runtime events
self.register_runtime_events(ops);
// Remove activate event
if let Err(err) = ops.remove(Events::with_data(
self.activate_event(),
Self::PROCESS_ACTIVATE,
EventSet::IN,
)) {
error!("entropy: Failed to un-register activate event: {err}");
}
}
}
impl MutEventSubscriber for Entropy {
fn init(&mut self, ops: &mut event_manager::EventOps) {
// This function can be called during different points in the device lifetime:
// - shortly after device creation,
// - on device activation (is-activated already true at this point),
// - on device restore from snapshot.
if self.is_activated() {
self.register_runtime_events(ops);
} else {
self.register_activate_event(ops);
}
}
fn process(&mut self, events: event_manager::Events, ops: &mut event_manager::EventOps) {
let event_set = events.event_set();
let source = events.data();
if !event_set.contains(EventSet::IN) {
warn!("entropy: Received unknown event: {event_set:?} from source {source}");
return;
}
if !self.is_activated() {
warn!("entropy: The device is not activated yet. Spurious event received: {source}");
return;
}
match source {
Self::PROCESS_ACTIVATE => self.process_activate_event(ops),
Self::PROCESS_ENTROPY_QUEUE => self.process_entropy_queue_event(),
Self::PROCESS_RATE_LIMITER => self.process_rate_limiter_event(),
_ => {
warn!("entropy: Unknown event received: {source}");
}
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/rng/metrics.rs | src/vmm/src/devices/virtio/rng/metrics.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the metrics system for entropy devices.
//!
//! # Metrics format
//! The metrics are flushed in JSON when requested by vmm::logger::metrics::METRICS.write().
//!
//! ## JSON example with metrics:
//! ```json
//! "entropy": {
//! "activate_fails": "SharedIncMetric",
//! "entropy_event_fails": "SharedIncMetric",
//! "entropy_event_count": "SharedIncMetric",
//! ...
//! }
//! }
//! ```
//! Each `entropy` field in the example above is a serializable `EntropyDeviceMetrics` structure
//! collecting metrics such as `activate_fails`, `entropy_event_fails` etc. for the entropy device.
//! Since entropy doesn't support multiple devices, there is no per device metrics and
//! `entropy` represents the aggregate entropy metrics.
//!
//! # Design
//! The main design goals of this system are:
//! * Have a consistent approach of keeping device related metrics in the individual devices
//! modules.
//! * To decouple entropy device metrics from logger module by moving EntropyDeviceMetrics out of
//! FirecrackerDeviceMetrics.
//! * Rely on `serde` to provide the actual serialization for writing the metrics.
//!
//! The system implements 1 type of metrics:
//! * Shared Incremental Metrics (SharedIncMetrics) - dedicated for the metrics which need a counter
//! (i.e the number of times an API request failed). These metrics are reset upon flush.
use serde::ser::SerializeMap;
use serde::{Serialize, Serializer};
use crate::logger::SharedIncMetric;
/// Stores aggregated entropy metrics
pub(super) static METRICS: EntropyDeviceMetrics = EntropyDeviceMetrics::new();
/// Called by METRICS.flush(), this function facilitates serialization of entropy device metrics.
pub fn flush_metrics<S: Serializer>(serializer: S) -> Result<S::Ok, S::Error> {
let mut seq = serializer.serialize_map(Some(1))?;
seq.serialize_entry("entropy", &METRICS)?;
seq.end()
}
#[derive(Debug, Serialize)]
pub(super) struct EntropyDeviceMetrics {
/// Number of device activation failures
pub activate_fails: SharedIncMetric,
/// Number of entropy queue event handling failures
pub entropy_event_fails: SharedIncMetric,
/// Number of entropy requests handled
pub entropy_event_count: SharedIncMetric,
/// Number of entropy bytes provided to guest
pub entropy_bytes: SharedIncMetric,
/// Number of errors while getting random bytes on host
pub host_rng_fails: SharedIncMetric,
/// Number of times an entropy request was rate limited
pub entropy_rate_limiter_throttled: SharedIncMetric,
/// Number of events associated with the rate limiter
pub rate_limiter_event_count: SharedIncMetric,
}
impl EntropyDeviceMetrics {
/// Const default construction.
const fn new() -> Self {
Self {
activate_fails: SharedIncMetric::new(),
entropy_event_fails: SharedIncMetric::new(),
entropy_event_count: SharedIncMetric::new(),
entropy_bytes: SharedIncMetric::new(),
host_rng_fails: SharedIncMetric::new(),
entropy_rate_limiter_throttled: SharedIncMetric::new(),
rate_limiter_event_count: SharedIncMetric::new(),
}
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::logger::IncMetric;
#[test]
fn test_entropy_dev_metrics() {
let entropy_metrics: EntropyDeviceMetrics = EntropyDeviceMetrics::new();
let entropy_metrics_local: String = serde_json::to_string(&entropy_metrics).unwrap();
// the 1st serialize flushes the metrics and resets values to 0 so that
// we can compare the values with local metrics.
serde_json::to_string(&METRICS).unwrap();
let entropy_metrics_global: String = serde_json::to_string(&METRICS).unwrap();
assert_eq!(entropy_metrics_local, entropy_metrics_global);
entropy_metrics.entropy_event_count.inc();
assert_eq!(entropy_metrics.entropy_event_count.count(), 1);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/balloon/persist.rs | src/vmm/src/devices/virtio/balloon/persist.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the structures needed for saving/restoring balloon devices.
use std::sync::Arc;
use std::time::Duration;
use serde::{Deserialize, Serialize};
use super::*;
use crate::devices::virtio::balloon::device::{BalloonStats, ConfigSpace, HintingState};
use crate::devices::virtio::device::{ActiveState, DeviceState};
use crate::devices::virtio::generated::virtio_ids::VIRTIO_ID_BALLOON;
use crate::devices::virtio::persist::VirtioDeviceState;
use crate::devices::virtio::queue::FIRECRACKER_MAX_QUEUE_SIZE;
use crate::devices::virtio::transport::VirtioInterrupt;
use crate::snapshot::Persist;
use crate::vstate::memory::GuestMemoryMmap;
/// Information about the balloon config's that are saved
/// at snapshot.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BalloonConfigSpaceState {
num_pages: u32,
actual_pages: u32,
}
/// Information about the balloon stats that are saved
/// at snapshot.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BalloonStatsState {
swap_in: Option<u64>,
swap_out: Option<u64>,
major_faults: Option<u64>,
minor_faults: Option<u64>,
free_memory: Option<u64>,
total_memory: Option<u64>,
available_memory: Option<u64>,
disk_caches: Option<u64>,
hugetlb_allocations: Option<u64>,
hugetlb_failures: Option<u64>,
oom_kill: Option<u64>,
alloc_stall: Option<u64>,
async_scan: Option<u64>,
direct_scan: Option<u64>,
async_reclaim: Option<u64>,
direct_reclaim: Option<u64>,
}
impl BalloonStatsState {
fn from_stats(stats: &BalloonStats) -> Self {
Self {
swap_in: stats.swap_in,
swap_out: stats.swap_out,
major_faults: stats.major_faults,
minor_faults: stats.minor_faults,
free_memory: stats.free_memory,
total_memory: stats.total_memory,
available_memory: stats.available_memory,
disk_caches: stats.disk_caches,
hugetlb_allocations: stats.hugetlb_allocations,
hugetlb_failures: stats.hugetlb_failures,
oom_kill: stats.oom_kill,
alloc_stall: stats.alloc_stall,
async_scan: stats.async_scan,
direct_scan: stats.direct_scan,
async_reclaim: stats.async_reclaim,
direct_reclaim: stats.direct_reclaim,
}
}
fn create_stats(&self) -> BalloonStats {
BalloonStats {
target_pages: 0,
actual_pages: 0,
target_mib: 0,
actual_mib: 0,
swap_in: self.swap_in,
swap_out: self.swap_out,
major_faults: self.major_faults,
minor_faults: self.minor_faults,
free_memory: self.free_memory,
total_memory: self.total_memory,
available_memory: self.available_memory,
disk_caches: self.disk_caches,
hugetlb_allocations: self.hugetlb_allocations,
hugetlb_failures: self.hugetlb_failures,
oom_kill: self.oom_kill,
alloc_stall: self.alloc_stall,
async_scan: self.async_scan,
direct_scan: self.direct_scan,
async_reclaim: self.async_reclaim,
direct_reclaim: self.direct_reclaim,
}
}
}
/// Information about the balloon that are saved
/// at snapshot.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BalloonState {
stats_polling_interval_s: u16,
stats_desc_index: Option<u16>,
latest_stats: BalloonStatsState,
config_space: BalloonConfigSpaceState,
hinting_state: HintingState,
pub virtio_state: VirtioDeviceState,
}
/// Auxiliary structure for creating a device when resuming from a snapshot.
#[derive(Debug)]
pub struct BalloonConstructorArgs {
/// Pointer to guest memory.
pub mem: GuestMemoryMmap,
}
impl Persist<'_> for Balloon {
type State = BalloonState;
type ConstructorArgs = BalloonConstructorArgs;
type Error = super::BalloonError;
fn save(&self) -> Self::State {
BalloonState {
stats_polling_interval_s: self.stats_polling_interval_s,
stats_desc_index: self.stats_desc_index,
latest_stats: BalloonStatsState::from_stats(&self.latest_stats),
hinting_state: self.hinting_state,
config_space: BalloonConfigSpaceState {
num_pages: self.config_space.num_pages,
actual_pages: self.config_space.actual_pages,
},
virtio_state: VirtioDeviceState::from_device(self),
}
}
fn restore(
constructor_args: Self::ConstructorArgs,
state: &Self::State,
) -> Result<Self, Self::Error> {
let free_page_hinting =
state.virtio_state.avail_features & (1u64 << VIRTIO_BALLOON_F_FREE_PAGE_HINTING) != 0;
let free_page_reporting =
state.virtio_state.avail_features & (1u64 << VIRTIO_BALLOON_F_FREE_PAGE_REPORTING) != 0;
// We can safely create the balloon with arbitrary flags and
// num_pages because we will overwrite them after.
let mut balloon = Balloon::new(
0,
false,
state.stats_polling_interval_s,
free_page_hinting,
free_page_reporting,
)?;
let mut num_queues = BALLOON_MIN_NUM_QUEUES;
// As per the virtio 1.1 specification, the statistics queue
// should not exist if the statistics are not enabled.
if state.stats_polling_interval_s > 0 {
num_queues += 1;
}
if free_page_hinting {
num_queues += 1;
}
if free_page_reporting {
num_queues += 1;
}
balloon.queues = state
.virtio_state
.build_queues_checked(
&constructor_args.mem,
VIRTIO_ID_BALLOON,
num_queues,
FIRECRACKER_MAX_QUEUE_SIZE,
)
.map_err(|_| Self::Error::QueueRestoreError)?;
balloon.avail_features = state.virtio_state.avail_features;
balloon.acked_features = state.virtio_state.acked_features;
balloon.latest_stats = state.latest_stats.create_stats();
balloon.config_space = ConfigSpace {
num_pages: state.config_space.num_pages,
actual_pages: state.config_space.actual_pages,
// On restore allow the guest to reclaim pages
free_page_hint_cmd_id: FREE_PAGE_HINT_DONE,
};
balloon.hinting_state = state.hinting_state;
if state.virtio_state.activated && balloon.stats_enabled() {
// Restore the stats descriptor.
balloon.set_stats_desc_index(state.stats_desc_index);
// Restart timer if needed.
let duration = Duration::from_secs(state.stats_polling_interval_s as u64);
balloon.stats_timer.arm(duration, Some(duration));
}
Ok(balloon)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::test_utils::{default_interrupt, default_mem};
use crate::snapshot::Snapshot;
#[test]
fn test_persistence() {
let guest_mem = default_mem();
let mut mem = vec![0; 4096];
// Create and save the balloon device.
let balloon = Balloon::new(0x42, false, 2, false, false).unwrap();
Snapshot::new(balloon.save())
.save(&mut mem.as_mut_slice())
.unwrap();
// Deserialize and restore the balloon device.
let restored_balloon = Balloon::restore(
BalloonConstructorArgs { mem: guest_mem },
&Snapshot::load_without_crc_check(mem.as_slice())
.unwrap()
.data,
)
.unwrap();
assert_eq!(restored_balloon.device_type(), VIRTIO_ID_BALLOON);
assert_eq!(restored_balloon.acked_features, balloon.acked_features);
assert_eq!(restored_balloon.avail_features, balloon.avail_features);
assert_eq!(
restored_balloon.config_space.num_pages,
balloon.config_space.num_pages
);
assert_eq!(
restored_balloon.config_space.actual_pages,
balloon.config_space.actual_pages
);
assert_eq!(
restored_balloon.config_space.free_page_hint_cmd_id,
FREE_PAGE_HINT_DONE
);
assert_eq!(restored_balloon.queues(), balloon.queues());
assert!(!restored_balloon.is_activated());
assert!(!balloon.is_activated());
assert_eq!(
restored_balloon.stats_polling_interval_s,
balloon.stats_polling_interval_s
);
assert_eq!(restored_balloon.stats_desc_index, balloon.stats_desc_index);
assert_eq!(restored_balloon.latest_stats, balloon.latest_stats);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/balloon/device.rs | src/vmm/src/devices/virtio/balloon/device.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::ops::Deref;
use std::sync::Arc;
use std::time::Duration;
use log::{debug, error, info, warn};
use serde::{Deserialize, Serialize};
use utils::time::TimerFd;
use vmm_sys_util::eventfd::EventFd;
use super::super::ActivateError;
use super::super::device::{DeviceState, VirtioDevice};
use super::super::queue::Queue;
use super::metrics::METRICS;
use super::util::compact_page_frame_numbers;
use super::{
BALLOON_DEV_ID, BALLOON_MIN_NUM_QUEUES, BALLOON_QUEUE_SIZE, DEFLATE_INDEX, FREE_PAGE_HINT_DONE,
FREE_PAGE_HINT_STOP, INFLATE_INDEX, MAX_PAGE_COMPACT_BUFFER, MAX_PAGES_IN_DESC,
MIB_TO_4K_PAGES, STATS_INDEX, VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
VIRTIO_BALLOON_F_FREE_PAGE_HINTING, VIRTIO_BALLOON_F_FREE_PAGE_REPORTING,
VIRTIO_BALLOON_F_STATS_VQ, VIRTIO_BALLOON_PFN_SHIFT, VIRTIO_BALLOON_S_ALLOC_STALL,
VIRTIO_BALLOON_S_ASYNC_RECLAIM, VIRTIO_BALLOON_S_ASYNC_SCAN, VIRTIO_BALLOON_S_AVAIL,
VIRTIO_BALLOON_S_CACHES, VIRTIO_BALLOON_S_DIRECT_RECLAIM, VIRTIO_BALLOON_S_DIRECT_SCAN,
VIRTIO_BALLOON_S_HTLB_PGALLOC, VIRTIO_BALLOON_S_HTLB_PGFAIL, VIRTIO_BALLOON_S_MAJFLT,
VIRTIO_BALLOON_S_MEMFREE, VIRTIO_BALLOON_S_MEMTOT, VIRTIO_BALLOON_S_MINFLT,
VIRTIO_BALLOON_S_OOM_KILL, VIRTIO_BALLOON_S_SWAP_IN, VIRTIO_BALLOON_S_SWAP_OUT,
};
use crate::devices::virtio::balloon::BalloonError;
use crate::devices::virtio::device::ActiveState;
use crate::devices::virtio::generated::virtio_config::VIRTIO_F_VERSION_1;
use crate::devices::virtio::generated::virtio_ids::VIRTIO_ID_BALLOON;
use crate::devices::virtio::queue::InvalidAvailIdx;
use crate::devices::virtio::transport::{VirtioInterrupt, VirtioInterruptType};
use crate::logger::{IncMetric, log_dev_preview_warning};
use crate::utils::u64_to_usize;
use crate::vstate::memory::{
Address, ByteValued, Bytes, GuestAddress, GuestMemoryExtension, GuestMemoryMmap,
};
use crate::{impl_device_type, mem_size_mib};
const SIZE_OF_U32: usize = std::mem::size_of::<u32>();
const SIZE_OF_STAT: usize = std::mem::size_of::<BalloonStat>();
fn mib_to_pages(amount_mib: u32) -> Result<u32, BalloonError> {
amount_mib
.checked_mul(MIB_TO_4K_PAGES)
.ok_or(BalloonError::TooMuchMemoryRequested(
u32::MAX / MIB_TO_4K_PAGES,
))
}
fn pages_to_mib(amount_pages: u32) -> u32 {
amount_pages / MIB_TO_4K_PAGES
}
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, PartialEq)]
pub(crate) struct ConfigSpace {
pub num_pages: u32,
pub actual_pages: u32,
pub free_page_hint_cmd_id: u32,
}
// SAFETY: Safe because ConfigSpace only contains plain data.
unsafe impl ByteValued for ConfigSpace {}
/// Holds state of the free page hinting run
#[derive(Copy, Clone, Debug, Default, Serialize, Deserialize)]
pub(crate) struct HintingState {
/// The command requested by us. Set to STOP by default.
pub host_cmd: u32,
/// The last command supplied by guest.
pub last_cmd_id: u32,
/// The command supplied by guest.
pub guest_cmd: Option<u32>,
/// Whether or not to automatically ack on STOP.
pub acknowledge_on_finish: bool,
}
/// By default hinting will ack on stop
fn default_ack_on_stop() -> bool {
true
}
/// Command recieved from the API to start a hinting run
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize)]
pub struct StartHintingCmd {
/// If we should automatically acknowledge end of the run after stop.
#[serde(default = "default_ack_on_stop")]
pub acknowledge_on_stop: bool,
}
impl Default for StartHintingCmd {
fn default() -> Self {
Self {
acknowledge_on_stop: true,
}
}
}
/// Returned to the API for get hinting status.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default, Serialize)]
pub struct HintingStatus {
    /// The command requested by us. Set to STOP by default.
    pub host_cmd: u32,
    /// The command supplied by guest.
    pub guest_cmd: Option<u32>,
}
// One entry of the stats queue: a tag identifying the statistic plus its value.
// This structure needs the `packed` attribute, otherwise Rust will assume
// the size to be 16 bytes (the u64 would force 8-byte alignment and padding).
#[derive(Copy, Clone, Debug, Default)]
#[repr(C, packed)]
struct BalloonStat {
    pub tag: u16,
    pub val: u64,
}
// SAFETY: Safe because BalloonStat only contains plain data.
unsafe impl ByteValued for BalloonStat {}
/// Holds configuration details for the balloon device.
#[derive(Clone, Default, Debug, PartialEq, Eq, Serialize)]
pub struct BalloonConfig {
    /// Target size, in MiB.
    pub amount_mib: u32,
    /// Whether or not to ask for pages back.
    pub deflate_on_oom: bool,
    /// Interval of time in seconds at which the balloon statistics are updated.
    /// A value of 0 disables statistics altogether.
    pub stats_polling_interval_s: u16,
    /// Free page hinting enabled.
    #[serde(default)]
    pub free_page_hinting: bool,
    /// Free page reporting enabled.
    #[serde(default)]
    pub free_page_reporting: bool,
}
/// BalloonStats holds statistics returned from the stats_queue.
///
/// Optional fields are only populated once the guest has reported the
/// corresponding virtio stat tag; they are omitted from serialized output
/// until then.
#[derive(Clone, Copy, Default, Debug, PartialEq, Eq, Serialize)]
#[serde(deny_unknown_fields)]
pub struct BalloonStats {
    /// The target size of the balloon, in 4K pages.
    pub target_pages: u32,
    /// The number of 4K pages the device is currently holding.
    pub actual_pages: u32,
    /// The target size of the balloon, in MiB.
    pub target_mib: u32,
    /// The number of MiB the device is currently holding.
    pub actual_mib: u32,
    /// Amount of memory swapped in.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub swap_in: Option<u64>,
    /// Amount of memory swapped out.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub swap_out: Option<u64>,
    /// Number of major faults.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub major_faults: Option<u64>,
    /// Number of minor faults.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub minor_faults: Option<u64>,
    /// The amount of memory not being used for any
    /// purpose (in bytes).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub free_memory: Option<u64>,
    /// Total amount of memory available (in bytes).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub total_memory: Option<u64>,
    /// An estimate of how much memory is available (in
    /// bytes) for starting new applications, without pushing the system to swap.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub available_memory: Option<u64>,
    /// The amount of memory, in bytes, that can be
    /// quickly reclaimed without additional I/O. Typically these pages are used for
    /// caching files from disk.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub disk_caches: Option<u64>,
    /// The number of successful hugetlb page
    /// allocations in the guest.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hugetlb_allocations: Option<u64>,
    /// The number of failed hugetlb page allocations
    /// in the guest.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hugetlb_failures: Option<u64>,
    /// OOM killer invocations. Since Linux v6.12.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub oom_kill: Option<u64>,
    /// Stall count of memory allocation. Since Linux v6.12.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub alloc_stall: Option<u64>,
    /// Amount of memory scanned asynchronously. Since Linux v6.12.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub async_scan: Option<u64>,
    /// Amount of memory scanned directly. Since Linux v6.12.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub direct_scan: Option<u64>,
    /// Amount of memory reclaimed asynchronously. Since Linux v6.12.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub async_reclaim: Option<u64>,
    /// Amount of memory reclaimed directly. Since Linux v6.12.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub direct_reclaim: Option<u64>,
}
impl BalloonStats {
    /// Merge a single stats-queue entry into the cached statistics,
    /// dispatching on the virtio balloon stat tag.
    ///
    /// Unknown tags increment the `stats_update_fails` metric and are logged
    /// at debug level; they are not treated as errors.
    fn update_with_stat(&mut self, stat: &BalloonStat) {
        let val = Some(stat.val);
        match stat.tag {
            VIRTIO_BALLOON_S_SWAP_IN => self.swap_in = val,
            VIRTIO_BALLOON_S_SWAP_OUT => self.swap_out = val,
            VIRTIO_BALLOON_S_MAJFLT => self.major_faults = val,
            VIRTIO_BALLOON_S_MINFLT => self.minor_faults = val,
            VIRTIO_BALLOON_S_MEMFREE => self.free_memory = val,
            VIRTIO_BALLOON_S_MEMTOT => self.total_memory = val,
            VIRTIO_BALLOON_S_AVAIL => self.available_memory = val,
            VIRTIO_BALLOON_S_CACHES => self.disk_caches = val,
            VIRTIO_BALLOON_S_HTLB_PGALLOC => self.hugetlb_allocations = val,
            VIRTIO_BALLOON_S_HTLB_PGFAIL => self.hugetlb_failures = val,
            VIRTIO_BALLOON_S_OOM_KILL => self.oom_kill = val,
            VIRTIO_BALLOON_S_ALLOC_STALL => self.alloc_stall = val,
            VIRTIO_BALLOON_S_ASYNC_SCAN => self.async_scan = val,
            VIRTIO_BALLOON_S_DIRECT_SCAN => self.direct_scan = val,
            VIRTIO_BALLOON_S_ASYNC_RECLAIM => self.async_reclaim = val,
            VIRTIO_BALLOON_S_DIRECT_RECLAIM => self.direct_reclaim = val,
            tag => {
                METRICS.stats_update_fails.inc();
                debug!("balloon: unknown stats update tag: {tag}");
            }
        }
    }
}
/// Virtio balloon device.
#[derive(Debug)]
pub struct Balloon {
    // Virtio fields.
    pub(crate) avail_features: u64,
    pub(crate) acked_features: u64,
    pub(crate) config_space: ConfigSpace,
    pub(crate) activate_evt: EventFd,
    // Transport related fields. Queue layout depends on enabled features:
    // inflate, deflate, then (optionally) stats, hinting, reporting — see
    // `free_page_hinting_idx` / `free_page_reporting_idx`.
    pub(crate) queues: Vec<Queue>,
    pub(crate) queue_evts: Vec<EventFd>,
    pub(crate) device_state: DeviceState,
    // Implementation specific fields.
    pub(crate) stats_polling_interval_s: u16,
    pub(crate) stats_timer: TimerFd,
    // The index of the previous stats descriptor is saved because
    // it is acknowledged after the stats queue is processed.
    pub(crate) stats_desc_index: Option<u16>,
    pub(crate) latest_stats: BalloonStats,
    // A buffer used as pfn accumulator during descriptor processing.
    pub(crate) pfn_buffer: [u32; MAX_PAGE_COMPACT_BUFFER],
    // Holds state for free page hinting.
    pub(crate) hinting_state: HintingState,
}
impl Balloon {
    /// Instantiate a new balloon device.
    ///
    /// Feature flags and the queue count are derived from the arguments.
    /// NOTE: the order in which optional queues are appended here (stats,
    /// then hinting, then reporting) must stay in sync with
    /// `free_page_hinting_idx` / `free_page_reporting_idx`.
    ///
    /// # Errors
    /// Returns `BalloonError::TooMuchMemoryRequested` if `amount_mib` does
    /// not fit in a `u32` of 4K pages, or `BalloonError::EventFd` if event
    /// fd creation fails.
    pub fn new(
        amount_mib: u32,
        deflate_on_oom: bool,
        stats_polling_interval_s: u16,
        free_page_hinting: bool,
        free_page_reporting: bool,
    ) -> Result<Balloon, BalloonError> {
        let mut avail_features = 1u64 << VIRTIO_F_VERSION_1;
        if deflate_on_oom {
            avail_features |= 1u64 << VIRTIO_BALLOON_F_DEFLATE_ON_OOM;
        };
        // The VirtIO specification states that the statistics queue should
        // not be present at all if the statistics are not enabled.
        let mut queue_count = BALLOON_MIN_NUM_QUEUES;
        if stats_polling_interval_s > 0 {
            avail_features |= 1u64 << VIRTIO_BALLOON_F_STATS_VQ;
            queue_count += 1;
        }
        if free_page_hinting {
            // Hinting is still a developer-preview feature; warn on use.
            log_dev_preview_warning("Free Page Hinting", None);
            avail_features |= 1u64 << VIRTIO_BALLOON_F_FREE_PAGE_HINTING;
            queue_count += 1;
        }
        if free_page_reporting {
            avail_features |= 1u64 << VIRTIO_BALLOON_F_FREE_PAGE_REPORTING;
            queue_count += 1;
        }
        let queues: Vec<Queue> = (0..queue_count)
            .map(|_| Queue::new(BALLOON_QUEUE_SIZE))
            .collect();
        let queue_evts = (0..queue_count)
            .map(|_| EventFd::new(libc::EFD_NONBLOCK).map_err(BalloonError::EventFd))
            .collect::<Result<Vec<_>, _>>()?;
        let stats_timer = TimerFd::new();
        Ok(Balloon {
            avail_features,
            acked_features: 0u64,
            config_space: ConfigSpace {
                num_pages: mib_to_pages(amount_mib)?,
                actual_pages: 0,
                // No hinting run is active initially.
                free_page_hint_cmd_id: FREE_PAGE_HINT_STOP,
            },
            queue_evts,
            queues,
            device_state: DeviceState::Inactive,
            activate_evt: EventFd::new(libc::EFD_NONBLOCK).map_err(BalloonError::EventFd)?,
            stats_polling_interval_s,
            stats_timer,
            stats_desc_index: None,
            latest_stats: BalloonStats::default(),
            pfn_buffer: [0u32; MAX_PAGE_COMPACT_BUFFER],
            hinting_state: Default::default(),
        })
    }
    /// Consume the inflate queue's event fd and process the inflate queue.
    pub(crate) fn process_inflate_queue_event(&mut self) -> Result<(), BalloonError> {
        self.queue_evts[INFLATE_INDEX]
            .read()
            .map_err(BalloonError::EventFd)?;
        self.process_inflate()
    }
    /// Consume the deflate queue's event fd and process the deflate queue.
    pub(crate) fn process_deflate_queue_event(&mut self) -> Result<(), BalloonError> {
        self.queue_evts[DEFLATE_INDEX]
            .read()
            .map_err(BalloonError::EventFd)?;
        self.process_deflate_queue()
    }
    /// Consume the stats queue's event fd and process the stats queue.
    pub(crate) fn process_stats_queue_event(&mut self) -> Result<(), BalloonError> {
        self.queue_evts[STATS_INDEX]
            .read()
            .map_err(BalloonError::EventFd)?;
        self.process_stats_queue()
    }
    /// Handle a stats timer tick by releasing the held stats descriptor.
    pub(crate) fn process_stats_timer_event(&mut self) -> Result<(), BalloonError> {
        // Timer read failures are deliberately ignored (best-effort tick).
        _ = self.stats_timer.read();
        self.trigger_stats_update()
    }
    /// Consume the hinting queue's event fd and process the hinting queue.
    pub(crate) fn process_free_page_hinting_queue_event(&mut self) -> Result<(), BalloonError> {
        self.queue_evts[self.free_page_hinting_idx()]
            .read()
            .map_err(BalloonError::EventFd)?;
        self.process_free_page_hinting_queue()
    }
    /// Consume the reporting queue's event fd and process the reporting queue.
    pub(crate) fn process_free_page_reporting_queue_event(&mut self) -> Result<(), BalloonError> {
        self.queue_evts[self.free_page_reporting_idx()]
            .read()
            .map_err(BalloonError::EventFd)?;
        self.process_free_page_reporting_queue()
    }
    /// Process the inflate queue: read page frame numbers handed over by the
    /// guest, compact them into contiguous ranges and discard the backing
    /// host memory.
    pub(crate) fn process_inflate(&mut self) -> Result<(), BalloonError> {
        // This is safe since we checked in the event handler that the device is activated.
        let mem = &self
            .device_state
            .active_state()
            .ok_or(BalloonError::DeviceNotActive)?
            .mem;
        METRICS.inflate_count.inc();
        let queue = &mut self.queues[INFLATE_INDEX];
        // The pfn buffer index used during descriptor processing.
        let mut pfn_buffer_idx = 0;
        let mut needs_interrupt = false;
        let mut valid_descs_found = true;
        // Loop until there are no more valid DescriptorChains.
        while valid_descs_found {
            valid_descs_found = false;
            // Internal loop processes descriptors and accumulates the pfns in `pfn_buffer`.
            // Breaks out when there is not enough space in `pfn_buffer` to completely process
            // the next descriptor.
            while let Some(head) = queue.pop()? {
                let len = head.len as usize;
                let max_len = MAX_PAGES_IN_DESC * SIZE_OF_U32;
                valid_descs_found = true;
                // Only device-readable descriptors whose length is a whole
                // number of u32 PFNs are processed; anything else is merely
                // acknowledged below.
                if !head.is_write_only() && len % SIZE_OF_U32 == 0 {
                    // Check descriptor pfn count.
                    if len > max_len {
                        error!(
                            "Inflate descriptor has bogus page count {} > {}, skipping.",
                            len / SIZE_OF_U32,
                            MAX_PAGES_IN_DESC
                        );
                        // Skip descriptor.
                        continue;
                    }
                    // Break loop if `pfn_buffer` will be overrun by adding all pfns from current
                    // desc. The descriptor is pushed back and re-popped after the
                    // buffer is flushed by the outer loop.
                    if MAX_PAGE_COMPACT_BUFFER - pfn_buffer_idx < len / SIZE_OF_U32 {
                        queue.undo_pop();
                        break;
                    }
                    // This is safe, `len` was validated above.
                    for index in (0..len).step_by(SIZE_OF_U32) {
                        let addr = head
                            .addr
                            .checked_add(index as u64)
                            .ok_or(BalloonError::MalformedDescriptor)?;
                        let page_frame_number = mem
                            .read_obj::<u32>(addr)
                            .map_err(|_| BalloonError::MalformedDescriptor)?;
                        self.pfn_buffer[pfn_buffer_idx] = page_frame_number;
                        pfn_buffer_idx += 1;
                    }
                }
                // Acknowledge the receipt of the descriptor.
                // 0 is number of bytes the device has written to memory.
                queue.add_used(head.index, 0)?;
                needs_interrupt = true;
            }
            // Compact pages into ranges.
            let page_ranges = compact_page_frame_numbers(&mut self.pfn_buffer[..pfn_buffer_idx]);
            pfn_buffer_idx = 0;
            // Remove the page ranges.
            for (page_frame_number, range_len) in page_ranges {
                let guest_addr =
                    GuestAddress(u64::from(page_frame_number) << VIRTIO_BALLOON_PFN_SHIFT);
                // Discard failures are logged but do not abort the run; the
                // remaining ranges are still released.
                if let Err(err) = mem.discard_range(
                    guest_addr,
                    usize::try_from(range_len).unwrap() << VIRTIO_BALLOON_PFN_SHIFT,
                ) {
                    error!("Error removing memory range: {:?}", err);
                }
            }
        }
        queue.advance_used_ring_idx();
        if needs_interrupt {
            self.signal_used_queue(INFLATE_INDEX)?;
        }
        Ok(())
    }
pub(crate) fn process_deflate_queue(&mut self) -> Result<(), BalloonError> {
METRICS.deflate_count.inc();
let queue = &mut self.queues[DEFLATE_INDEX];
let mut needs_interrupt = false;
while let Some(head) = queue.pop()? {
queue.add_used(head.index, 0)?;
needs_interrupt = true;
}
queue.advance_used_ring_idx();
if needs_interrupt {
self.signal_used_queue(DEFLATE_INDEX)
} else {
Ok(())
}
}
pub(crate) fn process_stats_queue(&mut self) -> Result<(), BalloonError> {
// This is safe since we checked in the event handler that the device is activated.
let mem = &self.device_state.active_state().unwrap().mem;
METRICS.stats_updates_count.inc();
while let Some(head) = self.queues[STATS_INDEX].pop()? {
if let Some(prev_stats_desc) = self.stats_desc_index {
// We shouldn't ever have an extra buffer if the driver follows
// the protocol, but return it if we find one.
error!("balloon: driver is not compliant, more than one stats buffer received");
self.queues[STATS_INDEX].add_used(prev_stats_desc, 0)?;
}
for index in (0..head.len).step_by(SIZE_OF_STAT) {
// Read the address at position `index`. The only case
// in which this fails is if there is overflow,
// in which case this descriptor is malformed,
// so we ignore the rest of it.
let addr = head
.addr
.checked_add(u64::from(index))
.ok_or(BalloonError::MalformedDescriptor)?;
let stat = mem
.read_obj::<BalloonStat>(addr)
.map_err(|_| BalloonError::MalformedDescriptor)?;
self.latest_stats.update_with_stat(&stat);
}
self.stats_desc_index = Some(head.index);
}
Ok(())
}
pub(crate) fn process_free_page_hinting_queue(&mut self) -> Result<(), BalloonError> {
let mem = &self
.device_state
.active_state()
.ok_or(BalloonError::DeviceNotActive)?
.mem;
let idx = self.free_page_hinting_idx();
let queue = &mut self.queues[idx];
let host_cmd = self.hinting_state.host_cmd;
let mut needs_interrupt = false;
let mut complete = false;
while let Some(head) = queue.pop()? {
let head_index = head.index;
let mut last_desc = Some(head);
while let Some(desc) = last_desc {
last_desc = desc.next_descriptor();
// Updated cmd_ids are always of length 4
if desc.len == 4 {
complete = false;
let cmd = mem
.read_obj::<u32>(desc.addr)
.map_err(|_| BalloonError::MalformedDescriptor)?;
self.hinting_state.guest_cmd = Some(cmd);
if cmd == FREE_PAGE_HINT_STOP {
complete = true;
}
// We don't expect this from the driver, but lets treat as a stop
if cmd == FREE_PAGE_HINT_DONE {
warn!("balloon hinting: Unexpected cmd from guest: {cmd}");
complete = true;
}
continue;
}
// If we've requested done we have to discard any in-flight hints
if host_cmd == FREE_PAGE_HINT_DONE || host_cmd == FREE_PAGE_HINT_STOP {
continue;
}
let Some(chain_cmd) = self.hinting_state.guest_cmd else {
warn!("balloon hinting: received range with no command id.");
continue;
};
if chain_cmd != host_cmd {
info!("balloon hinting: Received chain from previous command ignoring.");
continue;
}
METRICS.free_page_hint_count.inc();
if let Err(err) = mem.discard_range(desc.addr, desc.len as usize) {
METRICS.free_page_hint_fails.inc();
error!("balloon hinting: failed to remove range: {err:?}");
} else {
METRICS.free_page_hint_freed.add(desc.len as u64);
}
}
queue.add_used(head.index, 0)?;
needs_interrupt = true;
}
queue.advance_used_ring_idx();
if needs_interrupt {
self.signal_used_queue(idx)?;
}
if complete && self.hinting_state.acknowledge_on_finish {
self.update_free_page_hint_cmd(FREE_PAGE_HINT_DONE);
}
Ok(())
}
pub(crate) fn process_free_page_reporting_queue(&mut self) -> Result<(), BalloonError> {
let mem = &self
.device_state
.active_state()
.ok_or(BalloonError::DeviceNotActive)?
.mem;
let idx = self.free_page_reporting_idx();
let queue = &mut self.queues[idx];
let mut needs_interrupt = false;
while let Some(head) = queue.pop()? {
let head_index = head.index;
let mut last_desc = Some(head);
while let Some(desc) = last_desc {
METRICS.free_page_report_count.inc();
if let Err(err) = mem.discard_range(desc.addr, desc.len as usize) {
METRICS.free_page_report_fails.inc();
error!("balloon: failed to remove range: {err:?}");
} else {
METRICS.free_page_report_freed.add(desc.len as u64);
}
last_desc = desc.next_descriptor();
}
queue.add_used(head.index, 0)?;
needs_interrupt = true;
}
queue.advance_used_ring_idx();
if needs_interrupt {
self.signal_used_queue(idx)?;
}
Ok(())
}
pub(crate) fn signal_used_queue(&self, qidx: usize) -> Result<(), BalloonError> {
self.interrupt_trigger()
.trigger(VirtioInterruptType::Queue(
qidx.try_into()
.unwrap_or_else(|_| panic!("balloon: invalid queue id: {qidx}")),
))
.map_err(|err| {
METRICS.event_fails.inc();
BalloonError::InterruptError(err)
})
}
    /// Process device virtio queue(s).
    ///
    /// Only `InvalidAvailIdx` errors abort processing; any other per-queue
    /// error is swallowed here (the individual processors log/count them).
    /// Hinting and reporting queues are only touched when the corresponding
    /// feature is enabled.
    pub fn process_virtio_queues(&mut self) -> Result<(), InvalidAvailIdx> {
        if let Err(BalloonError::InvalidAvailIdx(err)) = self.process_inflate() {
            return Err(err);
        }
        if let Err(BalloonError::InvalidAvailIdx(err)) = self.process_deflate_queue() {
            return Err(err);
        }
        if self.free_page_hinting()
            && let Err(BalloonError::InvalidAvailIdx(err)) = self.process_free_page_hinting_queue()
        {
            return Err(err);
        }
        if self.free_page_reporting()
            && let Err(BalloonError::InvalidAvailIdx(err)) =
                self.process_free_page_reporting_queue()
        {
            return Err(err);
        }
        Ok(())
    }
    /// Provides the ID of this balloon device.
    pub fn id(&self) -> &str {
        BALLOON_DEV_ID
    }
    /// Release the held stats descriptor back to the guest, which prompts
    /// the driver to refill it with fresh statistics.
    fn trigger_stats_update(&mut self) -> Result<(), BalloonError> {
        // The communication is driven by the device by using the buffer
        // and sending a used buffer notification
        if let Some(index) = self.stats_desc_index.take() {
            self.queues[STATS_INDEX].add_used(index, 0)?;
            self.queues[STATS_INDEX].advance_used_ring_idx();
            self.signal_used_queue(STATS_INDEX)
        } else {
            // No descriptor available (e.g. the guest hasn't submitted one
            // yet); log and carry on rather than fail the timer tick.
            error!("Failed to update balloon stats, missing descriptor.");
            Ok(())
        }
    }
/// Update the target size of the balloon.
pub fn update_size(&mut self, amount_mib: u32) -> Result<(), BalloonError> {
if self.is_activated() {
let mem = &self.device_state.active_state().unwrap().mem;
// The balloon cannot have a target size greater than the size of
// the guest memory.
if u64::from(amount_mib) > mem_size_mib(mem) {
return Err(BalloonError::TooMuchMemoryRequested(amount_mib));
}
self.config_space.num_pages = mib_to_pages(amount_mib)?;
self.interrupt_trigger()
.trigger(VirtioInterruptType::Config)
.map_err(BalloonError::InterruptError)
} else {
Err(BalloonError::DeviceNotActive)
}
}
pub fn free_page_hinting(&self) -> bool {
self.avail_features & (1u64 << VIRTIO_BALLOON_F_FREE_PAGE_HINTING) != 0
}
pub fn free_page_hinting_idx(&self) -> usize {
let mut idx = BALLOON_MIN_NUM_QUEUES;
if self.stats_polling_interval_s > 0 {
idx += 1;
}
idx
}
pub fn free_page_reporting(&self) -> bool {
self.avail_features & (1u64 << VIRTIO_BALLOON_F_FREE_PAGE_REPORTING) != 0
}
pub fn free_page_reporting_idx(&self) -> usize {
let mut idx = BALLOON_MIN_NUM_QUEUES;
if self.stats_polling_interval_s > 0 {
idx += 1;
}
if self.free_page_hinting() {
idx += 1;
}
idx
}
    /// Update the statistics polling interval.
    ///
    /// The interval may only be changed between two non-zero values:
    /// enabling or disabling statistics at runtime would require adding or
    /// removing the stats queue, which is not supported
    /// (`StatisticsStateChange`).
    pub fn update_stats_polling_interval(&mut self, interval_s: u16) -> Result<(), BalloonError> {
        if self.stats_polling_interval_s == interval_s {
            return Ok(());
        }
        if self.stats_polling_interval_s == 0 || interval_s == 0 {
            return Err(BalloonError::StatisticsStateChange);
        }
        self.trigger_stats_update()?;
        self.stats_polling_interval_s = interval_s;
        self.update_timer_state();
        Ok(())
    }
    /// Re-arm the stats timer as a periodic timer with the current interval.
    pub fn update_timer_state(&mut self) {
        let duration = Duration::from_secs(self.stats_polling_interval_s as u64);
        self.stats_timer.arm(duration, Some(duration));
    }
    /// Obtain the target size of the balloon, in 4K pages.
    pub fn num_pages(&self) -> u32 {
        self.config_space.num_pages
    }
    /// Obtain the target size of the balloon, in MiB.
    pub fn size_mb(&self) -> u32 {
        pages_to_mib(self.config_space.num_pages)
    }
    /// Whether the deflate-on-OOM feature is enabled on this device.
    pub fn deflate_on_oom(&self) -> bool {
        self.avail_features & (1u64 << VIRTIO_BALLOON_F_DEFLATE_ON_OOM) != 0
    }
    /// The configured statistics polling interval (0 = disabled).
    pub fn stats_polling_interval_s(&self) -> u16 {
        self.stats_polling_interval_s
    }
    /// Retrieve latest stats for the balloon device.
    ///
    /// The target/actual fields are refreshed from the config space before
    /// returning. Errors with `StatisticsDisabled` when the polling interval
    /// is 0.
    pub fn latest_stats(&mut self) -> Result<BalloonStats, BalloonError> {
        if self.stats_enabled() {
            self.latest_stats.target_pages = self.config_space.num_pages;
            self.latest_stats.actual_pages = self.config_space.actual_pages;
            self.latest_stats.target_mib = pages_to_mib(self.latest_stats.target_pages);
            self.latest_stats.actual_mib = pages_to_mib(self.latest_stats.actual_pages);
            Ok(self.latest_stats)
        } else {
            Err(BalloonError::StatisticsDisabled)
        }
    }
    /// Update the free page hinting cmd.
    ///
    /// Stores the command id in both the hinting state and the config space,
    /// then notifies the guest via a config-change interrupt.
    pub fn update_free_page_hint_cmd(&mut self, cmd_id: u32) -> Result<(), BalloonError> {
        if !self.is_activated() {
            return Err(BalloonError::DeviceNotActive);
        }
        self.hinting_state.host_cmd = cmd_id;
        self.config_space.free_page_hint_cmd_id = cmd_id;
        self.interrupt_trigger()
            .trigger(VirtioInterruptType::Config)
            .map_err(BalloonError::InterruptError)
    }
    /// Starts a hinting run by setting the cmd_id to a new value.
    pub(crate) fn start_hinting(&mut self, cmd: StartHintingCmd) -> Result<(), BalloonError> {
        if !self.free_page_hinting() {
            return Err(BalloonError::HintingNotEnabled);
        }
        let mut cmd_id = self.hinting_state.last_cmd_id.wrapping_add(1);
        // 0 and 1 are reserved (STOP and DONE) and cannot be used to start a
        // hinting run, so skip over them on wrap-around.
        if cmd_id <= 1 {
            cmd_id = 2;
        }
        self.hinting_state.acknowledge_on_finish = cmd.acknowledge_on_stop;
        self.hinting_state.last_cmd_id = cmd_id;
        self.update_free_page_hint_cmd(cmd_id)
    }
    /// Return the status of the hinting including the last command we sent to the driver
    /// and the last cmd sent from the driver.
    pub(crate) fn get_hinting_status(&self) -> Result<HintingStatus, BalloonError> {
        if !self.free_page_hinting() {
            return Err(BalloonError::HintingNotEnabled);
        }
        Ok(HintingStatus {
            host_cmd: self.hinting_state.host_cmd,
            guest_cmd: self.hinting_state.guest_cmd,
        })
    }
    /// Stops the hinting run (by sending DONE), allowing the guest to
    /// reclaim hinted pages.
    pub(crate) fn stop_hinting(&mut self) -> Result<(), BalloonError> {
        if !self.free_page_hinting() {
            Err(BalloonError::HintingNotEnabled)
        } else {
            self.update_free_page_hint_cmd(FREE_PAGE_HINT_DONE)
        }
    }
    /// Return the config of the balloon device.
    pub fn config(&self) -> BalloonConfig {
        BalloonConfig {
            amount_mib: self.size_mb(),
            deflate_on_oom: self.deflate_on_oom(),
            stats_polling_interval_s: self.stats_polling_interval_s(),
            free_page_hinting: self.free_page_hinting(),
            free_page_reporting: self.free_page_reporting(),
        }
    }
    /// Whether statistics collection is enabled (non-zero polling interval).
    pub(crate) fn stats_enabled(&self) -> bool {
        self.stats_polling_interval_s > 0
    }
    /// Overwrite the held stats descriptor index (used by snapshot restore).
    pub(crate) fn set_stats_desc_index(&mut self, stats_desc_index: Option<u16>) {
        self.stats_desc_index = stats_desc_index;
    }
}
impl VirtioDevice for Balloon {
impl_device_type!(VIRTIO_ID_BALLOON);
fn avail_features(&self) -> u64 {
self.avail_features
}
fn acked_features(&self) -> u64 {
self.acked_features
}
fn set_acked_features(&mut self, acked_features: u64) {
self.acked_features = acked_features;
}
fn queues(&self) -> &[Queue] {
&self.queues
}
fn queues_mut(&mut self) -> &mut [Queue] {
&mut self.queues
}
fn queue_events(&self) -> &[EventFd] {
&self.queue_evts
}
fn interrupt_trigger(&self) -> &dyn VirtioInterrupt {
self.device_state
.active_state()
.expect("Device is not activated")
.interrupt
.deref()
}
fn read_config(&self, offset: u64, data: &mut [u8]) {
if let Some(config_space_bytes) = self.config_space.as_slice().get(u64_to_usize(offset)..) {
let len = config_space_bytes.len().min(data.len());
data[..len].copy_from_slice(&config_space_bytes[..len]);
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/balloon/test_utils.rs | src/vmm/src/devices/virtio/balloon/test_utils.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![doc(hidden)]
#[cfg(test)]
use crate::devices::virtio::balloon::Balloon;
#[cfg(test)]
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::test_utils::VirtQueue;
#[cfg(test)]
/// Max number of virtio queues.
const BALLOON_MAX_NUM_QUEUES: usize = 5;
#[cfg(test)]
/// Fire the event fd for `queue_index` and dispatch to the matching queue
/// handler, then assert and ack the resulting queue interrupt.
pub fn invoke_handler_for_queue_event(b: &mut Balloon, queue_index: usize) {
    use crate::devices::virtio::balloon::{DEFLATE_INDEX, INFLATE_INDEX, STATS_INDEX};
    use crate::devices::virtio::transport::VirtioInterruptType;
    let hinting_idx = b.free_page_hinting_idx();
    let reporting_idx = b.free_page_reporting_idx();
    assert!(queue_index < BALLOON_MAX_NUM_QUEUES);
    // Trigger the queue event.
    b.queue_evts[queue_index].write(1).unwrap();
    // Handle event.
    // Reporting -> hinting -> stats ordering is important as they will change
    // depending on enabled features.
    //
    // NOTE: the guards compare against the computed indices explicitly. A
    // bare lowercase identifier in a match arm is a *binding* pattern that
    // matches anything — it does not compare against an outer variable of
    // the same name — so `reporting_idx if b.free_page_reporting()` would
    // capture every index whenever reporting is enabled.
    match queue_index {
        INFLATE_INDEX => b.process_inflate_queue_event().unwrap(),
        DEFLATE_INDEX => b.process_deflate_queue_event().unwrap(),
        idx if idx == reporting_idx && b.free_page_reporting() => {
            b.process_free_page_reporting_queue_event().unwrap()
        }
        idx if idx == hinting_idx && b.free_page_hinting() => {
            b.process_free_page_hinting_queue_event().unwrap()
        }
        STATS_INDEX => b.process_stats_queue_event().unwrap(),
        _ => unreachable!(),
    };
    // Validate the queue operation finished successfully.
    let interrupt = b.interrupt_trigger();
    assert!(
        interrupt
            .has_pending_interrupt(VirtioInterruptType::Queue(queue_index.try_into().unwrap()))
    );
    interrupt.ack_interrupt(VirtioInterruptType::Queue(queue_index.try_into().unwrap()));
}
/// Publish one descriptor-chain request at slot `idx` of the test queue:
/// descriptor table entry `idx` is pointed at `(addr, len, flags)` and the
/// avail ring is advanced past it.
pub fn set_request(queue: &VirtQueue, idx: u16, addr: u64, len: u32, flags: u16) {
    // Set the index of the next request.
    queue.avail.idx.set(idx + 1);
    // Set the current descriptor table entry index.
    queue.avail.ring[idx as usize].set(idx);
    // Set the current descriptor table entry.
    queue.dtable[idx as usize].set(addr, len, flags, 1);
}
/// Assert that the request submitted at slot `idx` has been consumed and
/// returned on the used ring with a written length of 0.
pub fn check_request_completion(queue: &VirtQueue, idx: usize) {
    // Check that the next used will be idx + 1.
    assert_eq!(queue.used.idx.get() as usize, idx + 1);
    // Check that the current used is idx.
    assert_eq!(queue.used.ring[idx].get().id as usize, idx);
    // The length of the completed request is 0.
    assert_eq!(queue.used.ring[idx].get().len, 0);
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/balloon/util.rs | src/vmm/src/devices/virtio/balloon/util.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::io;
use super::{MAX_PAGE_COMPACT_BUFFER, RemoveRegionError};
use crate::logger::error;
use crate::utils::u64_to_usize;
use crate::vstate::memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestMemoryRegion};
/// This takes a vector of page frame numbers, and compacts them
/// into ranges of consecutive pages. The result is a vector
/// of (start_page_frame_number, range_length) pairs.
///
/// The input slice is sorted in place as a side effect. Duplicate PFNs are
/// skipped (with an error log), so the output ranges are sorted, disjoint
/// and cover each distinct input PFN exactly once.
pub(crate) fn compact_page_frame_numbers(v: &mut [u32]) -> Vec<(u32, u32)> {
    if v.is_empty() {
        return vec![];
    }
    // Since the total number of pages that can be
    // received at once is `MAX_PAGE_COMPACT_BUFFER`,
    // this sort does not change the complexity of handling
    // an inflation.
    v.sort_unstable();
    // Since there are at most `MAX_PAGE_COMPACT_BUFFER` pages, setting the
    // capacity of `result` to this makes sense.
    let mut result = Vec::with_capacity(MAX_PAGE_COMPACT_BUFFER);
    // The most recent range of pages is [previous..previous + length).
    // `previous` is an *index* into `v` (the start element of the current
    // range); `length` is the number of distinct consecutive PFNs seen.
    let mut previous = 0;
    let mut length = 1;
    for pfn_index in 1..v.len() {
        let page_frame_number = v[pfn_index];
        // Skip duplicate pages. This will ensure we only consider
        // distinct PFNs.
        if page_frame_number == v[pfn_index - 1] {
            error!("Skipping duplicate PFN {}.", page_frame_number);
            continue;
        }
        // Check if the current page frame number is adjacent to the most recent page range.
        // This operation will never overflow because for whatever value `v[previous]`
        // has in the u32 range, we know there are at least `length` consecutive numbers
        // greater than it in the array (the greatest so far being `page_frame_number`),
        // since `v[previous]` is before all of them in the sorted array and `length`
        // was incremented for each consecutive one. This is true only because we skip
        // duplicates.
        if page_frame_number == v[previous] + length {
            // If so, extend that range.
            length += 1;
        } else {
            // Otherwise, push (previous, length) to the result vector.
            result.push((v[previous], length));
            // And update the most recent range of pages.
            previous = pfn_index;
            length = 1;
        }
    }
    // Don't forget to push the last range to the result.
    result.push((v[previous], length));
    result
}
#[cfg(test)]
mod tests {
    use std::fmt::Debug;
    use super::*;
    use crate::vstate::memory::Bytes;
    /// This asserts that $lhs matches $rhs.
    macro_rules! assert_match {
        ($lhs:expr, $rhs:pat) => {{ assert!(matches!($lhs, $rhs)) }};
    }
    #[test]
    fn test_compact_page_indices() {
        // Test empty input.
        assert!(compact_page_frame_numbers(&mut []).is_empty());
        // Test single compact range.
        assert_eq!(
            compact_page_frame_numbers((0_u32..100_u32).collect::<Vec<u32>>().as_mut_slice()),
            vec![(0, 100)]
        );
        // `compact_page_frame_numbers` works even when given out of order input.
        assert_eq!(
            compact_page_frame_numbers((0_u32..100_u32).rev().collect::<Vec<u32>>().as_mut_slice()),
            vec![(0, 100)]
        );
        // Test with 100 distinct ranges.
        assert_eq!(
            compact_page_frame_numbers(
                &mut (0_u32..10000_u32)
                    .step_by(100)
                    .flat_map(|x| (x..x + 10).rev())
                    .collect::<Vec<u32>>()
            ),
            (0_u32..10000_u32)
                .step_by(100)
                .map(|x| (x, 10_u32))
                .collect::<Vec<(u32, u32)>>()
        );
        // Test range with duplicates.
        assert_eq!(
            compact_page_frame_numbers(
                &mut (0_u32..10000_u32).map(|x| x / 2).collect::<Vec<u32>>()
            ),
            vec![(0, 5000)]
        );
        // Test there is no overflow when there are duplicate max values.
        assert_eq!(
            compact_page_frame_numbers(&mut [u32::MAX, u32::MAX]),
            vec![(u32::MAX, 1)]
        );
    }
    /// -------------------------------------
    /// BEGIN PROPERTY BASED TESTING
    use proptest::prelude::*;
    use crate::test_utils::single_region_mem;
    #[allow(clippy::let_with_type_underscore)]
    fn random_pfn_u32_max() -> impl Strategy<Value = Vec<u32>> {
        // Create a randomly sized vec (max MAX_PAGE_COMPACT_BUFFER elements) filled with random u32
        // elements.
        prop::collection::vec(0..u32::MAX, 0..MAX_PAGE_COMPACT_BUFFER)
    }
    #[allow(clippy::let_with_type_underscore)]
    fn random_pfn_100() -> impl Strategy<Value = Vec<u32>> {
        // Create a randomly sized vec (max MAX_PAGE_COMPACT_BUFFER/8) filled with random u32
        // elements (0 - 100). Small value range guarantees duplicates.
        prop::collection::vec(0..100u32, 0..MAX_PAGE_COMPACT_BUFFER / 8)
    }
    // The uncompactor will output deduplicated and sorted elements as compaction algorithm
    // guarantees it.
    fn uncompact(compacted: Vec<(u32, u32)>) -> Vec<u32> {
        let mut result = Vec::new();
        for (start, len) in compacted {
            result.extend(start..start + len);
        }
        result
    }
    // Reference model: the expected uncompacted output for any input.
    fn sort_and_dedup<T: Ord + Clone + Debug>(v: &[T]) -> Vec<T> {
        let mut sorted_v = v.to_vec();
        sorted_v.sort_unstable();
        sorted_v.dedup();
        sorted_v
    }
    // The below prop tests will validate the following output properties:
    // - vec elements are sorted by first tuple value
    // - no pfn duplicates are present
    // - no pfn is lost
    #[test]
    fn test_pfn_compact() {
        let cfg = ProptestConfig::with_cases(1500);
        proptest!(cfg, |(mut input1 in random_pfn_u32_max(), mut input2 in random_pfn_100())| {
            // The uncompactor will output sorted elements.
            prop_assert!(
                uncompact(compact_page_frame_numbers(input1.as_mut_slice()))
                    == sort_and_dedup(input1.as_slice())
            );
            // Input2 will ensure duplicate PFN cases are also covered.
            prop_assert!(
                uncompact(compact_page_frame_numbers(input2.as_mut_slice()))
                    == sort_and_dedup(input2.as_slice())
            );
        });
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/balloon/mod.rs | src/vmm/src/devices/virtio/balloon/mod.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Implements a virtio balloon device.
pub mod device;
mod event_handler;
pub mod metrics;
pub mod persist;
pub mod test_utils;
mod util;
use log::error;
pub use self::device::{Balloon, BalloonConfig, BalloonStats};
use super::queue::{InvalidAvailIdx, QueueError};
use crate::devices::virtio::balloon::metrics::METRICS;
use crate::devices::virtio::queue::FIRECRACKER_MAX_QUEUE_SIZE;
use crate::logger::IncMetric;
use crate::vstate::interrupts::InterruptError;
/// Device ID used in MMIO device identification.
/// Because Balloon is unique per-vm, this ID can be hardcoded.
pub const BALLOON_DEV_ID: &str = "balloon";
/// The size of the config space.
pub const BALLOON_CONFIG_SPACE_SIZE: usize = 12;
/// Min number of virtio queues.
pub const BALLOON_MIN_NUM_QUEUES: usize = 2;
/// Virtio queue size, in number of descriptor chain heads.
pub const BALLOON_QUEUE_SIZE: u16 = FIRECRACKER_MAX_QUEUE_SIZE;
// Number of 4K pages in a MiB.
pub const MIB_TO_4K_PAGES: u32 = 256;
/// The maximum number of pages that can be received in a single descriptor.
pub const MAX_PAGES_IN_DESC: usize = 256;
/// The maximum number of pages that can be compacted into ranges during process_inflate().
/// Needs to be a multiple of MAX_PAGES_IN_DESC.
pub const MAX_PAGE_COMPACT_BUFFER: usize = 2048;
/// The addresses given by the driver are divided by 4096.
pub const VIRTIO_BALLOON_PFN_SHIFT: u32 = 12;
/// The index of the inflate queue from Balloon device queues/queues_evts vector.
pub const INFLATE_INDEX: usize = 0;
/// The index of the deflate queue from Balloon device queues/queues_evts vector.
pub const DEFLATE_INDEX: usize = 1;
/// The index of the stats queue from Balloon device queues/queues_evts vector.
pub const STATS_INDEX: usize = 2;
/// Command used in free page hinting to indicate the guest has finished
pub const FREE_PAGE_HINT_STOP: u32 = 0;
/// Command used in free page hinting to indicate to the guest to release pages
pub const FREE_PAGE_HINT_DONE: u32 = 1;
// The feature bitmap for virtio balloon.
const VIRTIO_BALLOON_F_STATS_VQ: u32 = 1; // Enable statistics.
const VIRTIO_BALLOON_F_DEFLATE_ON_OOM: u32 = 2; // Deflate balloon on OOM.
const VIRTIO_BALLOON_F_FREE_PAGE_HINTING: u32 = 3; // Enable free page hinting
const VIRTIO_BALLOON_F_FREE_PAGE_REPORTING: u32 = 5; // Enable free page reporting
// The statistics tags. defined in linux "include/uapi/linux/virtio_balloon.h".
const VIRTIO_BALLOON_S_SWAP_IN: u16 = 0;
const VIRTIO_BALLOON_S_SWAP_OUT: u16 = 1;
const VIRTIO_BALLOON_S_MAJFLT: u16 = 2;
const VIRTIO_BALLOON_S_MINFLT: u16 = 3;
const VIRTIO_BALLOON_S_MEMFREE: u16 = 4;
const VIRTIO_BALLOON_S_MEMTOT: u16 = 5;
const VIRTIO_BALLOON_S_AVAIL: u16 = 6;
const VIRTIO_BALLOON_S_CACHES: u16 = 7;
const VIRTIO_BALLOON_S_HTLB_PGALLOC: u16 = 8;
const VIRTIO_BALLOON_S_HTLB_PGFAIL: u16 = 9;
const VIRTIO_BALLOON_S_OOM_KILL: u16 = 10;
const VIRTIO_BALLOON_S_ALLOC_STALL: u16 = 11;
const VIRTIO_BALLOON_S_ASYNC_SCAN: u16 = 12;
const VIRTIO_BALLOON_S_DIRECT_SCAN: u16 = 13;
const VIRTIO_BALLOON_S_ASYNC_RECLAIM: u16 = 14;
const VIRTIO_BALLOON_S_DIRECT_RECLAIM: u16 = 15;
/// Balloon device related errors.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum BalloonError {
/// Device not activated yet.
DeviceNotActive,
/// Attempting to use hinting when not enabled
HintingNotEnabled,
/// EventFd error: {0}
EventFd(std::io::Error),
/// Received error while sending an interrupt: {0}
InterruptError(InterruptError),
/// Guest gave us a malformed descriptor.
MalformedDescriptor,
/// Guest gave us a malformed payload.
MalformedPayload,
/// Error restoring the balloon device queues.
QueueRestoreError,
/// Received stats query when stats are disabled.
StatisticsDisabled,
/// Statistics cannot be enabled/disabled after activation.
StatisticsStateChange,
/// Requested memory should be less than {0}MiB
TooMuchMemoryRequested(u32),
/// Error while processing the virt queues: {0}
Queue(#[from] QueueError),
/// {0}
InvalidAvailIdx(#[from] InvalidAvailIdx),
}
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum RemoveRegionError {
/// Address translation error.
AddressTranslation,
/// Malformed guest address range.
MalformedRange,
/// Error calling madvise: {0}
MadviseFail(std::io::Error),
/// Error calling mmap: {0}
MmapFail(std::io::Error),
/// Region not found.
RegionNotFound,
}
pub(super) fn report_balloon_event_fail(err: BalloonError) {
if let BalloonError::InvalidAvailIdx(err) = err {
panic!("{}", err);
}
error!("{:?}", err);
METRICS.event_fails.inc();
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/balloon/event_handler.rs | src/vmm/src/devices/virtio/balloon/event_handler.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use event_manager::{EventOps, Events, MutEventSubscriber};
use vmm_sys_util::epoll::EventSet;
use super::{DEFLATE_INDEX, INFLATE_INDEX, STATS_INDEX, report_balloon_event_fail};
use crate::devices::virtio::balloon::device::Balloon;
use crate::devices::virtio::device::VirtioDevice;
use crate::logger::{error, warn};
impl Balloon {
const PROCESS_ACTIVATE: u32 = 0;
const PROCESS_VIRTQ_INFLATE: u32 = 1;
const PROCESS_VIRTQ_DEFLATE: u32 = 2;
const PROCESS_VIRTQ_STATS: u32 = 3;
const PROCESS_STATS_TIMER: u32 = 4;
const PROCESS_VIRTQ_FREE_PAGE_HINTING: u32 = 5;
const PROCESS_VIRTQ_FREE_PAGE_REPORTING: u32 = 6;
fn register_runtime_events(&self, ops: &mut EventOps) {
if let Err(err) = ops.add(Events::with_data(
&self.queue_evts[INFLATE_INDEX],
Self::PROCESS_VIRTQ_INFLATE,
EventSet::IN,
)) {
error!("Failed to register inflate queue event: {}", err);
}
if let Err(err) = ops.add(Events::with_data(
&self.queue_evts[DEFLATE_INDEX],
Self::PROCESS_VIRTQ_DEFLATE,
EventSet::IN,
)) {
error!("Failed to register deflate queue event: {}", err);
}
if self.stats_enabled() {
if let Err(err) = ops.add(Events::with_data(
&self.queue_evts[STATS_INDEX],
Self::PROCESS_VIRTQ_STATS,
EventSet::IN,
)) {
error!("Failed to register stats queue event: {}", err);
}
if let Err(err) = ops.add(Events::with_data(
&self.stats_timer,
Self::PROCESS_STATS_TIMER,
EventSet::IN,
)) {
error!("Failed to register stats timerfd event: {}", err);
}
}
if self.free_page_hinting()
&& let Err(err) = ops.add(Events::with_data(
&self.queue_evts[self.free_page_hinting_idx()],
Self::PROCESS_VIRTQ_FREE_PAGE_HINTING,
EventSet::IN,
))
{
error!("Failed to register free page hinting queue event: {}", err);
}
if self.free_page_reporting()
&& let Err(err) = ops.add(Events::with_data(
&self.queue_evts[self.free_page_reporting_idx()],
Self::PROCESS_VIRTQ_FREE_PAGE_REPORTING,
EventSet::IN,
))
{
error!(
"Failed to register free page reporting queue event: {}",
err
);
}
}
fn register_activate_event(&self, ops: &mut EventOps) {
if let Err(err) = ops.add(Events::with_data(
&self.activate_evt,
Self::PROCESS_ACTIVATE,
EventSet::IN,
)) {
error!("Failed to register activate event: {}", err);
}
}
fn process_activate_event(&self, ops: &mut EventOps) {
if let Err(err) = self.activate_evt.read() {
error!("Failed to consume balloon activate event: {:?}", err);
}
self.register_runtime_events(ops);
if let Err(err) = ops.remove(Events::with_data(
&self.activate_evt,
Self::PROCESS_ACTIVATE,
EventSet::IN,
)) {
error!("Failed to un-register activate event: {}", err);
}
}
}
impl MutEventSubscriber for Balloon {
fn process(&mut self, event: Events, ops: &mut EventOps) {
let source = event.data();
let event_set = event.event_set();
let supported_events = EventSet::IN;
if !supported_events.contains(event_set) {
warn!(
"Received unknown event: {:?} from source: {:?}",
event_set, source
);
return;
}
if self.is_activated() {
match source {
Self::PROCESS_ACTIVATE => self.process_activate_event(ops),
Self::PROCESS_VIRTQ_INFLATE => self
.process_inflate_queue_event()
.unwrap_or_else(report_balloon_event_fail),
Self::PROCESS_VIRTQ_DEFLATE => self
.process_deflate_queue_event()
.unwrap_or_else(report_balloon_event_fail),
Self::PROCESS_VIRTQ_STATS => self
.process_stats_queue_event()
.unwrap_or_else(report_balloon_event_fail),
Self::PROCESS_STATS_TIMER => self
.process_stats_timer_event()
.unwrap_or_else(report_balloon_event_fail),
Self::PROCESS_VIRTQ_FREE_PAGE_HINTING => self
.process_free_page_hinting_queue_event()
.unwrap_or_else(report_balloon_event_fail),
Self::PROCESS_VIRTQ_FREE_PAGE_REPORTING => self
.process_free_page_reporting_queue_event()
.unwrap_or_else(report_balloon_event_fail),
_ => {
warn!("Balloon: Spurious event received: {:?}", source);
}
};
} else {
warn!(
"Balloon: The device is not yet activated. Spurious event received: {:?}",
source
);
}
}
fn init(&mut self, ops: &mut EventOps) {
// This function can be called during different points in the device lifetime:
// - shortly after device creation,
// - on device activation (is-activated already true at this point),
// - on device restore from snapshot.
if self.is_activated() {
self.register_runtime_events(ops);
} else {
self.register_activate_event(ops);
}
}
}
#[cfg(test)]
pub mod tests {
use std::sync::{Arc, Mutex};
use event_manager::{EventManager, SubscriberOps};
use super::*;
use crate::devices::virtio::balloon::test_utils::set_request;
use crate::devices::virtio::test_utils::{VirtQueue, default_interrupt, default_mem};
use crate::vstate::memory::GuestAddress;
#[test]
fn test_event_handler() {
let mut event_manager = EventManager::new().unwrap();
let mut balloon = Balloon::new(0, true, 10, false, false).unwrap();
let mem = default_mem();
let interrupt = default_interrupt();
let infq = VirtQueue::new(GuestAddress(0), &mem, 16);
balloon.set_queue(INFLATE_INDEX, infq.create_queue());
balloon.set_queue(DEFLATE_INDEX, infq.create_queue());
balloon.set_queue(STATS_INDEX, infq.create_queue());
let balloon = Arc::new(Mutex::new(balloon));
let _id = event_manager.add_subscriber(balloon.clone());
// Push a queue event, use the inflate queue in this test.
{
let addr = 0x100;
set_request(&infq, 0, addr, 4, 0);
balloon.lock().unwrap().queue_evts[INFLATE_INDEX]
.write(1)
.unwrap();
}
// EventManager should report no events since balloon has only registered
// its activation event so far (even though there is also a queue event pending).
let ev_count = event_manager.run_with_timeout(50).unwrap();
assert_eq!(ev_count, 0);
// Manually force a queue event and check it's ignored pre-activation.
{
let b = balloon.lock().unwrap();
// Artificially push event.
b.queue_evts[INFLATE_INDEX].write(1).unwrap();
// Process the pushed event.
let ev_count = event_manager.run_with_timeout(50).unwrap();
// Validate there was no queue operation.
assert_eq!(ev_count, 0);
assert_eq!(infq.used.idx.get(), 0);
}
// Now activate the device.
balloon
.lock()
.unwrap()
.activate(mem.clone(), interrupt)
.unwrap();
// Process the activate event.
let ev_count = event_manager.run_with_timeout(50).unwrap();
assert_eq!(ev_count, 1);
// Handle the previously pushed queue event through EventManager.
event_manager
.run_with_timeout(100)
.expect("Metrics event timeout or error.");
// Make sure the data queue advanced.
assert_eq!(infq.used.idx.get(), 1);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/balloon/metrics.rs | src/vmm/src/devices/virtio/balloon/metrics.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the metrics system for balloon devices.
//!
//! # Metrics format
//! The metrics are flushed in JSON when requested by vmm::logger::metrics::METRICS.write().
//!
//! ## JSON example with metrics:
//! ```json
//! "balloon": {
//! "activate_fails": "SharedIncMetric",
//! "inflate_count": "SharedIncMetric",
//! "stats_updates_count": "SharedIncMetric",
//! ...
//! }
//! }
//! ```
//! Each `balloon` field in the example above is a serializable `BalloonDeviceMetrics` structure
//! collecting metrics such as `activate_fails`, `inflate_count` etc. for the balloon device.
//! Since balloon doesn't support multiple devices, there is no per device metrics and
//! `balloon` represents the aggregate balloon metrics.
//!
//! # Design
//! The main design goals of this system are:
//! * Have a consistent approach of keeping device related metrics in the individual devices
//! modules.
//! * To decouple balloon device metrics from logger module by moving BalloonDeviceMetrics out of
//! FirecrackerDeviceMetrics.
//! * Rely on `serde` to provide the actual serialization for writing the metrics.
//!
//! The system implements 1 type of metrics:
//! * Shared Incremental Metrics (SharedIncMetrics) - dedicated for the metrics which need a counter
//! (i.e the number of times an API request failed). These metrics are reset upon flush.
use serde::ser::SerializeMap;
use serde::{Serialize, Serializer};
use crate::logger::SharedIncMetric;
/// Stores aggregated balloon metrics
pub(super) static METRICS: BalloonDeviceMetrics = BalloonDeviceMetrics::new();
/// Called by METRICS.flush(), this function facilitates serialization of balloon device metrics.
pub fn flush_metrics<S: Serializer>(serializer: S) -> Result<S::Ok, S::Error> {
let mut seq = serializer.serialize_map(Some(1))?;
seq.serialize_entry("balloon", &METRICS)?;
seq.end()
}
/// Balloon Device associated metrics.
#[derive(Debug, Serialize)]
pub(super) struct BalloonDeviceMetrics {
/// Number of times when activate failed on a balloon device.
pub activate_fails: SharedIncMetric,
/// Number of balloon device inflations.
pub inflate_count: SharedIncMetric,
// Number of balloon statistics updates from the driver.
pub stats_updates_count: SharedIncMetric,
// Number of balloon statistics update failures.
pub stats_update_fails: SharedIncMetric,
/// Number of balloon device deflations.
pub deflate_count: SharedIncMetric,
/// Number of times when handling events on a balloon device failed.
pub event_fails: SharedIncMetric,
/// Number of times when free page repoting was triggered
pub free_page_report_count: SharedIncMetric,
/// Total memory freed by the reporting driver
pub free_page_report_freed: SharedIncMetric,
/// Number of errors occurred while reporting
pub free_page_report_fails: SharedIncMetric,
/// Number of times when free page hinting was triggered
pub free_page_hint_count: SharedIncMetric,
/// Total memory freed by the hinting driver
pub free_page_hint_freed: SharedIncMetric,
/// Number of errors occurred while hinting
pub free_page_hint_fails: SharedIncMetric,
}
impl BalloonDeviceMetrics {
/// Const default construction.
const fn new() -> Self {
Self {
activate_fails: SharedIncMetric::new(),
inflate_count: SharedIncMetric::new(),
stats_updates_count: SharedIncMetric::new(),
stats_update_fails: SharedIncMetric::new(),
deflate_count: SharedIncMetric::new(),
event_fails: SharedIncMetric::new(),
free_page_report_count: SharedIncMetric::new(),
free_page_report_freed: SharedIncMetric::new(),
free_page_report_fails: SharedIncMetric::new(),
free_page_hint_count: SharedIncMetric::new(),
free_page_hint_freed: SharedIncMetric::new(),
free_page_hint_fails: SharedIncMetric::new(),
}
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::logger::IncMetric;
#[test]
fn test_balloon_dev_metrics() {
let balloon_metrics: BalloonDeviceMetrics = BalloonDeviceMetrics::new();
let balloon_metrics_local: String = serde_json::to_string(&balloon_metrics).unwrap();
// the 1st serialize flushes the metrics and resets values to 0 so that
// we can compare the values with local metrics.
serde_json::to_string(&METRICS).unwrap();
let balloon_metrics_global: String = serde_json::to_string(&METRICS).unwrap();
assert_eq!(balloon_metrics_local, balloon_metrics_global);
balloon_metrics.inflate_count.inc();
assert_eq!(balloon_metrics.inflate_count.count(), 1);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/block/persist.rs | src/vmm/src/devices/virtio/block/persist.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::sync::Arc;
use serde::{Deserialize, Serialize};
use super::vhost_user::persist::VhostUserBlockState;
use super::virtio::persist::VirtioBlockState;
use crate::devices::virtio::transport::VirtioInterrupt;
use crate::vstate::memory::GuestMemoryMmap;
/// Block device state.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum BlockState {
Virtio(VirtioBlockState),
VhostUser(VhostUserBlockState),
}
impl BlockState {
pub fn is_activated(&self) -> bool {
match self {
BlockState::Virtio(virtio_block_state) => virtio_block_state.virtio_state.activated,
BlockState::VhostUser(vhost_user_block_state) => false,
}
}
}
/// Auxiliary structure for creating a device when resuming from a snapshot.
#[derive(Debug)]
pub struct BlockConstructorArgs {
pub mem: GuestMemoryMmap,
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/block/device.rs | src/vmm/src/devices/virtio/block/device.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::sync::Arc;
use event_manager::{EventOps, Events, MutEventSubscriber};
use log::info;
use vmm_sys_util::eventfd::EventFd;
use super::BlockError;
use super::persist::{BlockConstructorArgs, BlockState};
use super::vhost_user::device::{VhostUserBlock, VhostUserBlockConfig};
use super::virtio::device::{VirtioBlock, VirtioBlockConfig};
use crate::devices::virtio::ActivateError;
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::generated::virtio_ids::VIRTIO_ID_BLOCK;
use crate::devices::virtio::queue::{InvalidAvailIdx, Queue};
use crate::devices::virtio::transport::VirtioInterrupt;
use crate::impl_device_type;
use crate::rate_limiter::BucketUpdate;
use crate::snapshot::Persist;
use crate::vmm_config::drive::BlockDeviceConfig;
use crate::vstate::memory::GuestMemoryMmap;
// Clippy thinks that values of the enum are too different in size.
#[allow(clippy::large_enum_variant)]
#[derive(Debug)]
pub enum Block {
Virtio(VirtioBlock),
VhostUser(VhostUserBlock),
}
impl Block {
pub fn new(config: BlockDeviceConfig) -> Result<Block, BlockError> {
if let Ok(config) = VirtioBlockConfig::try_from(&config) {
Ok(Self::Virtio(
VirtioBlock::new(config).map_err(BlockError::VirtioBackend)?,
))
} else if let Ok(config) = VhostUserBlockConfig::try_from(&config) {
Ok(Self::VhostUser(
VhostUserBlock::new(config).map_err(BlockError::VhostUserBackend)?,
))
} else {
Err(BlockError::InvalidBlockConfig)
}
}
pub fn config(&self) -> BlockDeviceConfig {
match self {
Self::Virtio(b) => b.config().into(),
Self::VhostUser(b) => b.config().into(),
}
}
pub fn update_disk_image(&mut self, disk_image_path: String) -> Result<(), BlockError> {
match self {
Self::Virtio(b) => b
.update_disk_image(disk_image_path)
.map_err(BlockError::VirtioBackend),
Self::VhostUser(_) => Err(BlockError::InvalidBlockBackend),
}
}
pub fn update_rate_limiter(
&mut self,
bytes: BucketUpdate,
ops: BucketUpdate,
) -> Result<(), BlockError> {
match self {
Self::Virtio(b) => {
b.update_rate_limiter(bytes, ops);
Ok(())
}
Self::VhostUser(_) => Err(BlockError::InvalidBlockBackend),
}
}
pub fn update_config(&mut self) -> Result<(), BlockError> {
match self {
Self::Virtio(_) => Err(BlockError::InvalidBlockBackend),
Self::VhostUser(b) => b.config_update().map_err(BlockError::VhostUserBackend),
}
}
pub fn prepare_save(&mut self) {
match self {
Self::Virtio(b) => b.prepare_save(),
Self::VhostUser(b) => b.prepare_save(),
}
}
pub fn process_virtio_queues(&mut self) -> Result<(), InvalidAvailIdx> {
match self {
Self::Virtio(b) => b.process_virtio_queues(),
Self::VhostUser(_) => Ok(()),
}
}
pub fn id(&self) -> &str {
match self {
Self::Virtio(b) => &b.id,
Self::VhostUser(b) => &b.id,
}
}
pub fn root_device(&self) -> bool {
match self {
Self::Virtio(b) => b.root_device,
Self::VhostUser(b) => b.root_device,
}
}
pub fn read_only(&self) -> bool {
match self {
Self::Virtio(b) => b.read_only,
Self::VhostUser(b) => b.read_only,
}
}
pub fn partuuid(&self) -> &Option<String> {
match self {
Self::Virtio(b) => &b.partuuid,
Self::VhostUser(b) => &b.partuuid,
}
}
pub fn is_vhost_user(&self) -> bool {
match self {
Self::Virtio(_) => false,
Self::VhostUser(_) => true,
}
}
}
impl VirtioDevice for Block {
impl_device_type!(VIRTIO_ID_BLOCK);
fn avail_features(&self) -> u64 {
match self {
Self::Virtio(b) => b.avail_features,
Self::VhostUser(b) => b.avail_features,
}
}
fn acked_features(&self) -> u64 {
match self {
Self::Virtio(b) => b.acked_features,
Self::VhostUser(b) => b.acked_features,
}
}
fn set_acked_features(&mut self, acked_features: u64) {
match self {
Self::Virtio(b) => b.acked_features = acked_features,
Self::VhostUser(b) => b.acked_features = acked_features,
}
}
fn queues(&self) -> &[Queue] {
match self {
Self::Virtio(b) => &b.queues,
Self::VhostUser(b) => &b.queues,
}
}
fn queues_mut(&mut self) -> &mut [Queue] {
match self {
Self::Virtio(b) => &mut b.queues,
Self::VhostUser(b) => &mut b.queues,
}
}
fn queue_events(&self) -> &[EventFd] {
match self {
Self::Virtio(b) => &b.queue_evts,
Self::VhostUser(b) => &b.queue_evts,
}
}
fn interrupt_trigger(&self) -> &dyn VirtioInterrupt {
match self {
Self::Virtio(b) => b.interrupt_trigger(),
Self::VhostUser(b) => b.interrupt_trigger(),
}
}
fn read_config(&self, offset: u64, data: &mut [u8]) {
match self {
Self::Virtio(b) => b.read_config(offset, data),
Self::VhostUser(b) => b.read_config(offset, data),
}
}
fn write_config(&mut self, offset: u64, data: &[u8]) {
match self {
Self::Virtio(b) => b.write_config(offset, data),
Self::VhostUser(b) => b.write_config(offset, data),
}
}
fn activate(
&mut self,
mem: GuestMemoryMmap,
interrupt: Arc<dyn VirtioInterrupt>,
) -> Result<(), ActivateError> {
match self {
Self::Virtio(b) => b.activate(mem, interrupt),
Self::VhostUser(b) => b.activate(mem, interrupt),
}
}
fn is_activated(&self) -> bool {
match self {
Self::Virtio(b) => b.device_state.is_activated(),
Self::VhostUser(b) => b.device_state.is_activated(),
}
}
fn kick(&mut self) {
// If device is activated, kick the block queue(s) to make up for any
// pending or in-flight epoll events we may have not captured in
// snapshot. No need to kick Ratelimiters
// because they are restored 'unblocked' so
// any inflight `timer_fd` events can be safely discarded.
if self.is_activated() {
info!("kick block {}.", self.id());
self.process_virtio_queues();
}
}
}
impl MutEventSubscriber for Block {
fn process(&mut self, event: Events, ops: &mut EventOps) {
match self {
Self::Virtio(b) => b.process(event, ops),
Self::VhostUser(b) => b.process(event, ops),
}
}
fn init(&mut self, ops: &mut EventOps) {
match self {
Self::Virtio(b) => b.init(ops),
Self::VhostUser(b) => b.init(ops),
}
}
}
impl Persist<'_> for Block {
type State = BlockState;
type ConstructorArgs = BlockConstructorArgs;
type Error = BlockError;
fn save(&self) -> Self::State {
match self {
Self::Virtio(b) => BlockState::Virtio(b.save()),
Self::VhostUser(b) => BlockState::VhostUser(b.save()),
}
}
fn restore(
constructor_args: Self::ConstructorArgs,
state: &Self::State,
) -> Result<Self, Self::Error> {
match state {
BlockState::Virtio(s) => Ok(Self::Virtio(
VirtioBlock::restore(constructor_args, s).map_err(BlockError::VirtioBackend)?,
)),
BlockState::VhostUser(s) => Ok(Self::VhostUser(
VhostUserBlock::restore(constructor_args, s)
.map_err(BlockError::VhostUserBackend)?,
)),
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/block/mod.rs | src/vmm/src/devices/virtio/block/mod.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use serde::{Deserialize, Serialize};
use self::vhost_user::VhostUserBlockError;
use self::virtio::VirtioBlockError;
pub mod device;
pub mod persist;
pub mod vhost_user;
pub mod virtio;
/// Configuration options for disk caching.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Deserialize, Serialize)]
pub enum CacheType {
/// Flushing mechanic not will be advertised to the guest driver
#[default]
Unsafe,
/// Flushing mechanic will be advertised to the guest driver and
/// flush requests coming from the guest will be performed using
/// `fsync`.
Writeback,
}
/// Errors the block device can trigger.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum BlockError {
/// Invalid block config.
InvalidBlockConfig,
/// Running method expected different backend.
InvalidBlockBackend,
/// Can not restore any backend.
BackendRestore,
/// Virtio backend error: {0}
VirtioBackend(VirtioBlockError),
/// Vhost user backend error: {0}
VhostUserBackend(VhostUserBlockError),
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/block/virtio/persist.rs | src/vmm/src/devices/virtio/block/virtio/persist.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the structures needed for saving/restoring block devices.
use device::ConfigSpace;
use serde::{Deserialize, Serialize};
use vmm_sys_util::eventfd::EventFd;
use super::device::DiskProperties;
use super::*;
use crate::devices::virtio::block::persist::BlockConstructorArgs;
use crate::devices::virtio::block::virtio::device::FileEngineType;
use crate::devices::virtio::block::virtio::metrics::BlockMetricsPerDevice;
use crate::devices::virtio::device::{ActiveState, DeviceState};
use crate::devices::virtio::generated::virtio_blk::VIRTIO_BLK_F_RO;
use crate::devices::virtio::generated::virtio_ids::VIRTIO_ID_BLOCK;
use crate::devices::virtio::persist::VirtioDeviceState;
use crate::rate_limiter::RateLimiter;
use crate::rate_limiter::persist::RateLimiterState;
use crate::snapshot::Persist;
/// Holds info about block's file engine type. Gets saved in snapshot.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub enum FileEngineTypeState {
/// Sync File Engine.
// If the snap version does not contain the `FileEngineType`, it must have been snapshotted
// on a VM using the Sync backend.
#[default]
Sync,
/// Async File Engine.
Async,
}
impl From<FileEngineType> for FileEngineTypeState {
fn from(file_engine_type: FileEngineType) -> Self {
match file_engine_type {
FileEngineType::Sync => FileEngineTypeState::Sync,
FileEngineType::Async => FileEngineTypeState::Async,
}
}
}
impl From<FileEngineTypeState> for FileEngineType {
fn from(file_engine_type_state: FileEngineTypeState) -> Self {
match file_engine_type_state {
FileEngineTypeState::Sync => FileEngineType::Sync,
FileEngineTypeState::Async => FileEngineType::Async,
}
}
}
/// Holds info about the block device. Gets saved in snapshot.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VirtioBlockState {
id: String,
partuuid: Option<String>,
cache_type: CacheType,
root_device: bool,
disk_path: String,
pub virtio_state: VirtioDeviceState,
rate_limiter_state: RateLimiterState,
file_engine_type: FileEngineTypeState,
}
impl Persist<'_> for VirtioBlock {
type State = VirtioBlockState;
type ConstructorArgs = BlockConstructorArgs;
type Error = VirtioBlockError;
fn save(&self) -> Self::State {
// Save device state.
VirtioBlockState {
id: self.id.clone(),
partuuid: self.partuuid.clone(),
cache_type: self.cache_type,
root_device: self.root_device,
disk_path: self.disk.file_path.clone(),
virtio_state: VirtioDeviceState::from_device(self),
rate_limiter_state: self.rate_limiter.save(),
file_engine_type: FileEngineTypeState::from(self.file_engine_type()),
}
}
fn restore(
constructor_args: Self::ConstructorArgs,
state: &Self::State,
) -> Result<Self, Self::Error> {
let is_read_only = state.virtio_state.avail_features & (1u64 << VIRTIO_BLK_F_RO) != 0;
let rate_limiter = RateLimiter::restore((), &state.rate_limiter_state)
.map_err(VirtioBlockError::RateLimiter)?;
let disk_properties = DiskProperties::new(
state.disk_path.clone(),
is_read_only,
state.file_engine_type.into(),
)?;
let queue_evts = [EventFd::new(libc::EFD_NONBLOCK).map_err(VirtioBlockError::EventFd)?];
let queues = state
.virtio_state
.build_queues_checked(
&constructor_args.mem,
VIRTIO_ID_BLOCK,
BLOCK_NUM_QUEUES,
FIRECRACKER_MAX_QUEUE_SIZE,
)
.map_err(VirtioBlockError::Persist)?;
let avail_features = state.virtio_state.avail_features;
let acked_features = state.virtio_state.acked_features;
let config_space = ConfigSpace {
capacity: disk_properties.nsectors.to_le(),
};
Ok(VirtioBlock {
avail_features,
acked_features,
config_space,
activate_evt: EventFd::new(libc::EFD_NONBLOCK).map_err(VirtioBlockError::EventFd)?,
queues,
queue_evts,
device_state: DeviceState::Inactive,
id: state.id.clone(),
partuuid: state.partuuid.clone(),
cache_type: state.cache_type,
root_device: state.root_device,
read_only: is_read_only,
disk: disk_properties,
rate_limiter,
is_io_engine_throttled: false,
metrics: BlockMetricsPerDevice::alloc(state.id.clone()),
})
}
}
#[cfg(test)]
mod tests {
use vmm_sys_util::tempfile::TempFile;
use super::*;
use crate::devices::virtio::block::virtio::device::VirtioBlockConfig;
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::test_utils::{default_interrupt, default_mem};
use crate::snapshot::Snapshot;
#[test]
fn test_cache_semantic_ser() {
// We create the backing file here so that it exists for the whole lifetime of the test.
let f = TempFile::new().unwrap();
f.as_file().set_len(0x1000).unwrap();
let config = VirtioBlockConfig {
drive_id: "test".to_string(),
path_on_host: f.as_path().to_str().unwrap().to_string(),
is_root_device: false,
partuuid: None,
is_read_only: false,
cache_type: CacheType::Writeback,
rate_limiter: None,
file_engine_type: FileEngineType::default(),
};
let block = VirtioBlock::new(config).unwrap();
// Save the block device.
let mut mem = vec![0; 4096];
Snapshot::new(block.save())
.save(&mut mem.as_mut_slice())
.unwrap();
}
#[test]
fn test_file_engine_type() {
// Test conversions between FileEngineType and FileEngineTypeState.
assert_eq!(
FileEngineTypeState::Async,
FileEngineTypeState::from(FileEngineType::Async)
);
assert_eq!(
FileEngineTypeState::Sync,
FileEngineTypeState::from(FileEngineType::Sync)
);
assert_eq!(FileEngineType::Async, FileEngineTypeState::Async.into());
assert_eq!(FileEngineType::Sync, FileEngineTypeState::Sync.into());
// Test default impl.
assert_eq!(FileEngineTypeState::default(), FileEngineTypeState::Sync);
}
#[test]
fn test_persistence() {
// We create the backing file here so that it exists for the whole lifetime of the test.
let f = TempFile::new().unwrap();
f.as_file().set_len(0x1000).unwrap();
let config = VirtioBlockConfig {
drive_id: "test".to_string(),
path_on_host: f.as_path().to_str().unwrap().to_string(),
is_root_device: false,
partuuid: None,
is_read_only: false,
cache_type: CacheType::Unsafe,
rate_limiter: None,
file_engine_type: FileEngineType::default(),
};
let block = VirtioBlock::new(config).unwrap();
let guest_mem = default_mem();
// Save the block device.
let mut mem = vec![0; 4096];
Snapshot::new(block.save())
.save(&mut mem.as_mut_slice())
.unwrap();
// Restore the block device.
let restored_block = VirtioBlock::restore(
BlockConstructorArgs { mem: guest_mem },
&Snapshot::load_without_crc_check(mem.as_slice())
.unwrap()
.data,
)
.unwrap();
// Test that virtio specific fields are the same.
assert_eq!(restored_block.device_type(), VIRTIO_ID_BLOCK);
assert_eq!(restored_block.avail_features(), block.avail_features());
assert_eq!(restored_block.acked_features(), block.acked_features());
assert_eq!(restored_block.queues(), block.queues());
assert!(!block.is_activated());
assert!(!restored_block.is_activated());
// Test that block specific fields are the same.
assert_eq!(restored_block.disk.file_path, block.disk.file_path);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/block/virtio/device.rs | src/vmm/src/devices/virtio/block/virtio/device.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::cmp;
use std::convert::From;
use std::fs::{File, OpenOptions};
use std::io::{Seek, SeekFrom};
use std::ops::Deref;
use std::os::linux::fs::MetadataExt;
use std::path::PathBuf;
use std::sync::Arc;
use block_io::FileEngine;
use serde::{Deserialize, Serialize};
use vm_memory::ByteValued;
use vmm_sys_util::eventfd::EventFd;
use super::io::async_io;
use super::request::*;
use super::{BLOCK_QUEUE_SIZES, SECTOR_SHIFT, SECTOR_SIZE, VirtioBlockError, io as block_io};
use crate::devices::virtio::ActivateError;
use crate::devices::virtio::block::CacheType;
use crate::devices::virtio::block::virtio::metrics::{BlockDeviceMetrics, BlockMetricsPerDevice};
use crate::devices::virtio::device::{ActiveState, DeviceState, VirtioDevice};
use crate::devices::virtio::generated::virtio_blk::{
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_RO, VIRTIO_BLK_ID_BYTES,
};
use crate::devices::virtio::generated::virtio_config::VIRTIO_F_VERSION_1;
use crate::devices::virtio::generated::virtio_ids::VIRTIO_ID_BLOCK;
use crate::devices::virtio::generated::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use crate::devices::virtio::queue::{InvalidAvailIdx, Queue};
use crate::devices::virtio::transport::{VirtioInterrupt, VirtioInterruptType};
use crate::impl_device_type;
use crate::logger::{IncMetric, error, warn};
use crate::rate_limiter::{BucketUpdate, RateLimiter};
use crate::utils::u64_to_usize;
use crate::vmm_config::RateLimiterConfig;
use crate::vmm_config::drive::BlockDeviceConfig;
use crate::vstate::memory::GuestMemoryMmap;
/// The engine file type, either Sync or Async (through io_uring).
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)]
pub enum FileEngineType {
/// Use an Async engine, based on io_uring.
Async,
/// Use a Sync engine, based on blocking system calls.
#[default]
Sync,
}
/// Helper object for setting up all `Block` fields derived from its backing file.
#[derive(Debug)]
pub struct DiskProperties {
pub file_path: String,
pub file_engine: FileEngine,
pub nsectors: u64,
pub image_id: [u8; VIRTIO_BLK_ID_BYTES as usize],
}
impl DiskProperties {
// Helper function that opens the file with the proper access permissions
fn open_file(disk_image_path: &str, is_disk_read_only: bool) -> Result<File, VirtioBlockError> {
OpenOptions::new()
.read(true)
.write(!is_disk_read_only)
.open(PathBuf::from(&disk_image_path))
.map_err(|x| VirtioBlockError::BackingFile(x, disk_image_path.to_string()))
}
// Helper function that gets the size of the file
fn file_size(disk_image_path: &str, disk_image: &mut File) -> Result<u64, VirtioBlockError> {
let disk_size = disk_image
.seek(SeekFrom::End(0))
.map_err(|x| VirtioBlockError::BackingFile(x, disk_image_path.to_string()))?;
// We only support disk size, which uses the first two words of the configuration space.
// If the image is not a multiple of the sector size, the tail bits are not exposed.
if disk_size % u64::from(SECTOR_SIZE) != 0 {
warn!(
"Disk size {} is not a multiple of sector size {}; the remainder will not be \
visible to the guest.",
disk_size, SECTOR_SIZE
);
}
Ok(disk_size)
}
/// Create a new file for the block device using a FileEngine
pub fn new(
disk_image_path: String,
is_disk_read_only: bool,
file_engine_type: FileEngineType,
) -> Result<Self, VirtioBlockError> {
let mut disk_image = Self::open_file(&disk_image_path, is_disk_read_only)?;
let disk_size = Self::file_size(&disk_image_path, &mut disk_image)?;
let image_id = Self::build_disk_image_id(&disk_image);
Ok(Self {
file_path: disk_image_path,
file_engine: FileEngine::from_file(disk_image, file_engine_type)
.map_err(VirtioBlockError::FileEngine)?,
nsectors: disk_size >> SECTOR_SHIFT,
image_id,
})
}
/// Update the path to the file backing the block device
pub fn update(
&mut self,
disk_image_path: String,
is_disk_read_only: bool,
) -> Result<(), VirtioBlockError> {
let mut disk_image = Self::open_file(&disk_image_path, is_disk_read_only)?;
let disk_size = Self::file_size(&disk_image_path, &mut disk_image)?;
self.image_id = Self::build_disk_image_id(&disk_image);
self.file_engine
.update_file_path(disk_image)
.map_err(VirtioBlockError::FileEngine)?;
self.nsectors = disk_size >> SECTOR_SHIFT;
self.file_path = disk_image_path;
Ok(())
}
fn build_device_id(disk_file: &File) -> Result<String, VirtioBlockError> {
let blk_metadata = disk_file
.metadata()
.map_err(VirtioBlockError::GetFileMetadata)?;
// This is how kvmtool does it.
let device_id = format!(
"{}{}{}",
blk_metadata.st_dev(),
blk_metadata.st_rdev(),
blk_metadata.st_ino()
);
Ok(device_id)
}
fn build_disk_image_id(disk_file: &File) -> [u8; VIRTIO_BLK_ID_BYTES as usize] {
let mut default_id = [0; VIRTIO_BLK_ID_BYTES as usize];
match Self::build_device_id(disk_file) {
Err(_) => {
warn!("Could not generate device id. We'll use a default.");
}
Ok(disk_id_string) => {
// The kernel only knows to read a maximum of VIRTIO_BLK_ID_BYTES.
// This will also zero out any leftover bytes.
let disk_id = disk_id_string.as_bytes();
let bytes_to_copy = cmp::min(disk_id.len(), VIRTIO_BLK_ID_BYTES as usize);
default_id[..bytes_to_copy].copy_from_slice(&disk_id[..bytes_to_copy]);
}
}
default_id
}
}
#[derive(Debug, Default, Clone, Copy, Eq, PartialEq)]
#[repr(C)]
pub struct ConfigSpace {
pub capacity: u64,
}
// SAFETY: `ConfigSpace` contains only PODs in `repr(C)` or `repr(transparent)`, without padding.
unsafe impl ByteValued for ConfigSpace {}
/// Use this structure to set up the Block Device before booting the kernel.
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct VirtioBlockConfig {
/// Unique identifier of the drive.
pub drive_id: String,
/// Part-UUID. Represents the unique id of the boot partition of this device. It is
/// optional and it will be used only if the `is_root_device` field is true.
pub partuuid: Option<String>,
/// If set to true, it makes the current device the root block device.
/// Setting this flag to true will mount the block device in the
/// guest under /dev/vda unless the partuuid is present.
pub is_root_device: bool,
/// If set to true, the drive will ignore flush requests coming from
/// the guest driver.
#[serde(default)]
pub cache_type: CacheType,
/// If set to true, the drive is opened in read-only mode. Otherwise, the
/// drive is opened as read-write.
pub is_read_only: bool,
/// Path of the backing file on the host
pub path_on_host: String,
/// Rate Limiter for I/O operations.
pub rate_limiter: Option<RateLimiterConfig>,
/// The type of IO engine used by the device.
#[serde(default)]
#[serde(rename = "io_engine")]
pub file_engine_type: FileEngineType,
}
impl TryFrom<&BlockDeviceConfig> for VirtioBlockConfig {
type Error = VirtioBlockError;
fn try_from(value: &BlockDeviceConfig) -> Result<Self, Self::Error> {
if value.path_on_host.is_some() && value.socket.is_none() {
Ok(Self {
drive_id: value.drive_id.clone(),
partuuid: value.partuuid.clone(),
is_root_device: value.is_root_device,
cache_type: value.cache_type,
is_read_only: value.is_read_only.unwrap_or(false),
path_on_host: value.path_on_host.as_ref().unwrap().clone(),
rate_limiter: value.rate_limiter,
file_engine_type: value.file_engine_type.unwrap_or_default(),
})
} else {
Err(VirtioBlockError::Config)
}
}
}
impl From<VirtioBlockConfig> for BlockDeviceConfig {
fn from(value: VirtioBlockConfig) -> Self {
Self {
drive_id: value.drive_id,
partuuid: value.partuuid,
is_root_device: value.is_root_device,
cache_type: value.cache_type,
is_read_only: Some(value.is_read_only),
path_on_host: Some(value.path_on_host),
rate_limiter: value.rate_limiter,
file_engine_type: Some(value.file_engine_type),
socket: None,
}
}
}
/// Virtio device for exposing block level read/write operations on a host file.
#[derive(Debug)]
pub struct VirtioBlock {
// Virtio fields.
pub avail_features: u64,
pub acked_features: u64,
pub config_space: ConfigSpace,
pub activate_evt: EventFd,
// Transport related fields.
pub queues: Vec<Queue>,
pub queue_evts: [EventFd; 1],
pub device_state: DeviceState,
// Implementation specific fields.
pub id: String,
pub partuuid: Option<String>,
pub cache_type: CacheType,
pub root_device: bool,
pub read_only: bool,
// Host file and properties.
pub disk: DiskProperties,
pub rate_limiter: RateLimiter,
pub is_io_engine_throttled: bool,
pub metrics: Arc<BlockDeviceMetrics>,
}
macro_rules! unwrap_async_file_engine_or_return {
($file_engine: expr) => {
match $file_engine {
FileEngine::Async(engine) => engine,
FileEngine::Sync(_) => {
error!("The block device doesn't use an async IO engine");
return;
}
}
};
}
impl VirtioBlock {
/// Create a new virtio block device that operates on the given file.
///
/// The given file must be seekable and sizable.
pub fn new(config: VirtioBlockConfig) -> Result<VirtioBlock, VirtioBlockError> {
let disk_properties = DiskProperties::new(
config.path_on_host,
config.is_read_only,
config.file_engine_type,
)?;
let rate_limiter = config
.rate_limiter
.map(RateLimiterConfig::try_into)
.transpose()
.map_err(VirtioBlockError::RateLimiter)?
.unwrap_or_default();
let mut avail_features = (1u64 << VIRTIO_F_VERSION_1) | (1u64 << VIRTIO_RING_F_EVENT_IDX);
if config.cache_type == CacheType::Writeback {
avail_features |= 1u64 << VIRTIO_BLK_F_FLUSH;
}
if config.is_read_only {
avail_features |= 1u64 << VIRTIO_BLK_F_RO;
};
let queue_evts = [EventFd::new(libc::EFD_NONBLOCK).map_err(VirtioBlockError::EventFd)?];
let queues = BLOCK_QUEUE_SIZES.iter().map(|&s| Queue::new(s)).collect();
let config_space = ConfigSpace {
capacity: disk_properties.nsectors.to_le(),
};
Ok(VirtioBlock {
avail_features,
acked_features: 0u64,
config_space,
activate_evt: EventFd::new(libc::EFD_NONBLOCK).map_err(VirtioBlockError::EventFd)?,
queues,
queue_evts,
device_state: DeviceState::Inactive,
id: config.drive_id.clone(),
partuuid: config.partuuid,
cache_type: config.cache_type,
root_device: config.is_root_device,
read_only: config.is_read_only,
disk: disk_properties,
rate_limiter,
is_io_engine_throttled: false,
metrics: BlockMetricsPerDevice::alloc(config.drive_id),
})
}
/// Returns a copy of a device config
pub fn config(&self) -> VirtioBlockConfig {
let rl: RateLimiterConfig = (&self.rate_limiter).into();
VirtioBlockConfig {
drive_id: self.id.clone(),
path_on_host: self.disk.file_path.clone(),
is_root_device: self.root_device,
partuuid: self.partuuid.clone(),
is_read_only: self.read_only,
cache_type: self.cache_type,
rate_limiter: rl.into_option(),
file_engine_type: self.file_engine_type(),
}
}
/// Process a single event in the Virtio queue.
///
/// This function is called by the event manager when the guest notifies us
/// about new buffers in the queue.
pub(crate) fn process_queue_event(&mut self) {
self.metrics.queue_event_count.inc();
if let Err(err) = self.queue_evts[0].read() {
error!("Failed to get queue event: {:?}", err);
self.metrics.event_fails.inc();
} else if self.rate_limiter.is_blocked() {
self.metrics.rate_limiter_throttled_events.inc();
} else if self.is_io_engine_throttled {
self.metrics.io_engine_throttled_events.inc();
} else {
self.process_virtio_queues().unwrap()
}
}
/// Process device virtio queue(s).
pub fn process_virtio_queues(&mut self) -> Result<(), InvalidAvailIdx> {
self.process_queue(0)
}
pub(crate) fn process_rate_limiter_event(&mut self) {
self.metrics.rate_limiter_event_count.inc();
// Upon rate limiter event, call the rate limiter handler
// and restart processing the queue.
if self.rate_limiter.event_handler().is_ok() {
self.process_queue(0).unwrap()
}
}
/// Device specific function for peaking inside a queue and processing descriptors.
pub fn process_queue(&mut self, queue_index: usize) -> Result<(), InvalidAvailIdx> {
// This is safe since we checked in the event handler that the device is activated.
let active_state = self.device_state.active_state().unwrap();
let queue = &mut self.queues[queue_index];
let mut used_any = false;
while let Some(head) = queue.pop_or_enable_notification()? {
self.metrics.remaining_reqs_count.add(queue.len().into());
let processing_result =
match Request::parse(&head, &active_state.mem, self.disk.nsectors) {
Ok(request) => {
if request.rate_limit(&mut self.rate_limiter) {
// Stop processing the queue and return this descriptor chain to the
// avail ring, for later processing.
queue.undo_pop();
self.metrics.rate_limiter_throttled_events.inc();
break;
}
request.process(
&mut self.disk,
head.index,
&active_state.mem,
&self.metrics,
)
}
Err(err) => {
error!("Failed to parse available descriptor chain: {:?}", err);
self.metrics.execute_fails.inc();
ProcessingResult::Executed(FinishedRequest {
num_bytes_to_mem: 0,
desc_idx: head.index,
})
}
};
match processing_result {
ProcessingResult::Submitted => {}
ProcessingResult::Throttled => {
queue.undo_pop();
self.is_io_engine_throttled = true;
break;
}
ProcessingResult::Executed(finished) => {
used_any = true;
queue
.add_used(head.index, finished.num_bytes_to_mem)
.unwrap_or_else(|err| {
error!(
"Failed to add available descriptor head {}: {}",
head.index, err
)
});
}
}
}
queue.advance_used_ring_idx();
if used_any && queue.prepare_kick() {
active_state
.interrupt
.trigger(VirtioInterruptType::Queue(0))
.unwrap_or_else(|_| {
self.metrics.event_fails.inc();
});
}
if let FileEngine::Async(ref mut engine) = self.disk.file_engine
&& let Err(err) = engine.kick_submission_queue()
{
error!("BlockError submitting pending block requests: {:?}", err);
}
if !used_any {
self.metrics.no_avail_buffer.inc();
}
Ok(())
}
fn process_async_completion_queue(&mut self) {
let engine = unwrap_async_file_engine_or_return!(&mut self.disk.file_engine);
// This is safe since we checked in the event handler that the device is activated.
let active_state = self.device_state.active_state().unwrap();
let queue = &mut self.queues[0];
loop {
match engine.pop(&active_state.mem) {
Err(error) => {
error!("Failed to read completed io_uring entry: {:?}", error);
break;
}
Ok(None) => break,
Ok(Some(cqe)) => {
let res = cqe.result();
let user_data = cqe.user_data();
let (pending, res) = match res {
Ok(count) => (user_data, Ok(count)),
Err(error) => (
user_data,
Err(IoErr::FileEngine(block_io::BlockIoError::Async(
async_io::AsyncIoError::IO(error),
))),
),
};
let finished = pending.finish(&active_state.mem, res, &self.metrics);
queue
.add_used(finished.desc_idx, finished.num_bytes_to_mem)
.unwrap_or_else(|err| {
error!(
"Failed to add available descriptor head {}: {}",
finished.desc_idx, err
)
});
}
}
}
queue.advance_used_ring_idx();
if queue.prepare_kick() {
active_state
.interrupt
.trigger(VirtioInterruptType::Queue(0))
.unwrap_or_else(|_| {
self.metrics.event_fails.inc();
});
}
}
pub fn process_async_completion_event(&mut self) {
let engine = unwrap_async_file_engine_or_return!(&mut self.disk.file_engine);
if let Err(err) = engine.completion_evt().read() {
error!("Failed to get async completion event: {:?}", err);
} else {
self.process_async_completion_queue();
if self.is_io_engine_throttled {
self.is_io_engine_throttled = false;
self.process_queue(0).unwrap()
}
}
}
/// Update the backing file and the config space of the block device.
pub fn update_disk_image(&mut self, disk_image_path: String) -> Result<(), VirtioBlockError> {
self.disk.update(disk_image_path, self.read_only)?;
self.config_space.capacity = self.disk.nsectors.to_le(); // virtio_block_config_space();
// Kick the driver to pick up the changes. (But only if the device is already activated).
if self.is_activated() {
self.interrupt_trigger()
.trigger(VirtioInterruptType::Config)
.unwrap();
}
self.metrics.update_count.inc();
Ok(())
}
/// Updates the parameters for the rate limiter
pub fn update_rate_limiter(&mut self, bytes: BucketUpdate, ops: BucketUpdate) {
self.rate_limiter.update_buckets(bytes, ops);
}
/// Retrieve the file engine type.
pub fn file_engine_type(&self) -> FileEngineType {
match self.disk.file_engine {
FileEngine::Sync(_) => FileEngineType::Sync,
FileEngine::Async(_) => FileEngineType::Async,
}
}
fn drain_and_flush(&mut self, discard: bool) {
if let Err(err) = self.disk.file_engine.drain_and_flush(discard) {
error!("Failed to drain ops and flush block data: {:?}", err);
}
}
/// Prepare device for being snapshotted.
pub fn prepare_save(&mut self) {
if !self.is_activated() {
return;
}
self.drain_and_flush(false);
if let FileEngine::Async(ref _engine) = self.disk.file_engine {
self.process_async_completion_queue();
}
}
}
impl VirtioDevice for VirtioBlock {
impl_device_type!(VIRTIO_ID_BLOCK);
fn avail_features(&self) -> u64 {
self.avail_features
}
fn acked_features(&self) -> u64 {
self.acked_features
}
fn set_acked_features(&mut self, acked_features: u64) {
self.acked_features = acked_features;
}
fn queues(&self) -> &[Queue] {
&self.queues
}
fn queues_mut(&mut self) -> &mut [Queue] {
&mut self.queues
}
fn queue_events(&self) -> &[EventFd] {
&self.queue_evts
}
fn interrupt_trigger(&self) -> &dyn VirtioInterrupt {
self.device_state
.active_state()
.expect("Device is not initialized")
.interrupt
.deref()
}
fn read_config(&self, offset: u64, data: &mut [u8]) {
if let Some(config_space_bytes) = self.config_space.as_slice().get(u64_to_usize(offset)..) {
let len = config_space_bytes.len().min(data.len());
data[..len].copy_from_slice(&config_space_bytes[..len]);
} else {
error!("Failed to read config space");
self.metrics.cfg_fails.inc();
}
}
fn write_config(&mut self, offset: u64, data: &[u8]) {
let config_space_bytes = self.config_space.as_mut_slice();
let start = usize::try_from(offset).ok();
let end = start.and_then(|s| s.checked_add(data.len()));
let Some(dst) = start
.zip(end)
.and_then(|(start, end)| config_space_bytes.get_mut(start..end))
else {
error!("Failed to write config space");
self.metrics.cfg_fails.inc();
return;
};
dst.copy_from_slice(data);
}
fn activate(
&mut self,
mem: GuestMemoryMmap,
interrupt: Arc<dyn VirtioInterrupt>,
) -> Result<(), ActivateError> {
for q in self.queues.iter_mut() {
q.initialize(&mem)
.map_err(ActivateError::QueueMemoryError)?;
}
let event_idx = self.has_feature(u64::from(VIRTIO_RING_F_EVENT_IDX));
if event_idx {
for queue in &mut self.queues {
queue.enable_notif_suppression();
}
}
if self.activate_evt.write(1).is_err() {
self.metrics.activate_fails.inc();
return Err(ActivateError::EventFd);
}
self.device_state = DeviceState::Activated(ActiveState { mem, interrupt });
Ok(())
}
fn is_activated(&self) -> bool {
self.device_state.is_activated()
}
}
impl Drop for VirtioBlock {
fn drop(&mut self) {
match self.cache_type {
CacheType::Unsafe => {
if let Err(err) = self.disk.file_engine.drain(true) {
error!("Failed to drain ops on drop: {:?}", err);
}
}
CacheType::Writeback => {
self.drain_and_flush(true);
}
};
}
}
#[cfg(test)]
mod tests {
use std::fs::metadata;
use std::io::{Read, Write};
use std::os::unix::ffi::OsStrExt;
use std::thread;
use std::time::Duration;
use vmm_sys_util::tempfile::TempFile;
use super::*;
use crate::check_metric_after_block;
use crate::devices::virtio::block::virtio::IO_URING_NUM_ENTRIES;
use crate::devices::virtio::block::virtio::test_utils::{
default_block, read_blk_req_descriptors, set_queue, set_rate_limiter,
simulate_async_completion_event, simulate_queue_and_async_completion_events,
simulate_queue_event,
};
use crate::devices::virtio::queue::{VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE};
use crate::devices::virtio::test_utils::{VirtQueue, default_interrupt, default_mem};
use crate::rate_limiter::TokenType;
use crate::vstate::memory::{Address, Bytes, GuestAddress};
#[test]
fn test_from_config() {
let block_config = BlockDeviceConfig {
drive_id: "".to_string(),
partuuid: None,
is_root_device: false,
cache_type: CacheType::Unsafe,
is_read_only: Some(true),
path_on_host: Some("path".to_string()),
rate_limiter: None,
file_engine_type: Default::default(),
socket: None,
};
VirtioBlockConfig::try_from(&block_config).unwrap();
let block_config = BlockDeviceConfig {
drive_id: "".to_string(),
partuuid: None,
is_root_device: false,
cache_type: CacheType::Unsafe,
is_read_only: None,
path_on_host: None,
rate_limiter: None,
file_engine_type: Default::default(),
socket: Some("sock".to_string()),
};
VirtioBlockConfig::try_from(&block_config).unwrap_err();
let block_config = BlockDeviceConfig {
drive_id: "".to_string(),
partuuid: None,
is_root_device: false,
cache_type: CacheType::Unsafe,
is_read_only: Some(true),
path_on_host: Some("path".to_string()),
rate_limiter: None,
file_engine_type: Default::default(),
socket: Some("sock".to_string()),
};
VirtioBlockConfig::try_from(&block_config).unwrap_err();
}
#[test]
fn test_disk_backing_file_helper() {
let num_sectors = 2;
let f = TempFile::new().unwrap();
let size = u64::from(SECTOR_SIZE) * num_sectors;
f.as_file().set_len(size).unwrap();
for engine in [FileEngineType::Sync, FileEngineType::Async] {
let disk_properties =
DiskProperties::new(String::from(f.as_path().to_str().unwrap()), true, engine)
.unwrap();
assert_eq!(size, u64::from(SECTOR_SIZE) * num_sectors);
assert_eq!(disk_properties.nsectors, num_sectors);
// Testing `backing_file.virtio_block_disk_image_id()` implies
// duplicating that logic in tests, so skipping it.
let res = DiskProperties::new("invalid-disk-path".to_string(), true, engine);
assert!(
matches!(res, Err(VirtioBlockError::BackingFile(_, _))),
"{:?}",
res
);
}
}
#[test]
fn test_virtio_features() {
for engine in [FileEngineType::Sync, FileEngineType::Async] {
let mut block = default_block(engine);
assert_eq!(block.device_type(), VIRTIO_ID_BLOCK);
let features: u64 = (1u64 << VIRTIO_F_VERSION_1) | (1u64 << VIRTIO_RING_F_EVENT_IDX);
assert_eq!(
block.avail_features_by_page(0),
(features & 0xffffffff) as u32,
);
assert_eq!(block.avail_features_by_page(1), (features >> 32) as u32);
for i in 2..10 {
assert_eq!(block.avail_features_by_page(i), 0u32);
}
for i in 0..10 {
block.ack_features_by_page(i, u32::MAX);
}
assert_eq!(block.acked_features, features);
}
}
#[test]
fn test_virtio_read_config() {
for engine in [FileEngineType::Sync, FileEngineType::Async] {
let block = default_block(engine);
let mut actual_config_space = ConfigSpace::default();
block.read_config(0, actual_config_space.as_mut_slice());
// This will read the number of sectors.
// The block's backing file size is 0x1000, so there are 8 (4096/512) sectors.
// The config space is little endian.
let expected_config_space = ConfigSpace { capacity: 8 };
assert_eq!(actual_config_space, expected_config_space);
// Invalid read.
let expected_config_space = ConfigSpace { capacity: 696969 };
actual_config_space = expected_config_space;
block.read_config(
std::mem::size_of::<ConfigSpace>() as u64 + 1,
actual_config_space.as_mut_slice(),
);
// Validate read failed (the config space was not updated).
assert_eq!(actual_config_space, expected_config_space);
}
}
#[test]
fn test_virtio_write_config() {
for engine in [FileEngineType::Sync, FileEngineType::Async] {
let mut block = default_block(engine);
let expected_config_space = ConfigSpace { capacity: 696969 };
block.write_config(0, expected_config_space.as_slice());
let mut actual_config_space = ConfigSpace::default();
block.read_config(0, actual_config_space.as_mut_slice());
assert_eq!(actual_config_space, expected_config_space);
// If privileged user writes to `/dev/mem`, in block config space - byte by byte.
let expected_config_space = ConfigSpace {
capacity: 0x1122334455667788,
};
let expected_config_space_slice = expected_config_space.as_slice();
for (i, b) in expected_config_space_slice.iter().enumerate() {
block.write_config(i as u64, &[*b]);
}
block.read_config(0, actual_config_space.as_mut_slice());
assert_eq!(actual_config_space, expected_config_space);
// Invalid write.
let new_config_space = ConfigSpace {
capacity: 0xDEADBEEF,
};
block.write_config(5, new_config_space.as_slice());
// Make sure nothing got written.
block.read_config(0, actual_config_space.as_mut_slice());
assert_eq!(actual_config_space, expected_config_space);
// Large offset that may cause an overflow.
block.write_config(u64::MAX, new_config_space.as_slice());
// Make sure nothing got written.
block.read_config(0, actual_config_space.as_mut_slice());
assert_eq!(actual_config_space, expected_config_space);
}
}
#[test]
fn test_invalid_request() {
for engine in [FileEngineType::Sync, FileEngineType::Async] {
let mut block = default_block(engine);
let mem = default_mem();
let interrupt = default_interrupt();
let vq = VirtQueue::new(GuestAddress(0), &mem, 16);
set_queue(&mut block, 0, vq.create_queue());
block.activate(mem.clone(), interrupt).unwrap();
read_blk_req_descriptors(&vq);
let request_type_addr = GuestAddress(vq.dtable[0].addr.get());
// Request is invalid because the first descriptor is write-only.
vq.dtable[0]
.flags
.set(VIRTQ_DESC_F_NEXT | VIRTQ_DESC_F_WRITE);
mem.write_obj::<u32>(VIRTIO_BLK_T_IN, request_type_addr)
.unwrap();
simulate_queue_event(&mut block, Some(true));
assert_eq!(vq.used.idx.get(), 1);
assert_eq!(vq.used.ring[0].get().id, 0);
assert_eq!(vq.used.ring[0].get().len, 0);
}
}
#[test]
fn test_addr_out_of_bounds() {
for engine in [FileEngineType::Sync, FileEngineType::Async] {
let mut block = default_block(engine);
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/block/virtio/test_utils.rs | src/vmm/src/devices/virtio/block/virtio/test_utils.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![doc(hidden)]
#[cfg(test)]
use std::thread;
#[cfg(test)]
use std::time::Duration;
use vmm_sys_util::tempfile::TempFile;
use super::RequestHeader;
use super::device::VirtioBlockConfig;
use crate::devices::virtio::block::virtio::device::FileEngineType;
#[cfg(test)]
use crate::devices::virtio::block::virtio::io::FileEngine;
use crate::devices::virtio::block::virtio::{CacheType, VirtioBlock};
#[cfg(test)]
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::queue::{Queue, VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE};
use crate::devices::virtio::test_utils::{VirtQueue, VirtqDesc};
#[cfg(test)]
use crate::devices::virtio::transport::VirtioInterruptType;
use crate::rate_limiter::RateLimiter;
use crate::vmm_config::{RateLimiterConfig, TokenBucketConfig};
use crate::vstate::memory::{Bytes, GuestAddress};
/// Create a default Block instance to be used in tests.
pub fn default_block(file_engine_type: FileEngineType) -> VirtioBlock {
// Create backing file.
let f = TempFile::new().unwrap();
f.as_file().set_len(0x1000).unwrap();
default_block_with_path(f.as_path().to_str().unwrap().to_string(), file_engine_type)
}
/// Create a default Block instance using file at the specified path to be used in tests.
pub fn default_block_with_path(path: String, file_engine_type: FileEngineType) -> VirtioBlock {
let config = VirtioBlockConfig {
drive_id: "test".to_string(),
path_on_host: path,
is_root_device: false,
partuuid: None,
is_read_only: false,
cache_type: CacheType::Unsafe,
// Rate limiting is enabled but with a high operation rate (10 million ops/s).
rate_limiter: Some(RateLimiterConfig {
bandwidth: Some(TokenBucketConfig {
size: 0,
one_time_burst: Some(0),
refill_time: 0,
}),
ops: Some(TokenBucketConfig {
size: 100_000,
one_time_burst: Some(0),
refill_time: 10,
}),
}),
file_engine_type,
};
// The default block device is read-write and non-root.
VirtioBlock::new(config).unwrap()
}
pub fn set_queue(blk: &mut VirtioBlock, idx: usize, q: Queue) {
blk.queues[idx] = q;
}
pub fn set_rate_limiter(blk: &mut VirtioBlock, rl: RateLimiter) {
blk.rate_limiter = rl;
}
pub fn rate_limiter(blk: &mut VirtioBlock) -> &RateLimiter {
&blk.rate_limiter
}
#[cfg(test)]
pub fn simulate_queue_event(b: &mut VirtioBlock, maybe_expected_irq: Option<bool>) {
// Trigger the queue event.
b.queue_evts[0].write(1).unwrap();
// Handle event.
b.process_queue_event();
// Validate the queue operation finished successfully.
if let Some(expected_irq) = maybe_expected_irq {
assert_eq!(
b.interrupt_trigger()
.has_pending_interrupt(VirtioInterruptType::Queue(0)),
expected_irq
);
}
}
#[cfg(test)]
pub fn simulate_async_completion_event(b: &mut VirtioBlock, expected_irq: bool) {
if let FileEngine::Async(ref mut engine) = b.disk.file_engine {
// Wait for all the async operations to complete.
engine.drain(false).unwrap();
// Wait for the async completion event to be sent.
thread::sleep(Duration::from_millis(150));
// Handle event.
b.process_async_completion_event();
}
// Validate if there are pending IRQs.
assert_eq!(
b.interrupt_trigger()
.has_pending_interrupt(VirtioInterruptType::Queue(0)),
expected_irq
);
}
#[cfg(test)]
pub fn simulate_queue_and_async_completion_events(b: &mut VirtioBlock, expected_irq: bool) {
match b.disk.file_engine {
FileEngine::Async(_) => {
simulate_queue_event(b, None);
simulate_async_completion_event(b, expected_irq);
}
FileEngine::Sync(_) => {
simulate_queue_event(b, Some(expected_irq));
}
}
}
/// Structure encapsulating the virtq descriptors of a single request to the block device
#[derive(Debug)]
pub struct RequestDescriptorChain<'a, 'b> {
pub driver_queue: &'b VirtQueue<'a>,
pub header_desc: &'b VirtqDesc<'a>,
pub data_desc: &'b VirtqDesc<'a>,
pub status_desc: &'b VirtqDesc<'a>,
}
impl<'a, 'b> RequestDescriptorChain<'a, 'b> {
/// Creates a new [`RequestDescriptor´] chain in the given [`VirtQueue`]
///
/// The header, data and status descriptors are put into the first three indices in
/// the queue's descriptor table. They point to address 0x1000, 0x2000 and 0x3000 in guest
/// memory, respectively, and each have their `len` set to 0x1000.
///
/// The data descriptor is initialized to be write_only
pub fn new(vq: &'b VirtQueue<'a>) -> Self {
read_blk_req_descriptors(vq);
RequestDescriptorChain {
driver_queue: vq,
header_desc: &vq.dtable[0],
data_desc: &vq.dtable[1],
status_desc: &vq.dtable[2],
}
}
pub fn header(&self) -> RequestHeader {
self.header_desc
.memory()
.read_obj(GuestAddress(self.header_desc.addr.get()))
.unwrap()
}
pub fn set_header(&self, header: RequestHeader) {
self.header_desc
.memory()
.write_obj(header, GuestAddress(self.header_desc.addr.get()))
.unwrap()
}
}
/// Puts a descriptor chain of length three into the given [`VirtQueue`].
///
/// This chain follows the skeleton of a block device request, e.g. the first
/// descriptor offers space for the header (readonly), the second descriptor offers space
/// for the data (set to writeonly, if you want a write request, update to readonly),
/// and the last descriptor for the device-written status field (writeonly).
///
/// The head of the chain is made available as the first descriptor to be processed, by
/// setting avail_idx to 1.
pub fn read_blk_req_descriptors(vq: &VirtQueue) {
    const HEADER_IDX: u16 = 0;
    const DATA_IDX: u16 = 1;
    const STATUS_IDX: u16 = 2;
    const DESC_LEN: u32 = 0x1000;

    // Header descriptor (readonly): lives at 0x1000, chained to the data descriptor.
    vq.avail.ring[HEADER_IDX as usize].set(HEADER_IDX);
    vq.dtable[HEADER_IDX as usize].set(0x1000, DESC_LEN, VIRTQ_DESC_F_NEXT, DATA_IDX);
    // Data descriptor (write-only by default): lives at 0x2000, chained to status.
    vq.avail.ring[DATA_IDX as usize].set(DATA_IDX);
    vq.dtable[DATA_IDX as usize].set(
        0x2000,
        DESC_LEN,
        VIRTQ_DESC_F_NEXT | VIRTQ_DESC_F_WRITE,
        STATUS_IDX,
    );
    // Status descriptor (write-only): lives at 0x3000; end of chain, so the
    // "next" value is unused by the device.
    vq.avail.ring[STATUS_IDX as usize].set(STATUS_IDX);
    vq.dtable[STATUS_IDX as usize].set(0x3000, DESC_LEN, VIRTQ_DESC_F_WRITE, STATUS_IDX + 1);
    // Publish the chain head as the first available descriptor.
    vq.avail.idx.set(1);
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/block/virtio/mod.rs | src/vmm/src/devices/virtio/block/virtio/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Implements a virtio block device.
pub mod device;
mod event_handler;
mod io;
pub mod metrics;
pub mod persist;
pub mod request;
pub mod test_utils;
use vm_memory::GuestMemoryError;
pub use self::device::VirtioBlock;
pub use self::request::*;
pub use crate::devices::virtio::block::CacheType;
use crate::devices::virtio::queue::FIRECRACKER_MAX_QUEUE_SIZE;
/// Sector shift for block device.
pub const SECTOR_SHIFT: u8 = 9;
/// Size of block sector (512 bytes).
pub const SECTOR_SIZE: u32 = (0x01_u32) << SECTOR_SHIFT;
/// The number of queues of block device.
pub const BLOCK_NUM_QUEUES: usize = 1;
/// Per-queue sizes for the block device (a single queue at the maximum supported size).
pub const BLOCK_QUEUE_SIZES: [u16; BLOCK_NUM_QUEUES] = [FIRECRACKER_MAX_QUEUE_SIZE];
// The virtio queue can hold up to 256 descriptors, but 1 request spreads across 2-3 descriptors.
// So we can use 128 IO_URING entries without ever triggering a FullSq Error.
/// Maximum number of io uring entries we allow in the queue.
pub const IO_URING_NUM_ENTRIES: u16 = 128;
/// Errors the block device can trigger.
// NOTE: `displaydoc::Display` turns each variant's doc comment below into its
// runtime `Display` message, so those `///` lines are effectively user-visible
// strings — do not edit them casually.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum VirtioBlockError {
    /// Cannot create config
    Config,
    /// Guest gave us too few descriptors in a descriptor chain.
    DescriptorChainTooShort,
    /// Guest gave us a descriptor that was too short to use.
    DescriptorLengthTooSmall,
    /// Getting a block's metadata fails for any reason.
    GetFileMetadata(std::io::Error),
    /// Guest gave us bad memory addresses.
    GuestMemory(GuestMemoryError),
    /// The data length is invalid.
    InvalidDataLength,
    /// The requested operation would cause a seek beyond disk end.
    InvalidOffset,
    /// Guest gave us a read only descriptor that protocol says to write to.
    UnexpectedReadOnlyDescriptor,
    /// Guest gave us a write only descriptor that protocol says to read from.
    UnexpectedWriteOnlyDescriptor,
    /// Error coming from the IO engine: {0}
    FileEngine(io::BlockIoError),
    /// Error manipulating the backing file: {0} {1}
    BackingFile(std::io::Error, String),
    /// Error opening eventfd: {0}
    EventFd(std::io::Error),
    /// Error creating an interrupt: {0}
    Interrupt(std::io::Error),
    /// Error coming from the rate limiter: {0}
    RateLimiter(std::io::Error),
    /// Persistence error: {0}
    Persist(crate::devices::virtio::persist::PersistError),
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/block/virtio/event_handler.rs | src/vmm/src/devices/virtio/block/virtio/event_handler.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use event_manager::{EventOps, Events, MutEventSubscriber};
use vmm_sys_util::epoll::EventSet;
use super::io::FileEngine;
use crate::devices::virtio::block::virtio::device::VirtioBlock;
use crate::devices::virtio::device::VirtioDevice;
use crate::logger::{error, warn};
impl VirtioBlock {
const PROCESS_ACTIVATE: u32 = 0;
const PROCESS_QUEUE: u32 = 1;
const PROCESS_RATE_LIMITER: u32 = 2;
const PROCESS_ASYNC_COMPLETION: u32 = 3;
fn register_runtime_events(&self, ops: &mut EventOps) {
if let Err(err) = ops.add(Events::with_data(
&self.queue_evts[0],
Self::PROCESS_QUEUE,
EventSet::IN,
)) {
error!("Failed to register queue event: {}", err);
}
if let Err(err) = ops.add(Events::with_data(
&self.rate_limiter,
Self::PROCESS_RATE_LIMITER,
EventSet::IN,
)) {
error!("Failed to register ratelimiter event: {}", err);
}
if let FileEngine::Async(ref engine) = self.disk.file_engine
&& let Err(err) = ops.add(Events::with_data(
engine.completion_evt(),
Self::PROCESS_ASYNC_COMPLETION,
EventSet::IN,
))
{
error!("Failed to register IO engine completion event: {}", err);
}
}
fn register_activate_event(&self, ops: &mut EventOps) {
if let Err(err) = ops.add(Events::with_data(
&self.activate_evt,
Self::PROCESS_ACTIVATE,
EventSet::IN,
)) {
error!("Failed to register activate event: {}", err);
}
}
fn process_activate_event(&self, ops: &mut EventOps) {
if let Err(err) = self.activate_evt.read() {
error!("Failed to consume block activate event: {:?}", err);
}
self.register_runtime_events(ops);
if let Err(err) = ops.remove(Events::with_data(
&self.activate_evt,
Self::PROCESS_ACTIVATE,
EventSet::IN,
)) {
error!("Failed to un-register activate event: {}", err);
}
}
}
impl MutEventSubscriber for VirtioBlock {
    /// Dispatches an epoll event to the matching handler: device activation,
    /// queue notification, rate-limiter timer, or async-IO completion.
    fn process(&mut self, event: Events, ops: &mut EventOps) {
        let source = event.data();
        let event_set = event.event_set();

        // TODO: also check for errors. Pending high level discussions on how we want
        // to handle errors in devices.
        if !EventSet::IN.contains(event_set) {
            warn!(
                "Block: Received unknown event: {:?} from source: {:?}",
                event_set, source
            );
            return;
        }
        // Events arriving before activation are spurious; log and drop them.
        if !self.is_activated() {
            warn!(
                "Block: The device is not yet activated. Spurious event received: {:?}",
                source
            );
            return;
        }
        match source {
            Self::PROCESS_ACTIVATE => self.process_activate_event(ops),
            Self::PROCESS_QUEUE => self.process_queue_event(),
            Self::PROCESS_RATE_LIMITER => self.process_rate_limiter_event(),
            Self::PROCESS_ASYNC_COMPLETION => self.process_async_completion_event(),
            _ => warn!("Block: Spurious event received: {:?}", source),
        }
    }

    fn init(&mut self, ops: &mut EventOps) {
        // This function can be called during different points in the device lifetime:
        // - shortly after device creation,
        // - on device activation (is-activated already true at this point),
        // - on device restore from snapshot.
        if self.is_activated() {
            self.register_runtime_events(ops);
        } else {
            self.register_activate_event(ops);
        }
    }
}
#[cfg(test)]
mod tests {
    use std::sync::{Arc, Mutex};
    use event_manager::{EventManager, SubscriberOps};
    use super::*;
    use crate::devices::virtio::block::virtio::device::FileEngineType;
    use crate::devices::virtio::block::virtio::test_utils::{
        default_block, read_blk_req_descriptors, set_queue, simulate_async_completion_event,
    };
    use crate::devices::virtio::block::virtio::{VIRTIO_BLK_S_OK, VIRTIO_BLK_T_OUT};
    use crate::devices::virtio::queue::VIRTQ_DESC_F_NEXT;
    use crate::devices::virtio::test_utils::{VirtQueue, default_interrupt, default_mem};
    use crate::vstate::memory::{Bytes, GuestAddress};

    /// End-to-end check that a pending queue event is ignored until the device
    /// is activated, and is then processed to completion via the EventManager.
    #[test]
    fn test_event_handler() {
        let mut event_manager = EventManager::new().unwrap();
        let mut block = default_block(FileEngineType::default());
        let mem = default_mem();
        let interrupt = default_interrupt();
        let vq = VirtQueue::new(GuestAddress(0), &mem, 16);
        set_queue(&mut block, 0, vq.create_queue());
        read_blk_req_descriptors(&vq);
        let block = Arc::new(Mutex::new(block));
        let _id = event_manager.add_subscriber(block.clone());
        let request_type_addr = GuestAddress(vq.dtable[0].addr.get());
        let data_addr = GuestAddress(vq.dtable[1].addr.get());
        let status_addr = GuestAddress(vq.dtable[2].addr.get());
        // Push a 'Write' operation.
        {
            mem.write_obj::<u32>(VIRTIO_BLK_T_OUT, request_type_addr)
                .unwrap();
            // Make data read only, 512 bytes in len, and set the actual value to be written.
            vq.dtable[1].flags.set(VIRTQ_DESC_F_NEXT);
            vq.dtable[1].len.set(512);
            mem.write_obj::<u64>(123_456_789, data_addr).unwrap();
            // Trigger the queue event.
            block.lock().unwrap().queue_evts[0].write(1).unwrap();
        }
        // EventManager should report no events since block has only registered
        // its activation event so far (even though queue event is pending).
        let ev_count = event_manager.run_with_timeout(50).unwrap();
        assert_eq!(ev_count, 0);
        // Now activate the device.
        block
            .lock()
            .unwrap()
            .activate(mem.clone(), interrupt)
            .unwrap();
        // Process the activate event.
        let ev_count = event_manager.run_with_timeout(50).unwrap();
        assert_eq!(ev_count, 1);
        // Handle the pending queue event through EventManager.
        event_manager
            .run_with_timeout(100)
            .expect("Metrics event timeout or error.");
        // Complete async IO ops if needed
        simulate_async_completion_event(&mut block.lock().unwrap(), true);
        // One request used; only the status byte was written back (len == 1),
        // and the device reported success.
        assert_eq!(vq.used.idx.get(), 1);
        assert_eq!(vq.used.ring[0].get().id, 0);
        assert_eq!(vq.used.ring[0].get().len, 1);
        assert_eq!(mem.read_obj::<u32>(status_addr).unwrap(), VIRTIO_BLK_S_OK);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/block/virtio/metrics.rs | src/vmm/src/devices/virtio/block/virtio/metrics.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the metrics system for block devices.
//!
//! # Metrics format
//! The metrics are flushed in JSON when requested by vmm::logger::metrics::METRICS.write().
//!
//! ## JSON example with metrics:
//! ```json
//! {
//! "block_drv0": {
//! "activate_fails": "SharedIncMetric",
//! "cfg_fails": "SharedIncMetric",
//! "no_avail_buffer": "SharedIncMetric",
//! "event_fails": "SharedIncMetric",
//! "execute_fails": "SharedIncMetric",
//! ...
//! }
//! "block_drv1": {
//! "activate_fails": "SharedIncMetric",
//! "cfg_fails": "SharedIncMetric",
//! "no_avail_buffer": "SharedIncMetric",
//! "event_fails": "SharedIncMetric",
//! "execute_fails": "SharedIncMetric",
//! ...
//! }
//! ...
//! "block_drive_id": {
//! "activate_fails": "SharedIncMetric",
//! "cfg_fails": "SharedIncMetric",
//! "no_avail_buffer": "SharedIncMetric",
//! "event_fails": "SharedIncMetric",
//! "execute_fails": "SharedIncMetric",
//! ...
//! }
//! "block": {
//! "activate_fails": "SharedIncMetric",
//! "cfg_fails": "SharedIncMetric",
//! "no_avail_buffer": "SharedIncMetric",
//! "event_fails": "SharedIncMetric",
//! "execute_fails": "SharedIncMetric",
//! ...
//! }
//! }
//! ```
//! Each `block` field in the example above is a serializable `BlockDeviceMetrics` structure
//! collecting metrics such as `activate_fails`, `cfg_fails`, etc. for the block device.
//! `block_drv0` represent metrics for the endpoint "/drives/drv0",
//! `block_drv1` represent metrics for the endpoint "/drives/drv1", and
//! `block_drive_id` represent metrics for the endpoint "/drives/{drive_id}"
//! block device respectively and `block` is the aggregate of all the per device metrics.
//!
//! # Limitations
//! Block devices currently do not have `vmm::logger::metrics::StoreMetrics`, so the
//! aggregate doesn't consider them.
//!
//! # Design
//! The main design goals of this system are:
//! * To improve block device metrics by logging them at per device granularity.
//! * Continue to provide aggregate block metrics to maintain backward compatibility.
//! * Move BlockDeviceMetrics out of from logger and decouple it.
//! * Rely on `serde` to provide the actual serialization for writing the metrics.
//! * Since all metrics start at 0, we implement the `Default` trait via derive for all of them, to
//! avoid having to initialize everything by hand.
//!
//! * Devices could be created in any order i.e. the first device created could either be drv0 or
//! drv1 so if we use a vector for BlockDeviceMetrics and call 1st device as block0, then block0
//! could sometimes point to drv0 and sometimes to drv1 which doesn't help with analysing the
//! metrics. So, use Map instead of Vec to help understand which drive the metrics actually
//! belongs to.
//!
//! The system implements 1 type of metrics:
//! * Shared Incremental Metrics (SharedIncMetrics) - dedicated for the metrics which need a counter
//! (i.e the number of times an API request failed). These metrics are reset upon flush.
//!
//! We add BlockDeviceMetrics entries from block::metrics::METRICS into Block device instead of
//! Block device having individual separate BlockDeviceMetrics entries because Block device is not
//! accessible from signal handlers to flush metrics and block::metrics::METRICS is.
use std::collections::BTreeMap;
use std::sync::{Arc, RwLock};
use serde::ser::SerializeMap;
use serde::{Serialize, Serializer};
use crate::logger::{IncMetric, LatencyAggregateMetrics, SharedIncMetric};
/// Map of block drive id to its metrics.
/// This should be protected by a lock before accessing.
#[derive(Debug)]
pub struct BlockMetricsPerDevice {
    /// Used to access per block device metrics, keyed by drive id.
    pub metrics: BTreeMap<String, Arc<BlockDeviceMetrics>>,
}
impl BlockMetricsPerDevice {
    /// Allocate `BlockDeviceMetrics` for block device having
    /// id `drive_id`. Also, allocate only if it doesn't
    /// exist to avoid overwriting previously allocated data.
    /// The lock is always initialized, so it is safe to unwrap
    /// it without a check.
    pub fn alloc(drive_id: String) -> Arc<BlockDeviceMetrics> {
        Arc::clone(
            METRICS
                .write()
                .unwrap()
                .metrics
                .entry(drive_id)
                .or_insert_with(|| Arc::new(BlockDeviceMetrics::default())),
        )
    }
}
/// Pool of block-related metrics per device behind a lock to
/// keep things thread safe. Since the lock is initialized here
/// it is safe to unwrap it without any check.
// Const-initialized (`RwLock::new` and `BTreeMap::new` are const fns), so no
// lazy-init wrapper is needed.
static METRICS: RwLock<BlockMetricsPerDevice> = RwLock::new(BlockMetricsPerDevice {
    metrics: BTreeMap::new(),
});
/// Serializes every per-device block metrics entry (as `block_<drive_id>`)
/// plus a final aggregate entry under the key `"block"`.
pub fn flush_metrics<S: Serializer>(serializer: S) -> Result<S::Ok, S::Error> {
    let guard = METRICS.read().unwrap();
    // One map entry per device, plus one for the aggregate.
    let mut map = serializer.serialize_map(Some(guard.metrics.len() + 1))?;
    let mut aggregate: BlockDeviceMetrics = BlockDeviceMetrics::default();
    for (drive_id, per_device) in guard.metrics.iter() {
        let entry: &BlockDeviceMetrics = per_device;
        // Serializing flushes the SharedIncMetric counters, so fold each entry
        // into the aggregate before serializing it.
        aggregate.aggregate(entry);
        map.serialize_entry(&format!("block_{}", drive_id), entry)?;
    }
    map.serialize_entry("block", &aggregate)?;
    map.end()
}
/// Block Device associated metrics.
#[derive(Debug, Default, Serialize)]
pub struct BlockDeviceMetrics {
    /// Number of times when activate failed on a block device.
    pub activate_fails: SharedIncMetric,
    /// Number of times when interacting with the space config of a block device failed.
    pub cfg_fails: SharedIncMetric,
    /// No available buffer for the block queue.
    pub no_avail_buffer: SharedIncMetric,
    /// Number of times when handling events on a block device failed.
    pub event_fails: SharedIncMetric,
    /// Number of failures in executing a request on a block device.
    pub execute_fails: SharedIncMetric,
    /// Number of invalid requests received for this block device.
    pub invalid_reqs_count: SharedIncMetric,
    /// Number of flush operations triggered on this block device.
    pub flush_count: SharedIncMetric,
    /// Number of events triggered on the queue of this block device.
    pub queue_event_count: SharedIncMetric,
    /// Number of ratelimiter-related events.
    pub rate_limiter_event_count: SharedIncMetric,
    /// Number of update operations triggered on this block device.
    pub update_count: SharedIncMetric,
    /// Number of failures while doing update on this block device.
    pub update_fails: SharedIncMetric,
    /// Number of bytes read by this block device.
    pub read_bytes: SharedIncMetric,
    /// Number of bytes written by this block device.
    pub write_bytes: SharedIncMetric,
    /// Number of successful read operations.
    pub read_count: SharedIncMetric,
    /// Number of successful write operations.
    pub write_count: SharedIncMetric,
    /// Duration of all read operations.
    pub read_agg: LatencyAggregateMetrics,
    /// Duration of all write operations.
    pub write_agg: LatencyAggregateMetrics,
    /// Number of rate limiter throttling events.
    pub rate_limiter_throttled_events: SharedIncMetric,
    /// Number of virtio events throttled because of the IO engine.
    /// This happens when the io_uring submission queue is full.
    pub io_engine_throttled_events: SharedIncMetric,
    /// Number of remaining requests in the queue.
    pub remaining_reqs_count: SharedIncMetric,
}
impl BlockDeviceMetrics {
    /// Const default construction.
    pub fn new() -> Self {
        Self {
            read_agg: LatencyAggregateMetrics::new(),
            write_agg: LatencyAggregateMetrics::new(),
            ..Default::default()
        }
    }

    /// Block metrics are SharedIncMetric where the diff of current vs
    /// old is serialized i.e. serialize_u64(current-old).
    /// So to have the aggregate serialized in the same way we need to
    /// fetch the diff of current vs old metrics and add it to the
    /// aggregate.
    pub fn aggregate(&mut self, other: &Self) {
        self.activate_fails.add(other.activate_fails.fetch_diff());
        self.cfg_fails.add(other.cfg_fails.fetch_diff());
        self.no_avail_buffer.add(other.no_avail_buffer.fetch_diff());
        self.event_fails.add(other.event_fails.fetch_diff());
        self.execute_fails.add(other.execute_fails.fetch_diff());
        self.invalid_reqs_count
            .add(other.invalid_reqs_count.fetch_diff());
        self.flush_count.add(other.flush_count.fetch_diff());
        self.queue_event_count
            .add(other.queue_event_count.fetch_diff());
        self.rate_limiter_event_count
            .add(other.rate_limiter_event_count.fetch_diff());
        self.update_count.add(other.update_count.fetch_diff());
        self.update_fails.add(other.update_fails.fetch_diff());
        self.read_bytes.add(other.read_bytes.fetch_diff());
        self.write_bytes.add(other.write_bytes.fetch_diff());
        self.read_count.add(other.read_count.fetch_diff());
        self.write_count.add(other.write_count.fetch_diff());
        // Latency aggregates only sum the accumulated microseconds.
        self.read_agg.sum_us.add(other.read_agg.sum_us.fetch_diff());
        self.write_agg
            .sum_us
            .add(other.write_agg.sum_us.fetch_diff());
        self.rate_limiter_throttled_events
            .add(other.rate_limiter_throttled_events.fetch_diff());
        self.io_engine_throttled_events
            .add(other.io_engine_throttled_events.fetch_diff());
        self.remaining_reqs_count
            .add(other.remaining_reqs_count.fetch_diff());
    }
}
#[cfg(test)]
pub mod tests {
    use super::*;

    /// Allocates metrics for the maximum number of block devices and checks
    /// per-device counters are tracked independently. Written to tolerate
    /// `test_single_block_dev_metrics` running in parallel on `drv0`.
    #[test]
    fn test_max_block_dev_metrics() {
        // Note: this test has nothing to do with
        // block structure or IRQs, this is just to allocate
        // metrics for max number of devices that system can have.
        // We have 5-23 IRQ for block devices on x86_64 so, there
        // are 19 block devices at max. And, even though we have more
        // devices on aarch64 but we stick to 19 to keep test common.
        const MAX_BLOCK_DEVICES: usize = 19;
        // This is to make sure that RwLock for block::metrics::METRICS is good.
        drop(METRICS.read().unwrap());
        drop(METRICS.write().unwrap());
        // block::metrics::METRICS is in short RwLock on Vec of BlockDeviceMetrics.
        // Normally, pointer to unique entries of block::metrics::METRICS are stored
        // in Block device so that Block device can do self.metrics.* to
        // update a metric. We try to do something similar here without
        // using Block device by allocating max number of
        // BlockDeviceMetrics in block::metrics::METRICS and store pointer to
        // each entry in the local `metrics` vec.
        // We then update 1 IncMetric and 2 SharedMetric for each metrics
        // and validate if the metrics for per device was updated as
        // expected.
        let mut metrics: Vec<Arc<BlockDeviceMetrics>> = Vec::new();
        for i in 0..MAX_BLOCK_DEVICES {
            let devn: String = format!("drv{}", i);
            metrics.push(BlockMetricsPerDevice::alloc(devn.clone()));
            // update IncMetric
            metrics[i].activate_fails.inc();
            // update SharedMetric
            metrics[i].read_bytes.add(10);
            metrics[i].write_bytes.add(5);
            if i == 0 {
                // Unit tests run in parallel and we have
                // `test_single_block_dev_metrics` that also increases
                // the IncMetric count of drv0 by 1 (intentional to check
                // thread safety) so we check if the count is >=1.
                assert!(metrics[i].activate_fails.count() >= 1);
                // For the same reason as above since we have
                // another unit test running in parallel which updates
                // drv0 metrics we check if count is >=10.
                assert!(metrics[i].read_bytes.count() >= 10);
            } else {
                assert!(metrics[i].activate_fails.count() == 1);
                assert!(metrics[i].read_bytes.count() == 10);
            }
            assert_eq!(metrics[i].write_bytes.count(), 5);
        }
    }

    /// Exercises a single device's metrics; deliberately shares `drv0` with
    /// `test_max_block_dev_metrics` to verify thread safety of the pool.
    #[test]
    fn test_single_block_dev_metrics() {
        // Use drv0 so that we can check thread safety with the
        // `test_max_block_dev_metrics` which also uses the same name.
        let devn = "drv0";
        // This is to make sure that RwLock for block::metrics::METRICS is good.
        drop(METRICS.read().unwrap());
        drop(METRICS.write().unwrap());
        let test_metrics = BlockMetricsPerDevice::alloc(String::from(devn));
        // Test to update IncMetrics
        test_metrics.activate_fails.inc();
        assert!(
            test_metrics.activate_fails.count() > 0,
            "{}",
            test_metrics.activate_fails.count()
        );
        // We expect only 2 tests (this and test_max_block_dev_metrics)
        // to update activate_fails count for drv0.
        assert!(
            test_metrics.activate_fails.count() <= 2,
            "{}",
            test_metrics.activate_fails.count()
        );
        // Test to update SharedMetrics
        test_metrics.read_bytes.add(5);
        // We expect only 2 tests (this and test_max_block_dev_metrics)
        // to update read_bytes count for drv0 by 5.
        assert!(test_metrics.read_bytes.count() >= 5);
        assert!(test_metrics.read_bytes.count() <= 15);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/block/virtio/request.rs | src/vmm/src/devices/virtio/block/virtio/request.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::convert::From;
use vm_memory::GuestMemoryError;
use super::{SECTOR_SHIFT, SECTOR_SIZE, VirtioBlockError, io as block_io};
use crate::devices::virtio::block::virtio::device::DiskProperties;
use crate::devices::virtio::block::virtio::metrics::BlockDeviceMetrics;
pub use crate::devices::virtio::generated::virtio_blk::{
VIRTIO_BLK_ID_BYTES, VIRTIO_BLK_S_IOERR, VIRTIO_BLK_S_OK, VIRTIO_BLK_S_UNSUPP,
VIRTIO_BLK_T_FLUSH, VIRTIO_BLK_T_GET_ID, VIRTIO_BLK_T_IN, VIRTIO_BLK_T_OUT,
};
use crate::devices::virtio::queue::DescriptorChain;
use crate::logger::{IncMetric, error};
use crate::rate_limiter::{RateLimiter, TokenType};
use crate::vstate::memory::{ByteValued, Bytes, GuestAddress, GuestMemoryMmap};
/// Errors that can occur while executing the IO portion of a block request.
#[derive(Debug, derive_more::From)]
pub enum IoErr {
    /// Failed to write the device id into guest memory (GetDeviceID request).
    GetId(GuestMemoryError),
    /// The engine transferred fewer bytes than the request asked for.
    PartialTransfer { completed: u32, expected: u32 },
    /// Error bubbled up from the backing file engine.
    FileEngine(block_io::BlockIoError),
}
/// The operation a virtio-blk request asks the device to perform.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum RequestType {
    /// Read from disk into guest memory (VIRTIO_BLK_T_IN).
    In,
    /// Write from guest memory to disk (VIRTIO_BLK_T_OUT).
    Out,
    /// Flush the backing file (VIRTIO_BLK_T_FLUSH).
    Flush,
    /// Return the device id string (VIRTIO_BLK_T_GET_ID).
    GetDeviceID,
    /// Any other request type code; carries the raw value.
    Unsupported(u32),
}
impl From<u32> for RequestType {
fn from(value: u32) -> Self {
match value {
VIRTIO_BLK_T_IN => RequestType::In,
VIRTIO_BLK_T_OUT => RequestType::Out,
VIRTIO_BLK_T_FLUSH => RequestType::Flush,
VIRTIO_BLK_T_GET_ID => RequestType::GetDeviceID,
t => RequestType::Unsupported(t),
}
}
}
/// Outcome of submitting a request to the file engine.
#[derive(Debug)]
pub enum ProcessingResult {
    /// Request was queued on the async engine; completion arrives later.
    Submitted,
    /// Request was rejected by the rate limiter / engine backpressure.
    Throttled,
    /// Request was executed synchronously to completion.
    Executed(FinishedRequest),
}
/// A completed request, ready to be placed on the used ring.
#[derive(Debug)]
pub struct FinishedRequest {
    /// Bytes the device wrote into guest memory (data + status byte).
    pub num_bytes_to_mem: u32,
    /// Index of the descriptor chain head to return to the guest.
    pub desc_idx: u16,
}
/// Internal completion status used to pick the virtio status byte.
#[derive(Debug)]
enum Status {
    /// Success; records bytes written towards guest memory.
    Ok { num_bytes_to_mem: u32 },
    /// IO failure; still records any partial device-to-memory transfer.
    IoErr { num_bytes_to_mem: u32, err: IoErr },
    /// Unknown request type code.
    Unsupported { op: u32 },
}
impl Status {
    /// Builds a `Status` from the number of bytes actually transferred versus
    /// the number requested. `data_to_mem` is true for device-to-guest-memory
    /// transfers (reads), in which case the transferred bytes count towards
    /// the used-ring length; a short transfer becomes a `PartialTransfer` error.
    fn from_data(data_len: u32, transferred_data_len: u32, data_to_mem: bool) -> Status {
        let num_bytes_to_mem = if data_to_mem { transferred_data_len } else { 0 };
        if transferred_data_len == data_len {
            Status::Ok { num_bytes_to_mem }
        } else {
            Status::IoErr {
                num_bytes_to_mem,
                err: IoErr::PartialTransfer {
                    completed: transferred_data_len,
                    expected: data_len,
                },
            }
        }
    }
}
/// The subset of a [`Request`] retained while its IO is in flight, enough to
/// write back the status byte and fill in the used-ring entry on completion.
#[derive(Debug)]
pub struct PendingRequest {
    // Request type; used to pick which metrics to bump on completion.
    r#type: RequestType,
    // Requested data length, compared against the transferred length.
    data_len: u32,
    // Guest address where the device-written status byte goes.
    status_addr: GuestAddress,
    // Head index of the descriptor chain, echoed into FinishedRequest.
    desc_idx: u16,
}
impl PendingRequest {
    /// Writes the virtio status byte for this request into guest memory and
    /// converts the request into a [`FinishedRequest`] for the used ring.
    /// Logs and counts invalid requests; a failed status write yields a
    /// zero-length used entry.
    fn write_status_and_finish(
        self,
        status: &Status,
        mem: &GuestMemoryMmap,
        block_metrics: &BlockDeviceMetrics,
    ) -> FinishedRequest {
        let (num_bytes_to_mem, status_code) = match status {
            Status::Ok { num_bytes_to_mem } => {
                (*num_bytes_to_mem, u8::try_from(VIRTIO_BLK_S_OK).unwrap())
            }
            Status::IoErr {
                num_bytes_to_mem,
                err,
            } => {
                block_metrics.invalid_reqs_count.inc();
                error!(
                    "Failed to execute {:?} virtio block request: {:?}",
                    self.r#type, err
                );
                (*num_bytes_to_mem, u8::try_from(VIRTIO_BLK_S_IOERR).unwrap())
            }
            Status::Unsupported { op } => {
                block_metrics.invalid_reqs_count.inc();
                error!("Received unsupported virtio block request: {}", op);
                (0, u8::try_from(VIRTIO_BLK_S_UNSUPP).unwrap())
            }
        };
        let num_bytes_to_mem = mem
            .write_obj(status_code, self.status_addr)
            .map(|_| {
                // Account for the status byte
                num_bytes_to_mem + 1
            })
            .unwrap_or_else(|err| {
                error!("Failed to write virtio block status: {:?}", err);
                // If we can't write the status, discard the virtio descriptor
                0
            });
        FinishedRequest {
            num_bytes_to_mem,
            desc_idx: self.desc_idx,
        }
    }

    /// Finalizes this request given the IO outcome `res` (bytes transferred on
    /// success): updates the per-device metrics for the request type, derives
    /// the status, and writes the status byte back to the guest.
    pub fn finish(
        self,
        mem: &GuestMemoryMmap,
        res: Result<u32, IoErr>,
        block_metrics: &BlockDeviceMetrics,
    ) -> FinishedRequest {
        let status = match (res, self.r#type) {
            (Ok(transferred_data_len), RequestType::In) => {
                let status = Status::from_data(self.data_len, transferred_data_len, true);
                // Bytes are counted even on partial transfers; the op counter
                // only increments on full success.
                block_metrics.read_bytes.add(transferred_data_len.into());
                if let Status::Ok { .. } = status {
                    block_metrics.read_count.inc();
                }
                status
            }
            (Ok(transferred_data_len), RequestType::Out) => {
                let status = Status::from_data(self.data_len, transferred_data_len, false);
                block_metrics.write_bytes.add(transferred_data_len.into());
                if let Status::Ok { .. } = status {
                    block_metrics.write_count.inc();
                }
                status
            }
            (Ok(_), RequestType::Flush) => {
                block_metrics.flush_count.inc();
                Status::Ok {
                    num_bytes_to_mem: 0,
                }
            }
            (Ok(transferred_data_len), RequestType::GetDeviceID) => {
                Status::from_data(self.data_len, transferred_data_len, true)
            }
            // An unsupported type is reported as UNSUPP regardless of `res`.
            (_, RequestType::Unsupported(op)) => Status::Unsupported { op },
            (Err(err), _) => Status::IoErr {
                num_bytes_to_mem: 0,
                err,
            },
        };
        self.write_status_and_finish(&status, mem, block_metrics)
    }
}
/// The request header represents the mandatory fields of each block device request.
///
/// A request header contains the following fields:
/// * request_type: an u32 value mapping to a read, write or flush operation.
/// * reserved: 32 bits are reserved for future extensions of the Virtio Spec.
/// * sector: an u64 value representing the offset where a read/write is to occur.
///
/// The header simplifies reading the request from memory as all request follow
/// the same memory layout.
#[derive(Debug, Copy, Clone, Default)]
#[repr(C)]
pub struct RequestHeader {
    request_type: u32,
    _reserved: u32,
    sector: u64,
}

// SAFETY: Safe because RequestHeader only contains plain data.
unsafe impl ByteValued for RequestHeader {}
impl RequestHeader {
    /// Builds a header with the reserved field zeroed.
    pub fn new(request_type: u32, sector: u64) -> RequestHeader {
        RequestHeader {
            request_type,
            sector,
            _reserved: 0,
        }
    }

    /// Reads the request header from GuestMemoryMmap starting at `addr`.
    ///
    /// Virtio 1.0 specifies that the data is transmitted by the driver in little-endian
    /// format. Firecracker currently runs only on little endian platforms so we don't
    /// need to do an explicit little endian read as all reads are little endian by default.
    /// When running on a big endian platform, this code should not compile, and support
    /// for explicit little endian reads is required.
    #[cfg(target_endian = "little")]
    fn read_from(memory: &GuestMemoryMmap, addr: GuestAddress) -> Result<Self, VirtioBlockError> {
        memory
            .read_obj::<RequestHeader>(addr)
            .map_err(VirtioBlockError::GuestMemory)
    }
}
/// A fully parsed and validated block request, ready to be executed.
#[derive(Debug, PartialEq, Eq)]
pub struct Request {
    /// The operation this request performs.
    pub r#type: RequestType,
    /// Length of the data descriptor in bytes (0 for flush-only chains).
    pub data_len: u32,
    /// Guest address of the device-written status byte.
    pub status_addr: GuestAddress,
    // Starting sector (512-byte units) for read/write requests.
    sector: u64,
    // Guest address of the data buffer.
    data_addr: GuestAddress,
}
impl Request {
    /// Parses and validates a block request from the descriptor chain headed
    /// by `avail_desc`.
    ///
    /// Expects a header descriptor (readable), an optional data descriptor
    /// (direction must match the request type), and a final status descriptor
    /// (writable, at least one byte). Validates data length alignment and
    /// disk bounds for read/write, and minimum buffer size for GetDeviceID.
    pub fn parse(
        avail_desc: &DescriptorChain,
        mem: &GuestMemoryMmap,
        num_disk_sectors: u64,
    ) -> Result<Request, VirtioBlockError> {
        // The head contains the request type which MUST be readable.
        if avail_desc.is_write_only() {
            return Err(VirtioBlockError::UnexpectedWriteOnlyDescriptor);
        }
        let request_header = RequestHeader::read_from(mem, avail_desc.addr)?;
        let mut req = Request {
            r#type: RequestType::from(request_header.request_type),
            sector: request_header.sector,
            data_addr: GuestAddress(0),
            data_len: 0,
            status_addr: GuestAddress(0),
        };
        let data_desc;
        let status_desc;
        let desc = avail_desc
            .next_descriptor()
            .ok_or(VirtioBlockError::DescriptorChainTooShort)?;
        if !desc.has_next() {
            status_desc = desc;
            // Only flush requests are allowed to skip the data descriptor.
            if req.r#type != RequestType::Flush {
                return Err(VirtioBlockError::DescriptorChainTooShort);
            }
        } else {
            data_desc = desc;
            status_desc = data_desc
                .next_descriptor()
                .ok_or(VirtioBlockError::DescriptorChainTooShort)?;
            // The data descriptor's direction must agree with the request type:
            // device reads from it for Out, writes into it for In/GetDeviceID.
            if data_desc.is_write_only() && req.r#type == RequestType::Out {
                return Err(VirtioBlockError::UnexpectedWriteOnlyDescriptor);
            }
            if !data_desc.is_write_only() && req.r#type == RequestType::In {
                return Err(VirtioBlockError::UnexpectedReadOnlyDescriptor);
            }
            if !data_desc.is_write_only() && req.r#type == RequestType::GetDeviceID {
                return Err(VirtioBlockError::UnexpectedReadOnlyDescriptor);
            }
            req.data_addr = data_desc.addr;
            req.data_len = data_desc.len;
        }
        // check request validity
        match req.r#type {
            RequestType::In | RequestType::Out => {
                // Check that the data length is a multiple of 512 as specified in the virtio
                // standard.
                if req.data_len % SECTOR_SIZE != 0 {
                    return Err(VirtioBlockError::InvalidDataLength);
                }
                // Guard against u64 overflow as well as past-end-of-disk access.
                let top_sector = req
                    .sector
                    .checked_add(u64::from(req.data_len) >> SECTOR_SHIFT)
                    .ok_or(VirtioBlockError::InvalidOffset)?;
                if top_sector > num_disk_sectors {
                    return Err(VirtioBlockError::InvalidOffset);
                }
            }
            RequestType::GetDeviceID => {
                if req.data_len < VIRTIO_BLK_ID_BYTES {
                    return Err(VirtioBlockError::InvalidDataLength);
                }
            }
            _ => {}
        }
        // The status MUST always be writable.
        if !status_desc.is_write_only() {
            return Err(VirtioBlockError::UnexpectedReadOnlyDescriptor);
        }
        if status_desc.len < 1 {
            return Err(VirtioBlockError::DescriptorLengthTooSmall);
        }
        req.status_addr = status_desc.addr;
        Ok(req)
    }

    /// Consumes rate-limiter budget for this request. Returns `true` when the
    /// request must be throttled (insufficient ops or bytes budget); on a
    /// failed bytes consume, the ops token is refunded.
    pub(crate) fn rate_limit(&self, rate_limiter: &mut RateLimiter) -> bool {
        // If limiter.consume() fails it means there is no more TokenType::Ops
        // budget and rate limiting is in effect.
        if !rate_limiter.consume(1, TokenType::Ops) {
            return true;
        }
        // Exercise the rate limiter only if this request is of data transfer type.
        if self.r#type == RequestType::In || self.r#type == RequestType::Out {
            // If limiter.consume() fails it means there is no more TokenType::Bytes
            // budget and rate limiting is in effect.
            if !rate_limiter.consume(u64::from(self.data_len), TokenType::Bytes) {
                // Revert the OPS consume().
                rate_limiter.manual_replenish(1, TokenType::Ops);
                return true;
            }
        }
        false
    }

    /// Byte offset into the backing file (sector * 512).
    fn offset(&self) -> u64 {
        self.sector << SECTOR_SHIFT
    }

    /// Captures the fields needed after IO completion into a [`PendingRequest`].
    fn to_pending_request(&self, desc_idx: u16) -> PendingRequest {
        PendingRequest {
            r#type: self.r#type,
            data_len: self.data_len,
            status_addr: self.status_addr,
            desc_idx,
        }
    }

    /// Executes (or submits) the request against the disk's file engine.
    /// GetDeviceID and unsupported requests complete synchronously here;
    /// read/write/flush may complete later on the async engine.
    pub(crate) fn process(
        self,
        disk: &mut DiskProperties,
        desc_idx: u16,
        mem: &GuestMemoryMmap,
        block_metrics: &BlockDeviceMetrics,
    ) -> ProcessingResult {
        let pending = self.to_pending_request(desc_idx);
        let res = match self.r#type {
            RequestType::In => {
                let _metric = block_metrics.read_agg.record_latency_metrics();
                disk.file_engine
                    .read(self.offset(), mem, self.data_addr, self.data_len, pending)
            }
            RequestType::Out => {
                let _metric = block_metrics.write_agg.record_latency_metrics();
                disk.file_engine
                    .write(self.offset(), mem, self.data_addr, self.data_len, pending)
            }
            RequestType::Flush => disk.file_engine.flush(pending),
            RequestType::GetDeviceID => {
                let res = mem
                    .write_slice(&disk.image_id, self.data_addr)
                    .map(|_| VIRTIO_BLK_ID_BYTES)
                    .map_err(IoErr::GetId);
                return ProcessingResult::Executed(pending.finish(mem, res, block_metrics));
            }
            RequestType::Unsupported(_) => {
                return ProcessingResult::Executed(pending.finish(mem, Ok(0), block_metrics));
            }
        };
        match res {
            Ok(block_io::FileEngineOk::Submitted) => ProcessingResult::Submitted,
            Ok(block_io::FileEngineOk::Executed(res)) => {
                ProcessingResult::Executed(res.req.finish(mem, Ok(res.count), block_metrics))
            }
            Err(err) => {
                // Throttling errors are retried later; real IO errors finish
                // the request with an error status.
                if err.error.is_throttling_err() {
                    ProcessingResult::Throttled
                } else {
                    ProcessingResult::Executed(err.req.finish(
                        mem,
                        Err(IoErr::FileEngine(err.error)),
                        block_metrics,
                    ))
                }
            }
        }
    }
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use super::*;
use crate::devices::virtio::queue::{Queue, VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE};
use crate::devices::virtio::test_utils::{VirtQueue, default_mem};
use crate::vstate::memory::{Address, GuestAddress, GuestMemory};
const NUM_DISK_SECTORS: u64 = 1024;
// Test-only `Default` so tests can build placeholder pending requests without
// caring about the field values.
impl Default for PendingRequest {
fn default() -> Self {
PendingRequest {
r#type: RequestType::In,
data_len: 0,
status_addr: Default::default(),
desc_idx: 0,
}
}
}
#[test]
fn test_read_request_header() {
let mem = single_region_mem(0x1000);
let addr = GuestAddress(0);
let sector = 123_454_321;
// Test that all supported request types are read correctly from memory.
let supported_request_types = vec![
VIRTIO_BLK_T_IN,
VIRTIO_BLK_T_OUT,
VIRTIO_BLK_T_FLUSH,
VIRTIO_BLK_T_GET_ID,
];
for request_type in supported_request_types {
let expected_header = RequestHeader::new(request_type, sector);
mem.write_obj::<RequestHeader>(expected_header, addr)
.unwrap();
let actual_header = RequestHeader::read_from(&mem, addr).unwrap();
assert_eq!(actual_header.request_type, expected_header.request_type);
assert_eq!(actual_header.sector, expected_header.sector);
}
// Test that trying to read a request header that goes outside of the
// memory boundary fails. 0x1000 is the first address past the single
// 0x1000-byte region created above.
RequestHeader::read_from(&mem, GuestAddress(0x1000)).unwrap_err();
}
#[test]
fn test_request_type_from() {
    // Table of (raw virtio id, expected parsed type) pairs; 42 is not one of
    // the known ids and must map to `Unsupported`.
    let cases = [
        (VIRTIO_BLK_T_IN, RequestType::In),
        (VIRTIO_BLK_T_OUT, RequestType::Out),
        (VIRTIO_BLK_T_FLUSH, RequestType::Flush),
        (VIRTIO_BLK_T_GET_ID, RequestType::GetDeviceID),
        (42, RequestType::Unsupported(42)),
    ];
    for (raw, expected) in cases {
        assert_eq!(RequestType::from(raw), expected);
    }
}
impl RequestDescriptorChain<'_, '_> {
    /// Parse the queue's next descriptor chain and assert that parsing fails
    /// with the same `VirtioBlockError` variant as `e`.
    fn check_parse_err(&self, e: VirtioBlockError) {
        let mut q = self.driver_queue.create_queue();
        let memory = self.driver_queue.memory();
        // BUGFIX: the previous check was `matches!(result, Err(_e))`, where
        // `_e` is a fresh binding pattern that matches *any* error — the
        // expected-error argument was never actually compared, so the test
        // could not catch a wrong error variant. Compare enum discriminants
        // instead, which does not require `PartialEq` on the error type.
        match Request::parse(&q.pop().unwrap().unwrap(), memory, NUM_DISK_SECTORS) {
            Err(err) => assert_eq!(
                std::mem::discriminant(&err),
                std::mem::discriminant(&e),
                "unexpected error variant: {err:?}"
            ),
            Ok(_) => panic!("expected parse to fail with {e:?}"),
        }
    }

    /// Parse the chain and assert the resulting `Request` matches the header
    /// (and, when `check_data` is set, the data descriptor) of this chain.
    fn check_parse(&self, check_data: bool) {
        let mut q = self.driver_queue.create_queue();
        let memory = self.driver_queue.memory();
        let request =
            Request::parse(&q.pop().unwrap().unwrap(), memory, NUM_DISK_SECTORS).unwrap();
        let expected_header = self.header();
        assert_eq!(
            request.r#type,
            RequestType::from(expected_header.request_type)
        );
        assert_eq!(request.sector, expected_header.sector);
        if check_data {
            assert_eq!(request.data_addr.raw_value(), self.data_desc.addr.get());
            assert_eq!(request.data_len, self.data_desc.len.get());
        }
        assert_eq!(request.status_addr.raw_value(), self.status_desc.addr.get());
    }
}
// Walks a descriptor chain through the type-independent parse failures and
// fixes them one by one. NOTE: request type 100 is unsupported, so the
// type-specific data/offset validation is skipped (see the `_ => {}` arm in
// `Request::parse`).
#[test]
fn test_parse_generic() {
let mem = &default_mem();
let queue = VirtQueue::new(GuestAddress(0), mem, 16);
let chain = RequestDescriptorChain::new(&queue);
let request_header = RequestHeader::new(100, 114);
chain.set_header(request_header);
// Write only request type descriptor.
chain.header_desc.flags.set(VIRTQ_DESC_F_WRITE);
chain.check_parse_err(VirtioBlockError::UnexpectedWriteOnlyDescriptor);
// Chain too short: no DATA_DESCRIPTOR.
chain.header_desc.flags.set(0);
chain.check_parse_err(VirtioBlockError::DescriptorChainTooShort);
// Chain too short: no status descriptor.
chain.header_desc.flags.set(VIRTQ_DESC_F_NEXT);
chain.data_desc.flags.set(0);
chain.check_parse_err(VirtioBlockError::DescriptorChainTooShort);
// Status descriptor not writable.
chain.data_desc.flags.set(VIRTQ_DESC_F_NEXT);
chain.status_desc.flags.set(0);
chain.check_parse_err(VirtioBlockError::UnexpectedReadOnlyDescriptor);
// Status descriptor too small.
chain.status_desc.flags.set(VIRTQ_DESC_F_WRITE);
chain.status_desc.len.set(0);
chain.check_parse_err(VirtioBlockError::DescriptorLengthTooSmall);
// Fix status descriptor length.
chain.status_desc.len.set(0x1000);
// Invalid guest address for the status descriptor. Parsing will still succeed
// as the operation that will fail happens when executing the request.
chain.status_desc.addr.set(mem.last_addr().raw_value());
chain.check_parse(true);
// Fix status descriptor addr.
chain.status_desc.addr.set(0x3000);
// Invalid guest address for the data descriptor. Parsing will still succeed
// as the operation that will fail happens when executing the request.
chain.data_desc.addr.set(mem.last_addr().raw_value());
chain.check_parse(true);
// Fix data descriptor addr.
chain.data_desc.addr.set(0x2000);
chain.check_parse(true);
}
// IN (device-to-driver read) specific parse validation: the data descriptor
// must be device-writable, the length a multiple of the sector size, and the
// request must end within the disk.
#[test]
fn test_parse_in() {
let mem = &default_mem();
let queue = VirtQueue::new(GuestAddress(0), mem, 16);
let chain = RequestDescriptorChain::new(&queue);
let mut request_header = RequestHeader::new(VIRTIO_BLK_T_IN, 99);
chain.set_header(request_header);
// Read only data descriptor for IN.
chain.data_desc.flags.set(VIRTQ_DESC_F_NEXT);
chain.check_parse_err(VirtioBlockError::UnexpectedReadOnlyDescriptor);
// data_len is not multiple of 512 for IN.
chain
.data_desc
.flags
.set(VIRTQ_DESC_F_NEXT | VIRTQ_DESC_F_WRITE);
chain.data_desc.len.set(513);
chain.check_parse_err(VirtioBlockError::InvalidDataLength);
// sector is too big: one 512-byte sector starting at the last+1 sector.
request_header.sector = NUM_DISK_SECTORS;
chain.data_desc.len.set(512);
chain.set_header(request_header);
chain.check_parse_err(VirtioBlockError::InvalidOffset);
// Fix data descriptor.
request_header.sector = NUM_DISK_SECTORS - 1;
chain.set_header(request_header);
chain.check_parse(true);
}
// OUT (driver-to-device write) specific parse validation.
#[test]
fn test_parse_out() {
let mem = &default_mem();
let queue = VirtQueue::new(GuestAddress(0), mem, 16);
let chain = RequestDescriptorChain::new(&queue);
let mut request_header = RequestHeader::new(VIRTIO_BLK_T_OUT, 100);
chain.set_header(request_header);
// Write only data descriptor for OUT.
chain
.data_desc
.flags
.set(VIRTQ_DESC_F_NEXT | VIRTQ_DESC_F_WRITE);
chain.check_parse_err(VirtioBlockError::UnexpectedWriteOnlyDescriptor);
// data_len is not multiple of 512 for OUT. (Comment previously said "IN".)
chain.data_desc.flags.set(VIRTQ_DESC_F_NEXT);
chain.data_desc.len.set(1000);
chain.check_parse_err(VirtioBlockError::InvalidDataLength);
// sector is too big: two sectors starting at the last sector overrun the disk.
request_header.sector = NUM_DISK_SECTORS - 1;
chain.data_desc.len.set(1024);
chain.set_header(request_header);
chain.check_parse_err(VirtioBlockError::InvalidOffset);
// Fix header descriptor.
request_header.sector = NUM_DISK_SECTORS - 2;
chain.set_header(request_header);
chain.check_parse(true);
}
// Flush requests are valid both with and without a data descriptor.
#[test]
fn test_parse_flush() {
let mem = &default_mem();
let queue = VirtQueue::new(GuestAddress(0), mem, 16);
let chain = RequestDescriptorChain::new(&queue);
// Flush request with a data descriptor.
let request_header = RequestHeader::new(VIRTIO_BLK_T_FLUSH, 50);
chain.set_header(request_header);
chain.check_parse(true);
// Flush request without a data descriptor: point the header's `next`
// straight at the status descriptor (index 2), skipping the data one.
chain.header_desc.next.set(2);
chain.check_parse(false);
}
// GetDeviceID specific parse validation: the data buffer must be
// device-writable and large enough to hold the VIRTIO_BLK_ID_BYTES id.
#[test]
fn test_parse_get_id() {
let mem = &default_mem();
let queue = VirtQueue::new(GuestAddress(0), mem, 16);
let chain = RequestDescriptorChain::new(&queue);
let request_header = RequestHeader::new(VIRTIO_BLK_T_GET_ID, 15);
chain.set_header(request_header);
// Read only data descriptor for GetDeviceId.
chain.data_desc.flags.set(VIRTQ_DESC_F_NEXT);
chain.check_parse_err(VirtioBlockError::UnexpectedReadOnlyDescriptor);
// data_len is < VIRTIO_BLK_ID_BYTES for GetDeviceID.
chain
.data_desc
.flags
.set(VIRTQ_DESC_F_NEXT | VIRTQ_DESC_F_WRITE);
chain.data_desc.len.set(VIRTIO_BLK_ID_BYTES - 1);
chain.check_parse_err(VirtioBlockError::InvalidDataLength);
chain.data_desc.len.set(VIRTIO_BLK_ID_BYTES);
chain.check_parse(true);
}
use std::convert::TryInto;
/// -------------------------------------
/// BEGIN PROPERTY BASED TESTING
use proptest::arbitrary::Arbitrary;
use proptest::prelude::*;
use proptest::strategy::{Map, Strategy, TupleUnion};
use crate::devices::virtio::block::virtio::test_utils::RequestDescriptorChain;
use crate::test_utils::{multi_region_mem, single_region_mem};
// Implements a "strategy" for producing arbitrary values of RequestType.
// This can also be generated by a derive macro from `proptest_derive`, but the crate
// is currently experimental.
// Since we are dealing with a very complex type we need to turn off the clippy
// warning.
#[allow(clippy::type_complexity)]
impl Arbitrary for RequestType {
type Parameters = <u32 as Arbitrary>::Parameters;
// Tuple union will hold the strategies that we use to generate the request type.
// The first element is the weight of the strategy, the second is a function that
// returns the strategy value. The first four entries cover the supported
// request types; the fifth maps a random u32 onto `Unsupported`.
type Strategy = TupleUnion<(
(u32, std::sync::Arc<fn() -> Self>),
(u32, std::sync::Arc<fn() -> Self>),
(u32, std::sync::Arc<fn() -> Self>),
(u32, std::sync::Arc<fn() -> Self>),
(
u32,
std::sync::Arc<Map<<u32 as Arbitrary>::Strategy, fn(u32) -> Self>>,
),
)>;
fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
// All strategies have the same weight, there is no reason currently to skew
// the ratios to increase the odds of a specific request type.
TupleUnion::new((
(1u32, std::sync::Arc::new(|| RequestType::In {})),
(1u32, std::sync::Arc::new(|| RequestType::Out {})),
(1u32, std::sync::Arc::new(|| RequestType::Flush {})),
(1u32, std::sync::Arc::new(|| RequestType::GetDeviceID {})),
(
1u32,
std::sync::Arc::new(Strategy::prop_map(any::<u32>(), |id| {
// Random unsupported requests for our implementation start at
// VIRTIO_BLK_T_GET_ID + 1 = 9.
// This can be further refined to include unsupported requests ids < 9.
RequestType::Unsupported(id.checked_add(9).unwrap_or(9))
})),
),
))
}
}
// Inverse of `RequestType::from(u32)`: recover the raw virtio request id,
// used when crafting request headers for generated requests.
impl From<RequestType> for u32 {
fn from(request_type: RequestType) -> u32 {
match request_type {
RequestType::In => VIRTIO_BLK_T_IN,
RequestType::Out => VIRTIO_BLK_T_OUT,
RequestType::Flush => VIRTIO_BLK_T_FLUSH,
RequestType::GetDeviceID => VIRTIO_BLK_T_GET_ID,
RequestType::Unsupported(id) => id,
}
}
}
/// Descriptor flags a driver would set on the data descriptor for the given
/// request type.
fn request_type_flags(request_type: RequestType) -> u16 {
    match request_type {
        // Device-writable buffers: the device fills them (reads, device id).
        RequestType::In | RequestType::GetDeviceID => VIRTQ_DESC_F_NEXT | VIRTQ_DESC_F_WRITE,
        // Device-readable buffers and everything else.
        RequestType::Out | RequestType::Flush | RequestType::Unsupported(_) => VIRTQ_DESC_F_NEXT,
    }
}
// Property-test strategy: generate a random request (or expected parse
// error), materialize it as a descriptor chain, and return the expected
// outcome together with the memory and queue needed to parse it.
#[allow(clippy::let_with_type_underscore)]
fn random_request_parse()
-> impl Strategy<Value = (Result<Request, VirtioBlockError>, GuestMemoryMmap, Queue)> {
// In this strategy we are going to generate random Requests/Errors and map them
// to an input descriptor chain.
//
// We will check that Request::parse() arrives at the same result after
// parsing the descriptor chain. Input properties are validated and commented below.
(
any::<u64>(), // random data buffer sparsity factor
any::<u32>(), // data_len
any::<u64>(), // sector
any::<RequestType>(), // request type
any::<[bool; 10]>(), // coin
)
.prop_map(|(sparsity, data_len, sector, request_type, coins)| {
(
sparsity,
data_len,
sector,
request_type,
request_type.into(),
coins,
)
})
.prop_map(
|(sparsity, data_len, sector, request_type, virtio_request_id, coins)| {
do_random_request_parse(
sparsity,
data_len,
sector,
request_type,
virtio_request_id,
&coins,
)
},
)
}
fn do_random_request_parse(
sparsity: u64,
data_len: u32,
sector: u64,
request_type: RequestType,
virtio_request_id: u32,
coins_arr: &[bool],
) -> (Result<Request, VirtioBlockError>, GuestMemoryMmap, Queue) {
let coins = &mut coins_arr.iter();
// Randomize descriptor addresses. Assumed page size as max buffer len.
let base_addr = sparsity & 0x0000_FFFF_FFFF_F000; // 48 bit base, page aligned.
let max_desc_len: u32 = 0x1000;
// First addr starts at page base + 1.
let req_type_addr = GuestAddress(base_addr).checked_add(0x1000).unwrap();
// Use first 4 bits of randomness to shift the gap size between this descriptor
// and the next one.
let mut next_desc_dist = u64::from(max_desc_len) + (0x1000 << (sparsity & 0xF));
let data_addr = req_type_addr.checked_add(next_desc_dist).unwrap();
// Use next 4 bits of randomness to shift gap size between this descriptor
// and the next one.
next_desc_dist = u64::from(max_desc_len) + (0x1000 << ((sparsity & 0xF0) >> 4));
let status_addr = data_addr.checked_add(next_desc_dist).unwrap();
let mem_end = status_addr.checked_add(u64::from(max_desc_len)).unwrap();
let mem = multi_region_mem(&[(
GuestAddress(base_addr),
(mem_end.0 - base_addr).try_into().unwrap(),
)]);
let vq = VirtQueue::new(GuestAddress(base_addr), &mem, 16);
let chain = RequestDescriptorChain::new(&vq);
let q = vq.create_queue();
// Make sure that data_len is a multiple of 512
// and that 512 <= data_len <= (4096 + 512).
let valid_data_len = ((data_len & 4096) | (SECTOR_SIZE - 1)) + 1;
let sectors_len = u64::from(valid_data_len / SECTOR_SIZE);
// Craft a random request with the randomized parameters.
let mut request = Request {
r#type: request_type,
data_len: valid_data_len,
status_addr,
sector: sector & (NUM_DISK_SECTORS - sectors_len),
data_addr,
};
let mut request_header = RequestHeader::new(virtio_request_id, request.sector);
chain.header_desc.addr.set(req_type_addr.0);
chain.header_desc.len.set(max_desc_len);
chain.set_header(request_header);
// Flush requests have no data desc.
if request.r#type == RequestType::Flush {
request.data_addr = GuestAddress(0);
request.data_len = 0;
chain.header_desc.next.set(2);
} else {
chain.data_desc.set(
request.data_addr.0,
request.data_len,
request_type_flags(request.r#type),
2,
);
}
chain
.status_desc
.set(request.status_addr.0, 1, VIRTQ_DESC_F_WRITE, 0);
// Flip a coin - should we generate a valid request or an error.
if *coins.next().unwrap() {
return (Ok(request), mem, q);
}
// This is the initial correct value.
let data_desc_flags = &chain.data_desc.flags;
// Flip coin - corrupt the status desc len.
if *coins.next().unwrap() {
chain.status_desc.len.set(0);
return (Err(VirtioBlockError::DescriptorLengthTooSmall), mem, q);
}
// Flip coin - corrupt data desc next flag.
// Exception: flush requests do not have data desc.
if *coins.next().unwrap() && request.r#type != RequestType::Flush {
data_desc_flags.set(data_desc_flags.get() & !VIRTQ_DESC_F_NEXT);
return (Err(VirtioBlockError::DescriptorChainTooShort), mem, q);
}
// Flip coin - req type desc is write only.
if *coins.next().unwrap() {
let hdr_desc_flags = &chain.header_desc.flags;
hdr_desc_flags.set(hdr_desc_flags.get() | VIRTQ_DESC_F_WRITE);
return (Err(VirtioBlockError::UnexpectedWriteOnlyDescriptor), mem, q);
}
// Corrupt data desc accessibility
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/block/virtio/io/sync_io.rs | src/vmm/src/devices/virtio/block/virtio/io/sync_io.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fs::File;
use std::io::{Seek, SeekFrom, Write};
use vm_memory::{GuestMemoryError, ReadVolatile, WriteVolatile};
use crate::vstate::memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
// Errors the synchronous file engine can return. The `///` doc comments below
// double as the `Display` messages via `displaydoc`, so their text is part of
// runtime behavior and must not be edited casually.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum SyncIoError {
/// Flush: {0}
Flush(std::io::Error),
/// Seek: {0}
Seek(std::io::Error),
/// SyncAll: {0}
SyncAll(std::io::Error),
/// Transfer: {0}
Transfer(GuestMemoryError),
}
// Synchronous (seek + volatile read/write) block I/O engine.
#[derive(Debug)]
pub struct SyncFileEngine {
file: File,
}
// SAFETY: `SyncFileEngine` holds only a `File`, which owns its file
// descriptor and is itself `Send`, so moving the engine across threads is
// sound. NOTE(review): since `File: Send`, the auto trait would already
// apply and this manual impl looks redundant — confirm before removing.
unsafe impl Send for SyncFileEngine {}
impl SyncFileEngine {
pub fn from_file(file: File) -> SyncFileEngine {
SyncFileEngine { file }
}
#[cfg(test)]
pub fn file(&self) -> &File {
&self.file
}
/// Update the backing file of the engine
pub fn update_file(&mut self, file: File) {
self.file = file
}
pub fn read(
&mut self,
offset: u64,
mem: &GuestMemoryMmap,
addr: GuestAddress,
count: u32,
) -> Result<u32, SyncIoError> {
self.file
.seek(SeekFrom::Start(offset))
.map_err(SyncIoError::Seek)?;
mem.get_slice(addr, count as usize)
.and_then(|mut slice| Ok(self.file.read_exact_volatile(&mut slice)?))
.map_err(SyncIoError::Transfer)?;
Ok(count)
}
pub fn write(
&mut self,
offset: u64,
mem: &GuestMemoryMmap,
addr: GuestAddress,
count: u32,
) -> Result<u32, SyncIoError> {
self.file
.seek(SeekFrom::Start(offset))
.map_err(SyncIoError::Seek)?;
mem.get_slice(addr, count as usize)
.and_then(|slice| Ok(self.file.write_all_volatile(&slice)?))
.map_err(SyncIoError::Transfer)?;
Ok(count)
}
pub fn flush(&mut self) -> Result<(), SyncIoError> {
// flush() first to force any cached data out of rust buffers.
self.file.flush().map_err(SyncIoError::Flush)?;
// Sync data out to physical media on host.
self.file.sync_all().map_err(SyncIoError::SyncAll)
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/block/virtio/io/mod.rs | src/vmm/src/devices/virtio/block/virtio/io/mod.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
pub mod async_io;
pub mod sync_io;
use std::fmt::Debug;
use std::fs::File;
pub use self::async_io::{AsyncFileEngine, AsyncIoError};
pub use self::sync_io::{SyncFileEngine, SyncIoError};
use crate::devices::virtio::block::virtio::PendingRequest;
use crate::devices::virtio::block::virtio::device::FileEngineType;
use crate::vstate::memory::{GuestAddress, GuestMemoryMmap};
// Outcome of a successfully executed request: the completed request together
// with the number of bytes transferred.
#[derive(Debug)]
pub struct RequestOk {
pub req: PendingRequest,
pub count: u32,
}
// Result of handing a request to a `FileEngine`: either queued for later
// asynchronous completion (`Submitted`) or finished inline (`Executed`).
#[derive(Debug)]
pub enum FileEngineOk {
Submitted,
Executed(RequestOk),
}
// Engine-agnostic I/O error. The `///` doc comments double as `Display`
// messages via `displaydoc`; do not edit their text casually.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum BlockIoError {
/// Sync error: {0}
Sync(SyncIoError),
/// Async error: {0}
Async(AsyncIoError),
}
impl BlockIoError {
    /// `true` iff the underlying failure is an io_uring throttling error,
    /// i.e. the request should be retried rather than failed. Only the async
    /// engine can report throttling.
    pub fn is_throttling_err(&self) -> bool {
        matches!(
            self,
            BlockIoError::Async(AsyncIoError::IoUring(err)) if err.is_throttling_err()
        )
    }
}
// A failed request: the error together with the request it belongs to, so the
// caller can still complete and report it.
#[derive(Debug)]
pub struct RequestError<E> {
pub req: PendingRequest,
pub error: E,
}
// Dispatch enum over the two engine implementations. The clippy lint is
// silenced because the variants differ in size.
#[allow(clippy::large_enum_variant)]
#[derive(Debug)]
pub enum FileEngine {
#[allow(unused)]
Async(AsyncFileEngine),
Sync(SyncFileEngine),
}
impl FileEngine {
pub fn from_file(file: File, engine_type: FileEngineType) -> Result<FileEngine, BlockIoError> {
match engine_type {
FileEngineType::Async => Ok(FileEngine::Async(
AsyncFileEngine::from_file(file).map_err(BlockIoError::Async)?,
)),
FileEngineType::Sync => Ok(FileEngine::Sync(SyncFileEngine::from_file(file))),
}
}
pub fn update_file_path(&mut self, file: File) -> Result<(), BlockIoError> {
match self {
FileEngine::Async(engine) => engine.update_file(file).map_err(BlockIoError::Async)?,
FileEngine::Sync(engine) => engine.update_file(file),
};
Ok(())
}
#[cfg(test)]
pub fn file(&self) -> &File {
match self {
FileEngine::Async(engine) => engine.file(),
FileEngine::Sync(engine) => engine.file(),
}
}
pub fn read(
&mut self,
offset: u64,
mem: &GuestMemoryMmap,
addr: GuestAddress,
count: u32,
req: PendingRequest,
) -> Result<FileEngineOk, RequestError<BlockIoError>> {
match self {
FileEngine::Async(engine) => match engine.push_read(offset, mem, addr, count, req) {
Ok(_) => Ok(FileEngineOk::Submitted),
Err(err) => Err(RequestError {
req: err.req,
error: BlockIoError::Async(err.error),
}),
},
FileEngine::Sync(engine) => match engine.read(offset, mem, addr, count) {
Ok(count) => Ok(FileEngineOk::Executed(RequestOk { req, count })),
Err(err) => Err(RequestError {
req,
error: BlockIoError::Sync(err),
}),
},
}
}
pub fn write(
&mut self,
offset: u64,
mem: &GuestMemoryMmap,
addr: GuestAddress,
count: u32,
req: PendingRequest,
) -> Result<FileEngineOk, RequestError<BlockIoError>> {
match self {
FileEngine::Async(engine) => match engine.push_write(offset, mem, addr, count, req) {
Ok(_) => Ok(FileEngineOk::Submitted),
Err(err) => Err(RequestError {
req: err.req,
error: BlockIoError::Async(err.error),
}),
},
FileEngine::Sync(engine) => match engine.write(offset, mem, addr, count) {
Ok(count) => Ok(FileEngineOk::Executed(RequestOk { req, count })),
Err(err) => Err(RequestError {
req,
error: BlockIoError::Sync(err),
}),
},
}
}
pub fn flush(
&mut self,
req: PendingRequest,
) -> Result<FileEngineOk, RequestError<BlockIoError>> {
match self {
FileEngine::Async(engine) => match engine.push_flush(req) {
Ok(_) => Ok(FileEngineOk::Submitted),
Err(err) => Err(RequestError {
req: err.req,
error: BlockIoError::Async(err.error),
}),
},
FileEngine::Sync(engine) => match engine.flush() {
Ok(_) => Ok(FileEngineOk::Executed(RequestOk { req, count: 0 })),
Err(err) => Err(RequestError {
req,
error: BlockIoError::Sync(err),
}),
},
}
}
pub fn drain(&mut self, discard: bool) -> Result<(), BlockIoError> {
match self {
FileEngine::Async(engine) => engine.drain(discard).map_err(BlockIoError::Async),
FileEngine::Sync(_engine) => Ok(()),
}
}
pub fn drain_and_flush(&mut self, discard: bool) -> Result<(), BlockIoError> {
match self {
FileEngine::Async(engine) => {
engine.drain_and_flush(discard).map_err(BlockIoError::Async)
}
FileEngine::Sync(engine) => engine.flush().map_err(BlockIoError::Sync),
}
}
}
#[cfg(test)]
pub mod tests {
    #![allow(clippy::undocumented_unsafe_blocks)]
    use std::os::unix::ffi::OsStrExt;

    use vm_memory::GuestMemoryRegion;
    use vmm_sys_util::tempfile::TempFile;

    use super::*;
    use crate::devices::virtio::block::virtio::device::FileEngineType;
    use crate::utils::u64_to_usize;
    use crate::vmm_config::machine_config::HugePageConfig;
    use crate::vstate::memory;
    use crate::vstate::memory::{Bitmap, Bytes, GuestMemory, GuestRegionMmapExt};

    const FILE_LEN: u32 = 1024;
    // 2 pages of memory should be enough to test read/write ops and also dirty tracking.
    const MEM_LEN: usize = 8192;

    // Assert that `$expression` executed synchronously and transferred `$count` bytes.
    macro_rules! assert_sync_execution {
        ($expression:expr, $count:expr) => {
            match $expression {
                Ok(FileEngineOk::Executed(RequestOk { req: _, count })) => {
                    assert_eq!(count, $count)
                }
                // BUGFIX: the panic message previously referenced the stale
                // names `UserDataOk { user_data: _ }`; the type is
                // `RequestOk { req, count }`.
                other => panic!(
                    "Expected: Ok(FileEngineOk::Executed(RequestOk {{ req: _, count: {} }})), \
                     got: {:?}",
                    $count, other
                ),
            }
        };
    }

    // Assert that `$expression` was queued on the async engine.
    macro_rules! assert_queued {
        ($expression:expr) => {
            assert!(matches!($expression, Ok(FileEngineOk::Submitted)))
        };
    }

    // Drain the async engine and assert the next completion transferred `count` bytes.
    fn assert_async_execution(mem: &GuestMemoryMmap, engine: &mut FileEngine, count: u32) {
        if let FileEngine::Async(engine) = engine {
            engine.drain(false).unwrap();
            assert_eq!(engine.pop(mem).unwrap().unwrap().result().unwrap(), count);
        }
    }

    // Fresh anonymous guest memory with dirty-page tracking enabled.
    fn create_mem() -> GuestMemoryMmap {
        GuestMemoryMmap::from_regions(
            memory::anonymous(
                [(GuestAddress(0), MEM_LEN)].into_iter(),
                true,
                HugePageConfig::None,
            )
            .unwrap()
            .into_iter()
            .map(|region| GuestRegionMmapExt::dram_from_mmap_region(region, 0))
            .collect(),
        )
        .unwrap()
    }

    fn check_dirty_mem(mem: &GuestMemoryMmap, addr: GuestAddress, len: u32) {
        let bitmap = mem.find_region(addr).unwrap().bitmap();
        for offset in addr.0..addr.0 + u64::from(len) {
            assert!(bitmap.dirty_at(u64_to_usize(offset)));
        }
    }

    fn check_clean_mem(mem: &GuestMemoryMmap, addr: GuestAddress, len: u32) {
        let bitmap = mem.find_region(addr).unwrap().bitmap();
        for offset in addr.0..addr.0 + u64::from(len) {
            assert!(!bitmap.dirty_at(u64_to_usize(offset)));
        }
    }

    #[test]
    fn test_sync() {
        let mem = create_mem();
        // Create backing file.
        let file = TempFile::new().unwrap().into_file();
        let mut engine = FileEngine::from_file(file, FileEngineType::Sync).unwrap();
        let data = vmm_sys_util::rand::rand_alphanumerics(FILE_LEN as usize)
            .as_bytes()
            .to_vec();
        // Partial write
        let partial_len = 50;
        let addr = GuestAddress(MEM_LEN as u64 - u64::from(partial_len));
        mem.write(&data, addr).unwrap();
        assert_sync_execution!(
            engine.write(0, &mem, addr, partial_len, PendingRequest::default()),
            partial_len
        );
        // Partial read
        let mem = create_mem();
        assert_sync_execution!(
            engine.read(0, &mem, addr, partial_len, PendingRequest::default()),
            partial_len
        );
        // Check data
        let mut buf = vec![0u8; partial_len as usize];
        mem.read_slice(&mut buf, addr).unwrap();
        assert_eq!(buf, data[..partial_len as usize]);
        // Offset write
        let offset = 100;
        let partial_len = 50;
        let addr = GuestAddress(0);
        mem.write(&data, addr).unwrap();
        assert_sync_execution!(
            engine.write(offset, &mem, addr, partial_len, PendingRequest::default()),
            partial_len
        );
        // Offset read
        let mem = create_mem();
        assert_sync_execution!(
            engine.read(offset, &mem, addr, partial_len, PendingRequest::default()),
            partial_len
        );
        // Check data
        let mut buf = vec![0u8; partial_len as usize];
        mem.read_slice(&mut buf, addr).unwrap();
        assert_eq!(buf, data[..partial_len as usize]);
        // Full write
        mem.write(&data, GuestAddress(0)).unwrap();
        assert_sync_execution!(
            engine.write(
                0,
                &mem,
                GuestAddress(0),
                FILE_LEN,
                PendingRequest::default()
            ),
            FILE_LEN
        );
        // Full read
        let mem = create_mem();
        assert_sync_execution!(
            engine.read(
                0,
                &mem,
                GuestAddress(0),
                FILE_LEN,
                PendingRequest::default()
            ),
            FILE_LEN
        );
        // Check data
        let mut buf = vec![0u8; FILE_LEN as usize];
        mem.read_slice(&mut buf, GuestAddress(0)).unwrap();
        assert_eq!(buf, data.as_slice());
        // Check other ops
        engine.flush(PendingRequest::default()).unwrap();
        engine.drain(true).unwrap();
        engine.drain_and_flush(true).unwrap();
    }

    #[test]
    fn test_async() {
        // Create backing file.
        let file = TempFile::new().unwrap().into_file();
        let mut engine = FileEngine::from_file(file, FileEngineType::Async).unwrap();
        let data = vmm_sys_util::rand::rand_alphanumerics(FILE_LEN as usize)
            .as_bytes()
            .to_vec();
        // Partial reads and writes cannot really be tested because io_uring will return an error
        // code for trying to write to unmapped memory.
        // Offset write
        let mem = create_mem();
        let offset = 100;
        let partial_len = 50;
        let addr = GuestAddress(0);
        mem.write(&data, addr).unwrap();
        assert_queued!(engine.write(offset, &mem, addr, partial_len, PendingRequest::default()));
        assert_async_execution(&mem, &mut engine, partial_len);
        // Offset read
        let mem = create_mem();
        assert_queued!(engine.read(offset, &mem, addr, partial_len, PendingRequest::default()));
        assert_async_execution(&mem, &mut engine, partial_len);
        // Check data
        let mut buf = vec![0u8; partial_len as usize];
        mem.read_slice(&mut buf, addr).unwrap();
        assert_eq!(buf, data[..partial_len as usize]);
        // check dirty mem
        check_dirty_mem(&mem, addr, partial_len);
        check_clean_mem(&mem, GuestAddress(4096), 4096);
        // Full write
        mem.write(&data, GuestAddress(0)).unwrap();
        assert_queued!(engine.write(0, &mem, addr, FILE_LEN, PendingRequest::default()));
        assert_async_execution(&mem, &mut engine, FILE_LEN);
        // Full read
        let mem = create_mem();
        assert_queued!(engine.read(0, &mem, addr, FILE_LEN, PendingRequest::default()));
        assert_async_execution(&mem, &mut engine, FILE_LEN);
        // Check data
        let mut buf = vec![0u8; FILE_LEN as usize];
        mem.read_slice(&mut buf, GuestAddress(0)).unwrap();
        assert_eq!(buf, data.as_slice());
        // check dirty mem
        check_dirty_mem(&mem, addr, FILE_LEN);
        check_clean_mem(&mem, GuestAddress(4096), 4096);
        // Check other ops
        assert_queued!(engine.flush(PendingRequest::default()));
        assert_async_execution(&mem, &mut engine, 0);
        engine.drain(true).unwrap();
        engine.drain_and_flush(true).unwrap();
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/block/virtio/io/async_io.rs | src/vmm/src/devices/virtio/block/virtio/io/async_io.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fmt::Debug;
use std::fs::File;
use std::os::fd::RawFd;
use std::os::unix::io::AsRawFd;
use vm_memory::GuestMemoryError;
use vmm_sys_util::eventfd::EventFd;
use crate::devices::virtio::block::virtio::io::RequestError;
use crate::devices::virtio::block::virtio::{IO_URING_NUM_ENTRIES, PendingRequest};
use crate::io_uring::operation::{Cqe, OpCode, Operation};
use crate::io_uring::restriction::Restriction;
use crate::io_uring::{IoUring, IoUringError};
use crate::logger::log_dev_preview_warning;
use crate::vstate::memory::{GuestAddress, GuestMemory, GuestMemoryExtension, GuestMemoryMmap};
// Errors the io_uring-backed async engine can return. The `///` doc comments
// double as `Display` messages via `displaydoc`; do not edit their text.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum AsyncIoError {
/// IO: {0}
IO(std::io::Error),
/// IoUring: {0}
IoUring(IoUringError),
/// Submit: {0}
Submit(std::io::Error),
/// SyncAll: {0}
SyncAll(std::io::Error),
/// EventFd: {0}
EventFd(std::io::Error),
/// GuestMemory: {0}
GuestMemory(GuestMemoryError),
}
// io_uring-backed engine: submissions target `file`, completions are
// signalled through `completion_evt`.
#[derive(Debug)]
pub struct AsyncFileEngine {
file: File,
ring: IoUring<WrappedRequest>,
completion_evt: EventFd,
}
// User data attached to each io_uring submission. `addr` is `Some` only for
// operations that modify guest memory (reads), so that completion can mark
// the touched pages dirty.
#[derive(Debug)]
pub struct WrappedRequest {
addr: Option<GuestAddress>,
req: PendingRequest,
}
impl WrappedRequest {
// Wrap a request that does not touch guest memory (writes, fsync).
fn new(req: PendingRequest) -> Self {
WrappedRequest { addr: None, req }
}
// Wrap a request that writes into guest memory at `addr` (reads); the
// address is kept so completion can dirty the pages.
fn new_with_dirty_tracking(addr: GuestAddress, req: PendingRequest) -> Self {
WrappedRequest {
addr: Some(addr),
req,
}
}
// On completion: mark `count` bytes dirty if tracking was requested at
// submit time, then hand back the inner pending request.
fn mark_dirty_mem_and_unwrap(self, mem: &GuestMemoryMmap, count: u32) -> PendingRequest {
if let Some(addr) = self.addr {
mem.mark_dirty(addr, count as usize)
}
self.req
}
}
impl AsyncFileEngine {
fn new_ring(
file: &File,
completion_fd: RawFd,
) -> Result<IoUring<WrappedRequest>, IoUringError> {
IoUring::new(
u32::from(IO_URING_NUM_ENTRIES),
vec![file],
vec![
// Make sure we only allow operations on pre-registered fds.
Restriction::RequireFixedFds,
// Allowlist of opcodes.
Restriction::AllowOpCode(OpCode::Read),
Restriction::AllowOpCode(OpCode::Write),
Restriction::AllowOpCode(OpCode::Fsync),
],
Some(completion_fd),
)
}
pub fn from_file(file: File) -> Result<AsyncFileEngine, AsyncIoError> {
log_dev_preview_warning("Async file IO", Option::None);
let completion_evt = EventFd::new(libc::EFD_NONBLOCK).map_err(AsyncIoError::EventFd)?;
let ring =
Self::new_ring(&file, completion_evt.as_raw_fd()).map_err(AsyncIoError::IoUring)?;
Ok(AsyncFileEngine {
file,
ring,
completion_evt,
})
}
pub fn update_file(&mut self, file: File) -> Result<(), AsyncIoError> {
let ring = Self::new_ring(&file, self.completion_evt.as_raw_fd())
.map_err(AsyncIoError::IoUring)?;
self.file = file;
self.ring = ring;
Ok(())
}
#[cfg(test)]
pub fn file(&self) -> &File {
&self.file
}
pub fn completion_evt(&self) -> &EventFd {
&self.completion_evt
}
pub fn push_read(
&mut self,
offset: u64,
mem: &GuestMemoryMmap,
addr: GuestAddress,
count: u32,
req: PendingRequest,
) -> Result<(), RequestError<AsyncIoError>> {
let buf = match mem.get_slice(addr, count as usize) {
Ok(slice) => slice.ptr_guard_mut().as_ptr(),
Err(err) => {
return Err(RequestError {
req,
error: AsyncIoError::GuestMemory(err),
});
}
};
let wrapped_user_data = WrappedRequest::new_with_dirty_tracking(addr, req);
self.ring
.push(Operation::read(
0,
buf as usize,
count,
offset,
wrapped_user_data,
))
.map_err(|(io_uring_error, data)| RequestError {
req: data.req,
error: AsyncIoError::IoUring(io_uring_error),
})
}
pub fn push_write(
&mut self,
offset: u64,
mem: &GuestMemoryMmap,
addr: GuestAddress,
count: u32,
req: PendingRequest,
) -> Result<(), RequestError<AsyncIoError>> {
let buf = match mem.get_slice(addr, count as usize) {
Ok(slice) => slice.ptr_guard_mut().as_ptr(),
Err(err) => {
return Err(RequestError {
req,
error: AsyncIoError::GuestMemory(err),
});
}
};
let wrapped_user_data = WrappedRequest::new(req);
self.ring
.push(Operation::write(
0,
buf as usize,
count,
offset,
wrapped_user_data,
))
.map_err(|(io_uring_error, data)| RequestError {
req: data.req,
error: AsyncIoError::IoUring(io_uring_error),
})
}
/// Queues an fsync on the backing file.
///
/// Like the other `push_*` methods, the operation only reaches the kernel
/// once the submission queue is kicked. A push failure hands the original
/// `PendingRequest` back to the caller inside the error.
pub fn push_flush(&mut self, req: PendingRequest) -> Result<(), RequestError<AsyncIoError>> {
    let op = Operation::fsync(0, WrappedRequest::new(req));
    if let Err((io_uring_error, wrapped)) = self.ring.push(op) {
        return Err(RequestError {
            req: wrapped.req,
            error: AsyncIoError::IoUring(io_uring_error),
        });
    }
    Ok(())
}
/// Submits all currently queued operations to the kernel without waiting
/// for any of them to complete.
pub fn kick_submission_queue(&mut self) -> Result<(), AsyncIoError> {
    match self.ring.submit() {
        Ok(_) => Ok(()),
        Err(err) => Err(AsyncIoError::IoUring(err)),
    }
}
/// Submits every queued operation and waits until all of them complete.
///
/// With `discard_cqes == true` the completion queue is emptied as well,
/// which releases the per-request `user_data` allocations without
/// reporting the results to the caller.
pub fn drain(&mut self, discard_cqes: bool) -> Result<(), AsyncIoError> {
self.ring
.submit_and_wait_all()
.map(|_| ())
.map_err(AsyncIoError::IoUring)?;
if discard_cqes {
// Drain the completion queue so that we may deallocate the user_data fields.
while self.do_pop()?.is_some() {}
}
Ok(())
}
/// Drains all in-flight operations (see [`Self::drain`]) and then syncs
/// the backing file so the data reaches the host's physical media.
pub fn drain_and_flush(&mut self, discard_cqes: bool) -> Result<(), AsyncIoError> {
self.drain(discard_cqes)?;
// Sync data out to physical media on host.
// We don't need to call flush first since all the ops are performed through io_uring
// and Rust shouldn't manage any data in its internal buffers.
self.file.sync_all().map_err(AsyncIoError::SyncAll)?;
Ok(())
}
// Pops one raw completion entry, still carrying the wrapped user data
// (no dirty-memory bookkeeping happens here — see `pop`).
fn do_pop(&mut self) -> Result<Option<Cqe<WrappedRequest>>, AsyncIoError> {
self.ring.pop().map_err(AsyncIoError::IoUring)
}
/// Pops one completion, returning `Ok(None)` when the completion queue is
/// empty.
///
/// Unwraps the request from its wrapper; for reads this also marks the
/// guest pages that were written to as dirty, using the completed byte
/// count from the CQE.
pub fn pop(
    &mut self,
    mem: &GuestMemoryMmap,
) -> Result<Option<Cqe<PendingRequest>>, AsyncIoError> {
    match self.do_pop()? {
        None => Ok(None),
        Some(cqe) => {
            let completed_bytes = cqe.count();
            let unwrapped = cqe.map_user_data(|wrapped| {
                wrapped.mark_dirty_mem_and_unwrap(mem, completed_bytes)
            });
            Ok(Some(unwrapped))
        }
    }
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/block/vhost_user/persist.rs | src/vmm/src/devices/virtio/block/vhost_user/persist.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the structures needed for saving/restoring block devices.
use serde::{Deserialize, Serialize};
use super::VhostUserBlockError;
use super::device::VhostUserBlock;
use crate::devices::virtio::block::CacheType;
use crate::devices::virtio::block::persist::BlockConstructorArgs;
use crate::devices::virtio::persist::VirtioDeviceState;
use crate::snapshot::Persist;
/// vhost-user block device state.
///
/// NOTE(review): serialization exists, but `Persist::save` for this device
/// is unimplemented below — this state type is currently never produced.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VhostUserBlockState {
// Unique drive id.
id: String,
// Optional boot-partition UUID.
partuuid: Option<String>,
// Cache strategy (see `CacheType`).
cache_type: CacheType,
// Whether this drive is the root block device.
root_device: bool,
// Path of the vhost-user backend socket.
socket_path: String,
// Protocol features acked by the backend during negotiation.
vu_acked_protocol_features: u64,
// Raw virtio config space fetched from the backend.
config_space: Vec<u8>,
// Generic virtio device state (queues, features, ...).
virtio_state: VirtioDeviceState,
}
// Snapshotting vhost-user block devices is not supported: `save` panics
// (it should be unreachable — snapshot creation is expected to reject such
// devices earlier) while `restore` surfaces a proper error to the caller.
impl Persist<'_> for VhostUserBlock {
type State = VhostUserBlockState;
type ConstructorArgs = BlockConstructorArgs;
type Error = VhostUserBlockError;
fn save(&self) -> Self::State {
unimplemented!("VhostUserBlock does not support snapshotting yet");
}
fn restore(
_constructor_args: Self::ConstructorArgs,
_state: &Self::State,
) -> Result<Self, Self::Error> {
Err(VhostUserBlockError::SnapshottingNotSupported)
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/block/vhost_user/device.rs | src/vmm/src/devices/virtio/block/vhost_user/device.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Portions Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::ops::Deref;
use std::sync::Arc;
use log::error;
use utils::time::{ClockType, get_time_us};
use vhost::vhost_user::Frontend;
use vhost::vhost_user::message::*;
use vmm_sys_util::eventfd::EventFd;
use super::{NUM_QUEUES, QUEUE_SIZE, VhostUserBlockError};
use crate::devices::virtio::ActivateError;
use crate::devices::virtio::block::CacheType;
use crate::devices::virtio::device::{ActiveState, DeviceState, VirtioDevice};
use crate::devices::virtio::generated::virtio_blk::{VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_RO};
use crate::devices::virtio::generated::virtio_config::VIRTIO_F_VERSION_1;
use crate::devices::virtio::generated::virtio_ids::VIRTIO_ID_BLOCK;
use crate::devices::virtio::generated::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use crate::devices::virtio::queue::Queue;
use crate::devices::virtio::transport::{VirtioInterrupt, VirtioInterruptType};
use crate::devices::virtio::vhost_user::{VhostUserHandleBackend, VhostUserHandleImpl};
use crate::devices::virtio::vhost_user_metrics::{
VhostUserDeviceMetrics, VhostUserMetricsPerDevice,
};
use crate::impl_device_type;
use crate::logger::{IncMetric, StoreMetric, log_dev_preview_warning};
use crate::utils::u64_to_usize;
use crate::vmm_config::drive::BlockDeviceConfig;
use crate::vstate::memory::GuestMemoryMmap;
/// Block device config space size in bytes.
const BLOCK_CONFIG_SPACE_SIZE: u32 = 60;
const AVAILABLE_FEATURES: u64 = (1 << VIRTIO_F_VERSION_1)
| (1 << VIRTIO_RING_F_EVENT_IDX)
// vhost-user specific bit. Not defined in standard virtio spec.
// Specifies ability of frontend to negotiate protocol features.
| VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits()
// We always try to negotiate readonly with the backend.
// If the backend is configured as readonly, we will accept it.
| (1 << VIRTIO_BLK_F_RO);
/// Use this structure to set up the Block Device before booting the kernel.
#[derive(Debug, PartialEq, Eq)]
pub struct VhostUserBlockConfig {
/// Unique identifier of the drive.
pub drive_id: String,
/// Part-UUID. Represents the unique id of the boot partition of this device. It is
/// optional and it will be used only if the `is_root_device` field is true.
pub partuuid: Option<String>,
/// If set to true, it makes the current device the root block device.
/// Setting this flag to true will mount the block device in the
/// guest under /dev/vda unless the partuuid is present.
pub is_root_device: bool,
/// Caching strategy. `Writeback` additionally requests the
/// `VIRTIO_BLK_F_FLUSH` feature from the backend (see `new`).
pub cache_type: CacheType,
/// Socket path of the vhost-user process
pub socket: String,
}
impl TryFrom<&BlockDeviceConfig> for VhostUserBlockConfig {
    type Error = VhostUserBlockError;

    /// A generic block config converts to a vhost-user one only if it has a
    /// socket path and carries none of the virtio-block-only options.
    fn try_from(value: &BlockDeviceConfig) -> Result<Self, Self::Error> {
        let virtio_only_fields_absent = value.is_read_only.is_none()
            && value.path_on_host.is_none()
            && value.rate_limiter.is_none()
            && value.file_engine_type.is_none();
        match value.socket.as_ref() {
            Some(socket) if virtio_only_fields_absent => Ok(Self {
                drive_id: value.drive_id.clone(),
                partuuid: value.partuuid.clone(),
                is_root_device: value.is_root_device,
                cache_type: value.cache_type,
                socket: socket.clone(),
            }),
            _ => Err(VhostUserBlockError::Config),
        }
    }
}
impl From<VhostUserBlockConfig> for BlockDeviceConfig {
fn from(value: VhostUserBlockConfig) -> Self {
Self {
drive_id: value.drive_id,
partuuid: value.partuuid,
is_root_device: value.is_root_device,
cache_type: value.cache_type,
is_read_only: None,
path_on_host: None,
rate_limiter: None,
file_engine_type: None,
socket: Some(value.socket),
}
}
}
pub type VhostUserBlock = VhostUserBlockImpl<Frontend>;
/// vhost-user block device.
///
/// Generic over the vhost-user backend handle so tests can substitute a
/// mock for `vhost::vhost_user::Frontend` (see the `tests` module).
pub struct VhostUserBlockImpl<T: VhostUserHandleBackend> {
// Virtio fields.
pub avail_features: u64,
pub acked_features: u64,
pub config_space: Vec<u8>,
// Signalled when the guest driver activates the device.
pub activate_evt: EventFd,
// Transport related fields.
pub queues: Vec<Queue>,
pub queue_evts: [EventFd; u64_to_usize(NUM_QUEUES)],
pub device_state: DeviceState,
// Implementation specific fields.
pub id: String,
pub partuuid: Option<String>,
pub cache_type: CacheType,
pub root_device: bool,
pub read_only: bool,
// Vhost user protocol handle
pub vu_handle: VhostUserHandleImpl<T>,
// Protocol features the backend acked during negotiation.
pub vu_acked_protocol_features: u64,
pub metrics: Arc<VhostUserDeviceMetrics>,
}
// Need custom implementation because otherwise `Debug` is required for `vhost::Master`
// (the `vu` field inside `vu_handle` is skipped for the same reason).
impl<T: VhostUserHandleBackend> std::fmt::Debug for VhostUserBlockImpl<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("VhostUserBlockImpl")
.field("avail_features", &self.avail_features)
.field("acked_features", &self.acked_features)
.field("config_space", &self.config_space)
.field("activate_evt", &self.activate_evt)
.field("queues", &self.queues)
.field("queue_evts", &self.queue_evts)
.field("device_state", &self.device_state)
.field("id", &self.id)
.field("partuuid", &self.partuuid)
.field("cache_type", &self.cache_type)
.field("root_device", &self.root_device)
.field("read_only", &self.read_only)
.field("vu_handle", &self.vu_handle)
.field(
"vu_acked_protocol_features",
&self.vu_acked_protocol_features,
)
.field("metrics", &self.metrics)
.finish()
}
}
impl<T: VhostUserHandleBackend> VhostUserBlockImpl<T> {
/// Creates a vhost-user block device: connects to the backend over
/// `config.socket`, negotiates virtio and vhost-user protocol features,
/// and fetches the initial config space if the backend supports CONFIG.
pub fn new(config: VhostUserBlockConfig) -> Result<Self, VhostUserBlockError> {
log_dev_preview_warning("vhost-user-blk device", Option::None);
// Creation time is reported via the `init_time_us` metric below.
let start_time = get_time_us(ClockType::Monotonic);
let mut requested_features = AVAILABLE_FEATURES;
// Only offer FLUSH for write-back cache drives.
if config.cache_type == CacheType::Writeback {
requested_features |= 1 << VIRTIO_BLK_F_FLUSH;
}
let requested_protocol_features = VhostUserProtocolFeatures::CONFIG;
let mut vu_handle = VhostUserHandleImpl::<T>::new(&config.socket, NUM_QUEUES)
.map_err(VhostUserBlockError::VhostUser)?;
let (acked_features, acked_protocol_features) = vu_handle
.negotiate_features(requested_features, requested_protocol_features)
.map_err(VhostUserBlockError::VhostUser)?;
// Get config from backend if CONFIG is acked or use empty buffer.
let config_space =
if acked_protocol_features & VhostUserProtocolFeatures::CONFIG.bits() != 0 {
// This buffer is used for config size check in vhost crate.
let buffer = [0u8; BLOCK_CONFIG_SPACE_SIZE as usize];
let (_, new_config_space) = vu_handle
.vu
.get_config(
0,
BLOCK_CONFIG_SPACE_SIZE,
VhostUserConfigFlags::WRITABLE,
&buffer,
)
.map_err(VhostUserBlockError::Vhost)?;
new_config_space
} else {
vec![]
};
let activate_evt =
EventFd::new(libc::EFD_NONBLOCK).map_err(VhostUserBlockError::EventFd)?;
let queues = vec![Queue::new(QUEUE_SIZE)];
let queue_evts = [EventFd::new(libc::EFD_NONBLOCK).map_err(VhostUserBlockError::EventFd)?;
u64_to_usize(NUM_QUEUES)];
let device_state = DeviceState::Inactive;
// We negotiated features with backend. Now these acked_features
// are available for guest driver to choose from.
let avail_features = acked_features;
// Shadowing: from here on `acked_features` keeps only the vhost-user
// PROTOCOL_FEATURES bit; the full negotiated set lives in
// `avail_features`.
let acked_features = acked_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
// NOTE(review): this tests the RO bit on the *masked* value, which can
// only contain the PROTOCOL_FEATURES bit — so `read_only` is always
// false here. `avail_features` looks like the intended operand, but
// the unit test (`test_new_all_features`) pins the current behavior;
// confirm upstream intent before changing.
let read_only = acked_features & (1 << VIRTIO_BLK_F_RO) != 0;
let vhost_user_block_metrics_name = format!("block_{}", config.drive_id);
let metrics = VhostUserMetricsPerDevice::alloc(vhost_user_block_metrics_name);
let delta_us = get_time_us(ClockType::Monotonic) - start_time;
metrics.init_time_us.store(delta_us);
Ok(Self {
avail_features,
acked_features,
config_space,
activate_evt,
queues,
queue_evts,
device_state,
id: config.drive_id,
partuuid: config.partuuid,
cache_type: config.cache_type,
read_only,
root_device: config.is_root_device,
vu_handle,
vu_acked_protocol_features: acked_protocol_features,
metrics,
})
}
/// Prepare device for being snapshotted.
pub fn prepare_save(&mut self) {
unimplemented!("VhostUserBlock does not support snapshotting yet");
}
/// Returns the user-facing configuration this device was built from.
pub fn config(&self) -> VhostUserBlockConfig {
VhostUserBlockConfig {
drive_id: self.id.clone(),
partuuid: self.partuuid.clone(),
is_root_device: self.root_device,
cache_type: self.cache_type,
socket: self.vu_handle.socket_path.clone(),
}
}
/// Re-reads the config space from the backend and notifies the guest with
/// a config-change interrupt. Panics if the device is not yet activated.
pub fn config_update(&mut self) -> Result<(), VhostUserBlockError> {
let start_time = get_time_us(ClockType::Monotonic);
let interrupt = self
.device_state
.active_state()
.expect("Device is not initialized")
.interrupt
.clone();
// This buffer is used for config size check in vhost crate.
let buffer = [0u8; BLOCK_CONFIG_SPACE_SIZE as usize];
let (_, new_config_space) = self
.vu_handle
.vu
.get_config(
0,
BLOCK_CONFIG_SPACE_SIZE,
VhostUserConfigFlags::WRITABLE,
&buffer,
)
.map_err(VhostUserBlockError::Vhost)?;
self.config_space = new_config_space;
interrupt
.trigger(VirtioInterruptType::Config)
.map_err(VhostUserBlockError::Interrupt)?;
let delta_us = get_time_us(ClockType::Monotonic) - start_time;
self.metrics.config_change_time_us.store(delta_us);
Ok(())
}
}
impl<T: VhostUserHandleBackend + Send + 'static> VirtioDevice for VhostUserBlockImpl<T> {
impl_device_type!(VIRTIO_ID_BLOCK);
fn avail_features(&self) -> u64 {
self.avail_features
}
fn acked_features(&self) -> u64 {
self.acked_features
}
fn set_acked_features(&mut self, acked_features: u64) {
self.acked_features = acked_features;
}
fn queues(&self) -> &[Queue] {
&self.queues
}
fn queues_mut(&mut self) -> &mut [Queue] {
&mut self.queues
}
fn queue_events(&self) -> &[EventFd] {
&self.queue_evts
}
// Panics if called before `activate` — the interrupt only exists in the
// activated state.
fn interrupt_trigger(&self) -> &dyn VirtioInterrupt {
self.device_state
.active_state()
.expect("Device is not initialized")
.interrupt
.deref()
}
// Copies as much of the config space as fits; an offset past the end of
// the config space is logged and counted as a failure, not a panic.
fn read_config(&self, offset: u64, data: &mut [u8]) {
if let Some(config_space_bytes) = self.config_space.as_slice().get(u64_to_usize(offset)..) {
let len = config_space_bytes.len().min(data.len());
data[..len].copy_from_slice(&config_space_bytes[..len]);
} else {
error!("Failed to read config space");
self.metrics.cfg_fails.inc();
}
}
fn write_config(&mut self, _offset: u64, _data: &[u8]) {
// We do not advertise VIRTIO_BLK_F_CONFIG_WCE
// that would allow configuring the "writeback" field.
// Other block config fields are immutable.
}
// Finalizes feature negotiation with the backend and hands it the guest
// memory table, queue layout and notification fds.
fn activate(
&mut self,
mem: GuestMemoryMmap,
interrupt: Arc<dyn VirtioInterrupt>,
) -> Result<(), ActivateError> {
for q in self.queues.iter_mut() {
q.initialize(&mem)
.map_err(ActivateError::QueueMemoryError)?;
}
// Activation time is reported via the `activate_time_us` metric below.
let start_time = get_time_us(ClockType::Monotonic);
// Setting features again, because now we negotiated them
// with guest driver as well.
self.vu_handle
.set_features(self.acked_features)
.and_then(|()| {
self.vu_handle.setup_backend(
&mem,
&[(0, &self.queues[0], &self.queue_evts[0])],
interrupt.clone(),
)
})
.map_err(|err| {
self.metrics.activate_fails.inc();
ActivateError::VhostUser(err)
})?;
self.device_state = DeviceState::Activated(ActiveState { mem, interrupt });
let delta_us = get_time_us(ClockType::Monotonic) - start_time;
self.metrics.activate_time_us.store(delta_us);
Ok(())
}
fn is_activated(&self) -> bool {
self.device_state.is_activated()
}
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use std::os::unix::net::UnixStream;
use std::sync::atomic::Ordering;
use vhost::{VhostUserMemoryRegionInfo, VringConfigData};
use vmm_sys_util::tempfile::TempFile;
use super::*;
use crate::devices::virtio::block::virtio::device::FileEngineType;
use crate::devices::virtio::test_utils::{VirtQueue, default_interrupt, default_mem};
use crate::devices::virtio::transport::mmio::VIRTIO_MMIO_INT_CONFIG;
use crate::devices::virtio::vhost_user::tests::create_mem;
use crate::test_utils::create_tmp_socket;
use crate::vstate::memory::GuestAddress;
#[test]
fn test_from_config() {
let block_config = BlockDeviceConfig {
drive_id: "".to_string(),
partuuid: None,
is_root_device: false,
cache_type: CacheType::Unsafe,
is_read_only: None,
path_on_host: None,
rate_limiter: None,
file_engine_type: None,
socket: Some("sock".to_string()),
};
VhostUserBlockConfig::try_from(&block_config).unwrap();
let block_config = BlockDeviceConfig {
drive_id: "".to_string(),
partuuid: None,
is_root_device: false,
cache_type: CacheType::Unsafe,
is_read_only: Some(true),
path_on_host: Some("path".to_string()),
rate_limiter: None,
file_engine_type: Some(FileEngineType::Sync),
socket: None,
};
VhostUserBlockConfig::try_from(&block_config).unwrap_err();
let block_config = BlockDeviceConfig {
drive_id: "".to_string(),
partuuid: None,
is_root_device: false,
cache_type: CacheType::Unsafe,
is_read_only: Some(true),
path_on_host: Some("path".to_string()),
rate_limiter: None,
file_engine_type: Some(FileEngineType::Sync),
socket: Some("sock".to_string()),
};
VhostUserBlockConfig::try_from(&block_config).unwrap_err();
}
#[test]
fn test_new_no_features() {
struct MockMaster {
sock: UnixStream,
max_queue_num: u64,
is_owner: std::cell::UnsafeCell<bool>,
features: u64,
protocol_features: VhostUserProtocolFeatures,
hdr_flags: std::cell::UnsafeCell<VhostUserHeaderFlag>,
}
impl VhostUserHandleBackend for MockMaster {
fn from_stream(sock: UnixStream, max_queue_num: u64) -> Self {
Self {
sock,
max_queue_num,
is_owner: std::cell::UnsafeCell::new(false),
features: 0,
protocol_features: VhostUserProtocolFeatures::empty(),
hdr_flags: std::cell::UnsafeCell::new(VhostUserHeaderFlag::empty()),
}
}
fn set_owner(&self) -> Result<(), vhost::Error> {
unsafe { *self.is_owner.get() = true };
Ok(())
}
fn set_hdr_flags(&self, flags: VhostUserHeaderFlag) {
unsafe { *self.hdr_flags.get() = flags };
}
fn get_features(&self) -> Result<u64, vhost::Error> {
Ok(self.features)
}
fn get_protocol_features(&mut self) -> Result<VhostUserProtocolFeatures, vhost::Error> {
Ok(self.protocol_features)
}
fn set_protocol_features(
&mut self,
features: VhostUserProtocolFeatures,
) -> Result<(), vhost::Error> {
self.protocol_features = features;
Ok(())
}
}
let (_tmp_dir, tmp_socket_path) = create_tmp_socket();
let vhost_block_config = VhostUserBlockConfig {
drive_id: "test_drive".to_string(),
partuuid: None,
is_root_device: false,
cache_type: CacheType::Unsafe,
socket: tmp_socket_path.clone(),
};
let vhost_block = VhostUserBlockImpl::<MockMaster>::new(vhost_block_config).unwrap();
// If backend has no features, nothing should be negotiated and
// no flags should be set.
assert_eq!(
vhost_block
.vu_handle
.vu
.sock
.peer_addr()
.unwrap()
.as_pathname()
.unwrap()
.to_str()
.unwrap(),
&tmp_socket_path,
);
assert_eq!(vhost_block.vu_handle.vu.max_queue_num, NUM_QUEUES);
assert!(unsafe { *vhost_block.vu_handle.vu.is_owner.get() });
assert_eq!(vhost_block.avail_features, 0);
assert_eq!(vhost_block.acked_features, 0);
assert_eq!(vhost_block.vu_acked_protocol_features, 0);
assert_eq!(
unsafe { &*vhost_block.vu_handle.vu.hdr_flags.get() }.bits(),
VhostUserHeaderFlag::empty().bits()
);
assert!(!vhost_block.root_device);
assert!(!vhost_block.read_only);
assert_eq!(vhost_block.config_space, Vec::<u8>::new());
}
#[test]
fn test_new_all_features() {
struct MockMaster {
sock: UnixStream,
max_queue_num: u64,
is_owner: std::cell::UnsafeCell<bool>,
features: u64,
protocol_features: VhostUserProtocolFeatures,
hdr_flags: std::cell::UnsafeCell<VhostUserHeaderFlag>,
}
impl VhostUserHandleBackend for MockMaster {
fn from_stream(sock: UnixStream, max_queue_num: u64) -> Self {
Self {
sock,
max_queue_num,
is_owner: std::cell::UnsafeCell::new(false),
features: AVAILABLE_FEATURES | (1 << VIRTIO_BLK_F_FLUSH),
protocol_features: VhostUserProtocolFeatures::all(),
hdr_flags: std::cell::UnsafeCell::new(VhostUserHeaderFlag::empty()),
}
}
fn set_owner(&self) -> Result<(), vhost::Error> {
unsafe { *self.is_owner.get() = true };
Ok(())
}
fn set_hdr_flags(&self, flags: VhostUserHeaderFlag) {
unsafe { *self.hdr_flags.get() = flags };
}
fn get_features(&self) -> Result<u64, vhost::Error> {
Ok(self.features)
}
fn get_protocol_features(&mut self) -> Result<VhostUserProtocolFeatures, vhost::Error> {
Ok(self.protocol_features)
}
fn set_protocol_features(
&mut self,
features: VhostUserProtocolFeatures,
) -> Result<(), vhost::Error> {
self.protocol_features = features;
Ok(())
}
fn get_config(
&mut self,
_offset: u32,
_size: u32,
_flags: VhostUserConfigFlags,
_buf: &[u8],
) -> Result<(VhostUserConfig, VhostUserConfigPayload), vhost::Error> {
Ok((VhostUserConfig::default(), vec![0x69, 0x69, 0x69]))
}
}
let (_tmp_dir, tmp_socket_path) = create_tmp_socket();
let vhost_block_config = VhostUserBlockConfig {
drive_id: "test_drive".to_string(),
partuuid: None,
is_root_device: false,
cache_type: CacheType::Writeback,
socket: tmp_socket_path.clone(),
};
let mut vhost_block = VhostUserBlockImpl::<MockMaster>::new(vhost_block_config).unwrap();
// If backend has all features, features offered by block device
// should be negotiated and header flags should be set.
assert_eq!(
vhost_block
.vu_handle
.vu
.sock
.peer_addr()
.unwrap()
.as_pathname()
.unwrap()
.to_str()
.unwrap(),
&tmp_socket_path,
);
assert_eq!(vhost_block.vu_handle.vu.max_queue_num, NUM_QUEUES);
assert!(unsafe { *vhost_block.vu_handle.vu.is_owner.get() });
assert_eq!(
vhost_block.avail_features,
AVAILABLE_FEATURES | (1 << VIRTIO_BLK_F_FLUSH)
);
assert_eq!(
vhost_block.acked_features,
VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits()
);
assert_eq!(
vhost_block.vu_acked_protocol_features,
VhostUserProtocolFeatures::CONFIG.bits()
);
assert_eq!(
unsafe { &*vhost_block.vu_handle.vu.hdr_flags.get() }.bits(),
VhostUserHeaderFlag::empty().bits()
);
assert!(!vhost_block.root_device);
assert!(!vhost_block.read_only);
assert_eq!(vhost_block.config_space, vec![0x69, 0x69, 0x69]);
// Test some `VirtioDevice` methods
assert_eq!(
vhost_block.avail_features(),
AVAILABLE_FEATURES | (1 << VIRTIO_BLK_F_FLUSH)
);
assert_eq!(
vhost_block.acked_features(),
VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits()
);
// Valid read
let mut read_config = vec![0, 0, 0];
vhost_block.read_config(0, &mut read_config);
assert_eq!(read_config, vec![0x69, 0x69, 0x69]);
// Invalid offset
let mut read_config = vec![0, 0, 0];
vhost_block.read_config(0x69, &mut read_config);
assert_eq!(read_config, vec![0, 0, 0]);
// Writing to the config does nothing
vhost_block.write_config(0x69, &[0]);
assert_eq!(vhost_block.config_space, vec![0x69, 0x69, 0x69]);
// Testing [`config_update`]
vhost_block.device_state = DeviceState::Activated(ActiveState {
mem: default_mem(),
interrupt: default_interrupt(),
});
vhost_block.config_space = vec![];
vhost_block.config_update().unwrap();
assert_eq!(vhost_block.config_space, vec![0x69, 0x69, 0x69]);
assert_eq!(
vhost_block.interrupt_status().load(Ordering::SeqCst),
VIRTIO_MMIO_INT_CONFIG
);
}
#[test]
fn test_activate() {
struct MockMaster {
features_are_set: std::cell::UnsafeCell<bool>,
memory_is_set: std::cell::UnsafeCell<bool>,
vring_enabled: std::cell::UnsafeCell<bool>,
}
impl VhostUserHandleBackend for MockMaster {
fn from_stream(_sock: UnixStream, _max_queue_num: u64) -> Self {
Self {
features_are_set: std::cell::UnsafeCell::new(false),
memory_is_set: std::cell::UnsafeCell::new(false),
vring_enabled: std::cell::UnsafeCell::new(false),
}
}
fn set_owner(&self) -> Result<(), vhost::Error> {
Ok(())
}
fn set_hdr_flags(&self, _flags: VhostUserHeaderFlag) {}
fn get_features(&self) -> Result<u64, vhost::Error> {
Ok(0)
}
fn get_protocol_features(&mut self) -> Result<VhostUserProtocolFeatures, vhost::Error> {
Ok(VhostUserProtocolFeatures::empty())
}
fn set_protocol_features(
&mut self,
_features: VhostUserProtocolFeatures,
) -> Result<(), vhost::Error> {
Ok(())
}
fn get_config(
&mut self,
_offset: u32,
_size: u32,
_flags: VhostUserConfigFlags,
_buf: &[u8],
) -> Result<(VhostUserConfig, VhostUserConfigPayload), vhost::Error> {
Ok((VhostUserConfig::default(), vec![]))
}
fn set_features(&self, _features: u64) -> Result<(), vhost::Error> {
unsafe { (*self.features_are_set.get()) = true };
Ok(())
}
fn set_mem_table(
&self,
_regions: &[VhostUserMemoryRegionInfo],
) -> Result<(), vhost::Error> {
unsafe { (*self.memory_is_set.get()) = true };
Ok(())
}
fn set_vring_num(&self, _queue_index: usize, _num: u16) -> Result<(), vhost::Error> {
Ok(())
}
fn set_vring_addr(
&self,
_queue_index: usize,
_config_data: &VringConfigData,
) -> Result<(), vhost::Error> {
Ok(())
}
fn set_vring_base(&self, _queue_index: usize, _base: u16) -> Result<(), vhost::Error> {
Ok(())
}
fn set_vring_call(
&self,
_queue_index: usize,
_fd: &EventFd,
) -> Result<(), vhost::Error> {
Ok(())
}
fn set_vring_kick(
&self,
_queue_index: usize,
_fd: &EventFd,
) -> Result<(), vhost::Error> {
Ok(())
}
fn set_vring_enable(
&mut self,
_queue_index: usize,
_enable: bool,
) -> Result<(), vhost::Error> {
unsafe { (*self.vring_enabled.get()) = true };
Ok(())
}
}
// Block creation
let (_tmp_dir, tmp_socket_path) = create_tmp_socket();
let vhost_block_config = VhostUserBlockConfig {
drive_id: "test_drive".to_string(),
partuuid: None,
is_root_device: false,
cache_type: CacheType::Writeback,
socket: tmp_socket_path,
};
let mut vhost_block = VhostUserBlockImpl::<MockMaster>::new(vhost_block_config).unwrap();
// Memory creation
let region_size = 0x10000;
let file = TempFile::new().unwrap().into_file();
file.set_len(region_size as u64).unwrap();
let regions = vec![(GuestAddress(0x0), region_size)];
let guest_memory = create_mem(file, ®ions);
let q = VirtQueue::new(GuestAddress(0), &guest_memory, 16);
vhost_block.queues[0] = q.create_queue();
let interrupt = default_interrupt();
// During actiavion of the device features, memory and queues should be set and activated.
vhost_block.activate(guest_memory, interrupt).unwrap();
assert!(unsafe { *vhost_block.vu_handle.vu.features_are_set.get() });
assert!(unsafe { *vhost_block.vu_handle.vu.memory_is_set.get() });
assert!(unsafe { *vhost_block.vu_handle.vu.vring_enabled.get() });
assert!(vhost_block.is_activated());
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/block/vhost_user/mod.rs | src/vmm/src/devices/virtio/block/vhost_user/mod.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
pub mod device;
pub mod event_handler;
pub mod persist;
use self::device::VhostUserBlock;
use crate::devices::virtio::vhost_user::VhostUserError;
use crate::vstate::interrupts::InterruptError;
/// Number of queues for the vhost-user block device.
pub const NUM_QUEUES: u64 = 1;
/// Queue size for the vhost-user block device.
pub const QUEUE_SIZE: u16 = 256;
/// Vhost-user block device error.
// NOTE: `displaydoc::Display` derives the `Display` impl from the `///`
// comments on each variant below, so those strings are part of the runtime
// error messages — do not reword them casually.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum VhostUserBlockError {
/// Cannot create config
Config,
/// Snapshotting of vhost-user-blk devices is not supported
SnapshottingNotSupported,
/// Vhost-user error: {0}
VhostUser(VhostUserError),
/// Vhost error: {0}
Vhost(vhost::Error),
/// Error opening eventfd: {0}
EventFd(std::io::Error),
/// Error creating irqfd: {0}
Interrupt(InterruptError),
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/block/vhost_user/event_handler.rs | src/vmm/src/devices/virtio/block/vhost_user/event_handler.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use event_manager::{EventOps, Events, MutEventSubscriber};
use vmm_sys_util::epoll::EventSet;
use super::VhostUserBlock;
use crate::devices::virtio::device::VirtioDevice;
use crate::logger::{error, warn};
impl VhostUserBlock {
// Event-manager token identifying the activate eventfd.
const PROCESS_ACTIVATE: u32 = 0;
// Subscribes the activate eventfd with the event manager; registration
// failures are logged rather than propagated.
fn register_activate_event(&self, ops: &mut EventOps) {
if let Err(err) = ops.add(Events::with_data(
&self.activate_evt,
Self::PROCESS_ACTIVATE,
EventSet::IN,
)) {
error!("Failed to register activate event: {}", err);
}
}
// Consumes the activate eventfd and unsubscribes it — activation is a
// one-shot event, so it is removed after the first trigger.
fn process_activate_event(&self, ops: &mut EventOps) {
if let Err(err) = self.activate_evt.read() {
error!("Failed to consume block activate event: {:?}", err);
}
if let Err(err) = ops.remove(Events::with_data(
&self.activate_evt,
Self::PROCESS_ACTIVATE,
EventSet::IN,
)) {
error!("Failed to un-register activate event: {}", err);
}
}
}
impl MutEventSubscriber for VhostUserBlock {
// Handle an event for queue or rate limiter.
// For vhost-user block the only expected event is device activation:
// queue processing happens entirely in the backend process.
fn process(&mut self, event: Events, ops: &mut EventOps) {
let source = event.data();
let event_set = event.event_set();
let supported_events = EventSet::IN;
// Reject anything other than plain readiness-to-read.
if !supported_events.contains(event_set) {
warn!(
"Received unknown event: {:?} from source: {:?}",
event_set, source
);
return;
}
if self.is_activated() {
if Self::PROCESS_ACTIVATE == source {
self.process_activate_event(ops)
} else {
warn!("BlockVhost: Spurious event received: {:?}", source)
}
} else {
warn!(
"BlockVhost: The device is not yet activated. Spurious event received: {:?}",
source
);
}
}
fn init(&mut self, ops: &mut EventOps) {
// This function can be called during different points in the device lifetime:
// - shortly after device creation,
// - on device activation (is-activated already true at this point),
// - on device restore from snapshot.
if self.is_activated() {
warn!("Vhost-user block: unexpected init event");
} else {
self.register_activate_event(ops);
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/net/tap.rs | src/vmm/src/devices/virtio/net/tap.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::fmt::{self, Debug};
use std::fs::File;
use std::io::Error as IoError;
use std::os::raw::*;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use vmm_sys_util::ioctl::{ioctl_with_mut_ref, ioctl_with_ref, ioctl_with_val};
use vmm_sys_util::ioctl_iow_nr;
use crate::devices::virtio::iovec::IoVecBuffer;
use crate::devices::virtio::net::generated;
// As defined in the Linux UAPI:
// https://elixir.bootlin.com/linux/v4.17/source/include/uapi/linux/if.h#L33
const IFACE_NAME_MAX_LEN: usize = 16;
/// List of errors the tap implementation can throw.
// NOTE: `displaydoc::Display` derives the `Display` impl from the `///`
// comments on each variant, so those strings are user-visible error text.
#[rustfmt::skip]
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum TapError {
/// Couldn't open /dev/net/tun: {0}
OpenTun(IoError),
/// Invalid interface name
InvalidIfname,
/// Error while creating ifreq structure: {0}. Invalid TUN/TAP Backend provided by {1}. Check our documentation on setting up the network devices.
IfreqExecuteError(IoError, String),
/// Error while setting the offload flags: {0}
SetOffloadFlags(IoError),
/// Error while setting size of the vnet header: {0}
SetSizeOfVnetHdr(IoError),
}
const TUNTAP: ::std::os::raw::c_uint = 84;
ioctl_iow_nr!(TUNSETIFF, TUNTAP, 202, ::std::os::raw::c_int);
ioctl_iow_nr!(TUNSETOFFLOAD, TUNTAP, 208, ::std::os::raw::c_uint);
ioctl_iow_nr!(TUNSETVNETHDRSZ, TUNTAP, 216, ::std::os::raw::c_int);
/// Handle for a network tap interface.
///
/// For now, this simply wraps the file descriptor for the tap device so methods
/// can run ioctls on the interface. The tap interface fd will be closed when
/// Tap goes out of scope, and the kernel will clean up the interface automatically.
#[derive(Debug)]
pub struct Tap {
tap_file: File,
// NUL-padded interface name, as returned by the TUNSETIFF ioctl.
pub(crate) if_name: [u8; IFACE_NAME_MAX_LEN],
}
// Builds the NUL-terminated, fixed-size name buffer the kernel's ifreq
// interface expects. Fails with `InvalidIfname` when the name cannot fit
// together with its terminating NUL byte.
fn build_terminated_if_name(if_name: &str) -> Result<[u8; IFACE_NAME_MAX_LEN], TapError> {
    let name_bytes = if_name.as_bytes();
    // One slot must remain for the trailing '\0'.
    if name_bytes.len() > IFACE_NAME_MAX_LEN - 1 {
        return Err(TapError::InvalidIfname);
    }
    let mut terminated = [0u8; IFACE_NAME_MAX_LEN];
    terminated[..name_bytes.len()].copy_from_slice(name_bytes);
    Ok(terminated)
}
// Builder over the kernel `ifreq` structure used by the TUN/TAP ioctls.
#[derive(Copy, Clone)]
pub struct IfReqBuilder(generated::ifreq);
// Manual Debug: `generated::ifreq` contains unions, so its contents are not
// meaningfully printable.
impl fmt::Debug for IfReqBuilder {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "IfReqBuilder {{ .. }}")
}
}
impl IfReqBuilder {
// Starts from an all-zero ifreq.
pub fn new() -> Self {
Self(Default::default())
}
// Sets the interface name; `if_name` must already be NUL-terminated
// (see `build_terminated_if_name`).
pub fn if_name(mut self, if_name: &[u8; IFACE_NAME_MAX_LEN]) -> Self {
// SAFETY: Since we don't call as_mut on the same union field more than once, this block is
// safe.
let ifrn_name = unsafe { self.0.ifr_ifrn.ifrn_name.as_mut() };
ifrn_name.copy_from_slice(if_name.as_ref());
self
}
// Sets the ifreq flags union field (e.g. IFF_TAP | IFF_NO_PI).
pub(crate) fn flags(mut self, flags: i16) -> Self {
self.0.ifr_ifru.ifru_flags = flags;
self
}
// Runs `ioctl` on `socket` with this ifreq and returns the (possibly
// kernel-updated) structure.
pub(crate) fn execute<F: AsRawFd + Debug>(
mut self,
socket: &F,
ioctl: u64,
) -> std::io::Result<generated::ifreq> {
// SAFETY: ioctl is safe. Called with a valid socket fd, and we check the return.
if unsafe { ioctl_with_mut_ref(socket, ioctl, &mut self.0) } < 0 {
return Err(IoError::last_os_error());
}
Ok(self.0)
}
}
impl Tap {
    /// Create a TUN/TAP device given the interface name.
    /// # Arguments
    ///
    /// * `if_name` - the name of the interface.
    pub fn open_named(if_name: &str) -> Result<Tap, TapError> {
        // SAFETY: Open calls are safe because we give a constant null-terminated
        // string and verify the result.
        let fd = unsafe {
            libc::open(
                c"/dev/net/tun".as_ptr(),
                libc::O_RDWR | libc::O_NONBLOCK | libc::O_CLOEXEC,
            )
        };
        if fd < 0 {
            return Err(TapError::OpenTun(IoError::last_os_error()));
        }
        // SAFETY: We just checked that the fd is valid.
        let tuntap = unsafe { File::from_raw_fd(fd) };
        let terminated_if_name = build_terminated_if_name(if_name)?;
        // Bind the fd to the named interface. IFF_NO_PI drops the extra packet
        // info header; IFF_VNET_HDR prepends a virtio-net header to each frame.
        let ifreq = IfReqBuilder::new()
            .if_name(&terminated_if_name)
            .flags(
                i16::try_from(generated::IFF_TAP | generated::IFF_NO_PI | generated::IFF_VNET_HDR)
                    .unwrap(),
            )
            .execute(&tuntap, TUNSETIFF())
            .map_err(|io_error| TapError::IfreqExecuteError(io_error, if_name.to_owned()))?;
        Ok(Tap {
            tap_file: tuntap,
            // SAFETY: Safe since only the name is accessed, and it's cloned out.
            // Read the name back from the returned ifreq: the kernel may have
            // rewritten it (e.g. when a "%d" pattern was supplied).
            if_name: unsafe { ifreq.ifr_ifrn.ifrn_name },
        })
    }
    /// Retrieve the interface's name as a str.
    ///
    /// Returns "" if the stored bytes are not valid UTF-8.
    pub fn if_name_as_str(&self) -> &str {
        // The name is NUL-padded; take everything up to the first NUL byte.
        let len = self
            .if_name
            .iter()
            .position(|x| *x == 0)
            .unwrap_or(IFACE_NAME_MAX_LEN);
        std::str::from_utf8(&self.if_name[..len]).unwrap_or("")
    }
    /// Set the offload flags for the tap interface.
    pub fn set_offload(&self, flags: c_uint) -> Result<(), TapError> {
        // SAFETY: ioctl is safe. Called with a valid tap fd, and we check the return.
        if unsafe { ioctl_with_val(&self.tap_file, TUNSETOFFLOAD(), c_ulong::from(flags)) } < 0 {
            return Err(TapError::SetOffloadFlags(IoError::last_os_error()));
        }
        Ok(())
    }
    /// Set the size of the vnet hdr.
    pub fn set_vnet_hdr_size(&self, size: c_int) -> Result<(), TapError> {
        // SAFETY: ioctl is safe. Called with a valid tap fd, and we check the return.
        if unsafe { ioctl_with_ref(&self.tap_file, TUNSETVNETHDRSZ(), &size) } < 0 {
            return Err(TapError::SetSizeOfVnetHdr(IoError::last_os_error()));
        }
        Ok(())
    }
    /// Write an `IoVecBuffer` to tap
    ///
    /// Returns the number of bytes written on success.
    pub(crate) fn write_iovec(&mut self, buffer: &IoVecBuffer) -> Result<usize, IoError> {
        let iovcnt = i32::try_from(buffer.iovec_count()).unwrap();
        let iov = buffer.as_iovec_ptr();
        // SAFETY: `writev` is safe. Called with a valid tap fd, the iovec pointer and length
        // are provided by the `IoVecBuffer` implementation and we check the return value.
        let ret = unsafe { libc::writev(self.tap_file.as_raw_fd(), iov, iovcnt) };
        if ret == -1 {
            return Err(IoError::last_os_error());
        }
        Ok(usize::try_from(ret).unwrap())
    }
    /// Read from tap to an `IoVecBufferMut`
    ///
    /// Returns the number of bytes read on success.
    pub(crate) fn read_iovec(&mut self, buffer: &mut [libc::iovec]) -> Result<usize, IoError> {
        let iov = buffer.as_mut_ptr();
        let iovcnt = buffer.len().try_into().unwrap();
        // SAFETY: `readv` is safe. Called with a valid tap fd, the iovec pointer and length
        // are provided by the `IoVecBufferMut` implementation and we check the return value.
        let ret = unsafe { libc::readv(self.tap_file.as_raw_fd(), iov, iovcnt) };
        if ret == -1 {
            return Err(IoError::last_os_error());
        }
        Ok(usize::try_from(ret).unwrap())
    }
}
impl AsRawFd for Tap {
    // Expose the raw fd of the wrapped tap file so callers can register it
    // with epoll/event loops.
    fn as_raw_fd(&self) -> RawFd {
        AsRawFd::as_raw_fd(&self.tap_file)
    }
}
#[cfg(test)]
pub mod tests {
    #![allow(clippy::undocumented_unsafe_blocks)]
    use std::os::unix::ffi::OsStrExt;
    use super::*;
    use crate::devices::virtio::net::generated;
    use crate::devices::virtio::net::test_utils::{TapTrafficSimulator, enable, if_index};
    // Redefine `IoVecBufferMut` with specific length. Otherwise
    // Rust will not know what to do.
    type IoVecBufferMut = crate::devices::virtio::iovec::IoVecBufferMut<256>;
    // The size of the virtio net header
    const VNET_HDR_SIZE: usize = 10;
    // Payload size used by the read/write iovec tests below.
    const PAYLOAD_SIZE: usize = 512;
    // NOTE: these tests create real TAP devices, so they require root/CAP_NET_ADMIN.
    #[test]
    fn test_tap_name() {
        // Sanity check that the assumed max iface name length is correct.
        assert_eq!(IFACE_NAME_MAX_LEN, unsafe {
            generated::ifreq__bindgen_ty_1::default().ifrn_name.len()
        });
        // Empty name - The tap should be named "tap0" by default
        let tap = Tap::open_named("").unwrap();
        assert_eq!(b"tap0\0\0\0\0\0\0\0\0\0\0\0\0", &tap.if_name);
        assert_eq!("tap0", tap.if_name_as_str());
        // Test using '%d' to have the kernel assign an unused name,
        // and that we correctly copy back that generated name
        let tap = Tap::open_named("tap%d").unwrap();
        // '%d' should be replaced with _some_ number, although we don't know what was the next
        // available one. Just assert that '%d' definitely isn't there anymore.
        assert_ne!(b"tap%d", &tap.if_name[..5]);
        // 16 characters - too long.
        let name = "a123456789abcdef";
        match Tap::open_named(name) {
            Err(TapError::InvalidIfname) => (),
            _ => panic!("Expected Error::InvalidIfname"),
        };
        // 15 characters - OK.
        let name = "a123456789abcde";
        let tap = Tap::open_named(name).unwrap();
        assert_eq!(&format!("{}\0", name).as_bytes(), &tap.if_name);
        assert_eq!(name, tap.if_name_as_str());
    }
    #[test]
    fn test_tap_exclusive_open() {
        let _tap1 = Tap::open_named("exclusivetap").unwrap();
        // Opening same tap device a second time should not be permitted.
        Tap::open_named("exclusivetap").unwrap_err();
    }
    #[test]
    fn test_set_options() {
        // This line will fail to provide an initialized FD if the test is not run as root.
        let tap = Tap::open_named("").unwrap();
        tap.set_vnet_hdr_size(16).unwrap();
        tap.set_offload(0).unwrap();
    }
    #[test]
    fn test_raw_fd() {
        let tap = Tap::open_named("").unwrap();
        assert_eq!(tap.as_raw_fd(), tap.tap_file.as_raw_fd());
    }
    #[test]
    fn test_write_iovec() {
        let mut tap = Tap::open_named("").unwrap();
        enable(&tap);
        let tap_traffic_simulator = TapTrafficSimulator::new(if_index(&tap));
        // Zero out the destination MAC portion so the frame parses as link-level data.
        let mut fragment1 = vmm_sys_util::rand::rand_bytes(PAYLOAD_SIZE);
        fragment1.as_mut_slice()[..generated::ETH_HLEN as usize]
            .copy_from_slice(&[0; generated::ETH_HLEN as usize]);
        let fragment2 = vmm_sys_util::rand::rand_bytes(PAYLOAD_SIZE);
        let fragment3 = vmm_sys_util::rand::rand_bytes(PAYLOAD_SIZE);
        let scattered = IoVecBuffer::from(vec![
            fragment1.as_slice(),
            fragment2.as_slice(),
            fragment3.as_slice(),
        ]);
        let num_bytes = tap.write_iovec(&scattered).unwrap();
        assert_eq!(num_bytes, scattered.len() as usize);
        // The VNET header is consumed by the tap, so the received data starts
        // VNET_HDR_SIZE bytes into fragment1.
        let mut read_buf = vec![0u8; scattered.len() as usize];
        assert!(tap_traffic_simulator.pop_rx_packet(&mut read_buf));
        assert_eq!(
            &read_buf[..PAYLOAD_SIZE - VNET_HDR_SIZE],
            &fragment1[VNET_HDR_SIZE..]
        );
        assert_eq!(
            &read_buf[PAYLOAD_SIZE - VNET_HDR_SIZE..2 * PAYLOAD_SIZE - VNET_HDR_SIZE],
            fragment2
        );
        assert_eq!(
            &read_buf[2 * PAYLOAD_SIZE - VNET_HDR_SIZE..3 * PAYLOAD_SIZE - VNET_HDR_SIZE],
            fragment3
        );
    }
    #[test]
    fn test_read_iovec() {
        let mut tap = Tap::open_named("").unwrap();
        enable(&tap);
        let tap_traffic_simulator = TapTrafficSimulator::new(if_index(&tap));
        let mut buff1 = vec![0; PAYLOAD_SIZE + VNET_HDR_SIZE];
        let mut buff2 = vec![0; 2 * PAYLOAD_SIZE];
        let mut rx_buffers = IoVecBufferMut::from(vec![buff1.as_mut_slice(), buff2.as_mut_slice()]);
        let packet = vmm_sys_util::rand::rand_alphanumerics(2 * PAYLOAD_SIZE);
        tap_traffic_simulator.push_tx_packet(packet.as_bytes());
        // The tap prepends a VNET header, hence the extra VNET_HDR_SIZE bytes.
        assert_eq!(
            tap.read_iovec(rx_buffers.as_iovec_mut_slice()).unwrap(),
            2 * PAYLOAD_SIZE + VNET_HDR_SIZE
        );
        assert_eq!(&buff1[VNET_HDR_SIZE..], &packet.as_bytes()[..PAYLOAD_SIZE]);
        assert_eq!(&buff2[..PAYLOAD_SIZE], &packet.as_bytes()[PAYLOAD_SIZE..]);
        assert_eq!(&buff2[PAYLOAD_SIZE..], &vec![0; PAYLOAD_SIZE])
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/net/persist.rs | src/vmm/src/devices/virtio/net/persist.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the structures needed for saving/restoring net devices.
use std::io;
use std::sync::{Arc, Mutex};
use serde::{Deserialize, Serialize};
use super::device::{Net, RxBuffers};
use super::{NET_NUM_QUEUES, NET_QUEUE_MAX_SIZE, RX_INDEX, TapError};
use crate::devices::virtio::device::{ActiveState, DeviceState};
use crate::devices::virtio::generated::virtio_ids::VIRTIO_ID_NET;
use crate::devices::virtio::persist::{PersistError as VirtioStateError, VirtioDeviceState};
use crate::devices::virtio::transport::VirtioInterrupt;
use crate::mmds::data_store::Mmds;
use crate::mmds::ns::MmdsNetworkStack;
use crate::mmds::persist::MmdsNetworkStackState;
use crate::rate_limiter::RateLimiter;
use crate::rate_limiter::persist::RateLimiterState;
use crate::snapshot::Persist;
use crate::utils::net::mac::MacAddr;
use crate::vstate::memory::GuestMemoryMmap;
/// Information about the network config's that are saved
/// at snapshot.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct NetConfigSpaceState {
    // Guest-visible MAC address, if one was explicitly configured.
    guest_mac: Option<MacAddr>,
}
/// Information about the network device that are saved
/// at snapshot.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NetState {
    /// Device identifier.
    pub id: String,
    /// Name of the host TAP interface backing this device.
    pub tap_if_name: String,
    // Saved state of the RX-side rate limiter.
    rx_rate_limiter_state: RateLimiterState,
    // Saved state of the TX-side rate limiter.
    tx_rate_limiter_state: RateLimiterState,
    /// The associated MMDS network stack.
    pub mmds_ns: Option<MmdsNetworkStackState>,
    // Guest-visible virtio config space (contains the MAC address).
    config_space: NetConfigSpaceState,
    /// Generic virtio device state (queues, features, activation).
    pub virtio_state: VirtioDeviceState,
}
/// Auxiliary structure for creating a device when resuming from a snapshot.
#[derive(Debug)]
pub struct NetConstructorArgs {
    /// Pointer to guest memory.
    pub mem: GuestMemoryMmap,
    /// Pointer to the MMDS data store.
    /// `None` when no device in the snapshot uses MMDS.
    pub mmds: Option<Arc<Mutex<Mmds>>>,
}
/// Errors triggered when trying to construct a network device at resume time.
// NOTE: the variant doc comments double as user-visible error messages via
// `displaydoc::Display`; do not edit them casually.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum NetPersistError {
    /// Failed to create a network device: {0}
    CreateNet(#[from] super::NetError),
    /// Failed to create a rate limiter: {0}
    CreateRateLimiter(#[from] io::Error),
    /// Failed to re-create the virtio state (i.e queues etc): {0}
    VirtioState(#[from] VirtioStateError),
    /// Indicator that no MMDS is associated with this device.
    NoMmdsDataStore,
    /// Setting tap interface offload flags failed: {0}
    TapSetOffload(TapError),
}
impl Persist<'_> for Net {
    type State = NetState;
    type ConstructorArgs = NetConstructorArgs;
    type Error = NetPersistError;
    /// Serializes the device into a `NetState` snapshot structure.
    fn save(&self) -> Self::State {
        NetState {
            id: self.id().clone(),
            tap_if_name: self.iface_name(),
            rx_rate_limiter_state: self.rx_rate_limiter.save(),
            tx_rate_limiter_state: self.tx_rate_limiter.save(),
            mmds_ns: self.mmds_ns.as_ref().map(|mmds| mmds.save()),
            config_space: NetConfigSpaceState {
                guest_mac: self.guest_mac,
            },
            virtio_state: VirtioDeviceState::from_device(self),
        }
    }
    /// Re-creates a `Net` device from a snapshot `state`, re-opening the TAP
    /// interface and rebuilding rate limiters, MMDS stack and virtio queues.
    fn restore(
        constructor_args: Self::ConstructorArgs,
        state: &Self::State,
    ) -> Result<Self, Self::Error> {
        // RateLimiter::restore() can fail at creating a timerfd.
        let rx_rate_limiter = RateLimiter::restore((), &state.rx_rate_limiter_state)?;
        let tx_rate_limiter = RateLimiter::restore((), &state.tx_rate_limiter_state)?;
        let mut net = Net::new(
            state.id.clone(),
            &state.tap_if_name,
            state.config_space.guest_mac,
            rx_rate_limiter,
            tx_rate_limiter,
        )?;
        // We trust the MMIODeviceManager::restore to pass us an MMDS data store reference if
        // there is at least one net device having the MMDS NS present and/or the mmds version was
        // persisted in the snapshot.
        if let Some(mmds_ns) = &state.mmds_ns {
            // We're safe calling unwrap() to discard the error, as MmdsNetworkStack::restore()
            // always returns Ok.
            net.mmds_ns = Some(
                MmdsNetworkStack::restore(
                    constructor_args
                        .mmds
                        .map_or_else(|| Err(NetPersistError::NoMmdsDataStore), Ok)?,
                    mmds_ns,
                )
                .unwrap(),
            );
        }
        // Rebuild and validate the virtio queues from the persisted state.
        net.queues = state.virtio_state.build_queues_checked(
            &constructor_args.mem,
            VIRTIO_ID_NET,
            NET_NUM_QUEUES,
            NET_QUEUE_MAX_SIZE,
        )?;
        net.avail_features = state.virtio_state.avail_features;
        net.acked_features = state.virtio_state.acked_features;
        Ok(net)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::devices::virtio::device::VirtioDevice;
    use crate::devices::virtio::net::test_utils::{default_net, default_net_no_mmds};
    use crate::devices::virtio::test_utils::{default_interrupt, default_mem};
    use crate::snapshot::Snapshot;
    // Saves `net` to a snapshot buffer, drops it (so the TAP can be re-opened),
    // restores it with the given MMDS data store, and checks the round trip.
    fn validate_save_and_restore(net: Net, mmds_ds: Option<Arc<Mutex<Mmds>>>) {
        let guest_mem = default_mem();
        let mut mem = vec![0; 4096];
        let id;
        let tap_if_name;
        let has_mmds_ns;
        let allow_mmds_requests;
        let virtio_state;
        // Create and save the net device.
        {
            Snapshot::new(net.save())
                .save(&mut mem.as_mut_slice())
                .unwrap();
            // Save some fields that we want to check later.
            id = net.id.clone();
            tap_if_name = net.iface_name();
            has_mmds_ns = net.mmds_ns.is_some();
            allow_mmds_requests = has_mmds_ns && mmds_ds.is_some();
            virtio_state = VirtioDeviceState::from_device(&net);
        }
        // Drop the initial net device so that we don't get an error when trying to recreate the
        // TAP device.
        drop(net);
        {
            // Deserialize and restore the net device.
            match Net::restore(
                NetConstructorArgs {
                    mem: guest_mem,
                    mmds: mmds_ds,
                },
                &Snapshot::load_without_crc_check(mem.as_slice())
                    .unwrap()
                    .data,
            ) {
                Ok(restored_net) => {
                    // Test that virtio specific fields are the same.
                    assert_eq!(restored_net.device_type(), VIRTIO_ID_NET);
                    assert_eq!(restored_net.avail_features(), virtio_state.avail_features);
                    assert_eq!(restored_net.acked_features(), virtio_state.acked_features);
                    assert_eq!(restored_net.is_activated(), virtio_state.activated);
                    // Test that net specific fields are the same.
                    assert_eq!(&restored_net.id, &id);
                    assert_eq!(&restored_net.iface_name(), &tap_if_name);
                    assert_eq!(restored_net.mmds_ns.is_some(), allow_mmds_requests);
                    assert_eq!(restored_net.rx_rate_limiter, RateLimiter::default());
                    assert_eq!(restored_net.tx_rate_limiter, RateLimiter::default());
                }
                Err(NetPersistError::NoMmdsDataStore) => {
                    // Restoring a device with an MMDS namespace but no data store
                    // is expected to fail with this specific error.
                    assert!(has_mmds_ns && !allow_mmds_requests)
                }
                _ => unreachable!(),
            }
        }
    }
    #[test]
    fn test_persistence() {
        let mmds = Some(Arc::new(Mutex::new(Mmds::default())));
        validate_save_and_restore(default_net(), mmds.as_ref().cloned());
        validate_save_and_restore(default_net_no_mmds(), None);
        // Check what happens if the MMIODeviceManager gives us the reference to the MMDS
        // data store even if this device does not have mmds ns configured.
        // The restore should be conservative and not configure the mmds ns.
        validate_save_and_restore(default_net_no_mmds(), mmds);
        // Check what happens if the MMIODeviceManager does not give us the reference to the MMDS
        // data store. This will return an error.
        validate_save_and_restore(default_net(), None);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/net/device.rs | src/vmm/src/devices/virtio/net/device.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::collections::VecDeque;
use std::mem::{self};
use std::net::Ipv4Addr;
use std::num::Wrapping;
use std::ops::Deref;
use std::sync::{Arc, Mutex};
use libc::{EAGAIN, iovec};
use log::{error, info};
use vmm_sys_util::eventfd::EventFd;
use super::NET_QUEUE_MAX_SIZE;
use crate::devices::virtio::ActivateError;
use crate::devices::virtio::device::{ActiveState, DeviceState, VirtioDevice};
use crate::devices::virtio::generated::virtio_config::VIRTIO_F_VERSION_1;
use crate::devices::virtio::generated::virtio_ids::VIRTIO_ID_NET;
use crate::devices::virtio::generated::virtio_net::{
VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
VIRTIO_NET_F_GUEST_UFO, VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6, VIRTIO_NET_F_HOST_UFO,
VIRTIO_NET_F_MAC, VIRTIO_NET_F_MRG_RXBUF, virtio_net_hdr_v1,
};
use crate::devices::virtio::generated::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use crate::devices::virtio::iovec::{
IoVecBuffer, IoVecBufferMut, IoVecError, ParsedDescriptorChain,
};
use crate::devices::virtio::net::metrics::{NetDeviceMetrics, NetMetricsPerDevice};
use crate::devices::virtio::net::tap::Tap;
use crate::devices::virtio::net::{
MAX_BUFFER_SIZE, NET_QUEUE_SIZES, NetError, NetQueue, RX_INDEX, TX_INDEX, generated,
};
use crate::devices::virtio::queue::{DescriptorChain, InvalidAvailIdx, Queue};
use crate::devices::virtio::transport::{VirtioInterrupt, VirtioInterruptType};
use crate::devices::{DeviceError, report_net_event_fail};
use crate::dumbo::pdu::arp::ETH_IPV4_FRAME_LEN;
use crate::dumbo::pdu::ethernet::{EthernetFrame, PAYLOAD_OFFSET};
use crate::impl_device_type;
use crate::logger::{IncMetric, METRICS};
use crate::mmds::data_store::Mmds;
use crate::mmds::ns::MmdsNetworkStack;
use crate::rate_limiter::{BucketUpdate, RateLimiter, TokenType};
use crate::utils::net::mac::MacAddr;
use crate::utils::u64_to_usize;
use crate::vstate::memory::{ByteValued, GuestMemoryMmap};
// Maximum L2 frame header bytes we may need to inspect: the ethernet header
// plus an IPv4 ARP frame.
const FRAME_HEADER_MAX_LEN: usize = PAYLOAD_OFFSET + ETH_IPV4_FRAME_LEN;
// Length in bytes of the virtio-net header (`virtio_net_hdr_v1`) that prefixes
// every frame exchanged with the TAP device.
pub(crate) const fn vnet_hdr_len() -> usize {
    mem::size_of::<virtio_net_hdr_v1>()
}
// This returns the maximum frame header length. This includes the VNET header plus
// the maximum L2 frame header bytes which includes the ethernet frame header plus
// the IPv4 ARP header which is 28 bytes long.
const fn frame_hdr_len() -> usize {
    vnet_hdr_len() + FRAME_HEADER_MAX_LEN
}
// Frames being sent/received through the network device model have a VNET header. This
// function returns a slice which holds the L2 frame bytes without this header.
// Errors with `VnetHeaderMissing` if the buffer is shorter than the header.
fn frame_bytes_from_buf(buf: &[u8]) -> Result<&[u8], NetError> {
    // `get` yields `None` exactly when `buf.len() < vnet_hdr_len()`.
    buf.get(vnet_hdr_len()..).ok_or(NetError::VnetHeaderMissing)
}
// Mutable counterpart of `frame_bytes_from_buf`: strips the VNET header from `buf`.
fn frame_bytes_from_buf_mut(buf: &mut [u8]) -> Result<&mut [u8], NetError> {
    buf.get_mut(vnet_hdr_len()..)
        .ok_or(NetError::VnetHeaderMissing)
}
// This initializes to all 0 the VNET hdr part of a buf.
// The buffer must be at least `vnet_hdr_len()` bytes long (panics otherwise,
// same as the slice indexing it replaces).
fn init_vnet_hdr(buf: &mut [u8]) {
    for byte in buf[..vnet_hdr_len()].iter_mut() {
        *byte = 0;
    }
}
// Guest-visible virtio-net config space. Only the MAC address is exposed.
#[derive(Debug, Default, Clone, Copy)]
#[repr(C)]
pub struct ConfigSpace {
    pub guest_mac: MacAddr,
}
// SAFETY: `ConfigSpace` contains only PODs in `repr(C)` or `repr(transparent)`, without padding.
unsafe impl ByteValued for ConfigSpace {}
// Errors from `RxBuffers::add_buffer`. Variant doc comments are the Display
// messages (via `displaydoc`).
#[derive(Debug, thiserror::Error, displaydoc::Display)]
enum AddRxBufferError {
    /// Error while parsing new buffer: {0}
    Parsing(#[from] IoVecError),
    /// RX buffer is too small
    BufferTooSmall,
}
/// A map of all the memory the guest has provided us with for performing RX
#[derive(Debug)]
pub struct RxBuffers {
    // minimum size of a usable buffer for doing RX
    pub min_buffer_size: u32,
    // An [`IoVecBufferMut`] covering all the memory we have available for receiving network
    // frames.
    pub iovec: IoVecBufferMut<NET_QUEUE_MAX_SIZE>,
    // A map of which part of the memory belongs to which `DescriptorChain` object
    pub parsed_descriptors: VecDeque<ParsedDescriptorChain>,
    // Buffers that we have used and they are ready to be given back to the guest.
    // `used_descriptors` counts used-ring entries written but not yet published
    // to the guest; `used_bytes` the corresponding byte total.
    pub used_descriptors: u16,
    pub used_bytes: u32,
}
impl RxBuffers {
    /// Create a new [`RxBuffers`] object for storing guest memory for performing RX
    fn new() -> Result<Self, IoVecError> {
        Ok(Self {
            min_buffer_size: 0,
            iovec: IoVecBufferMut::new()?,
            parsed_descriptors: VecDeque::with_capacity(NET_QUEUE_MAX_SIZE.into()),
            used_descriptors: 0,
            used_bytes: 0,
        })
    }
    /// Add a new `DescriptorChain` that we received from the RX queue in the buffer.
    ///
    /// SAFETY: The `DescriptorChain` cannot be referencing the same memory location as any other
    /// `DescriptorChain`. (See also related comment in
    /// [`IoVecBufferMut::append_descriptor_chain`]).
    unsafe fn add_buffer(
        &mut self,
        mem: &GuestMemoryMmap,
        head: DescriptorChain,
    ) -> Result<(), AddRxBufferError> {
        // SAFETY: descriptor chain cannot be referencing the same memory location as another chain
        let parsed_dc = unsafe { self.iovec.append_descriptor_chain(mem, head)? };
        // Reject chains smaller than the negotiated minimum: drop them from the
        // iovec again so the buffer stays consistent.
        if parsed_dc.length < self.min_buffer_size {
            self.iovec.drop_chain_back(&parsed_dc);
            return Err(AddRxBufferError::BufferTooSmall);
        }
        self.parsed_descriptors.push_back(parsed_dc);
        Ok(())
    }
    /// Returns the total size of available space in the buffer.
    #[inline(always)]
    fn capacity(&self) -> u32 {
        self.iovec.len()
    }
    /// Mark the first `size` bytes of available memory as used.
    ///
    /// # Safety:
    ///
    /// * The `RxBuffers` should include at least one parsed `DescriptorChain`.
    /// * `size` needs to be smaller or equal to total length of the first `DescriptorChain` stored
    ///   in the `RxBuffers`.
    unsafe fn mark_used(&mut self, mut bytes_written: u32, rx_queue: &mut Queue) {
        self.used_bytes = bytes_written;
        // Walk the parsed chains front-to-back, writing one used-ring element
        // per chain until all written bytes are accounted for.
        let mut used_heads: u16 = 0;
        for parsed_dc in self.parsed_descriptors.iter() {
            let used_bytes = bytes_written.min(parsed_dc.length);
            // Safe because we know head_index isn't out of bounds
            rx_queue
                .write_used_element(self.used_descriptors, parsed_dc.head_index, used_bytes)
                .unwrap();
            bytes_written -= used_bytes;
            self.used_descriptors += 1;
            used_heads += 1;
            if bytes_written == 0 {
                break;
            }
        }
        // We need to set num_buffers before dropping chains from `self.iovec`. Otherwise
        // when we set headers, we will iterate over new, yet unused chains instead of the ones
        // we need.
        self.header_set_num_buffers(used_heads);
        for _ in 0..used_heads {
            let parsed_dc = self
                .parsed_descriptors
                .pop_front()
                .expect("This should never happen if write to the buffer succeeded.");
            self.iovec.drop_chain_front(&parsed_dc);
        }
    }
    /// Write the number of descriptors used in VirtIO header
    fn header_set_num_buffers(&mut self, nr_descs: u16) {
        // We can unwrap here, because we have checked before that the `IoVecBufferMut` holds at
        // least one buffer with the proper size, depending on the feature negotiation. In any
        // case, the buffer holds memory of at least `std::mem::size_of::<virtio_net_hdr_v1>()`
        // bytes.
        self.iovec
            .write_all_volatile_at(
                &nr_descs.to_le_bytes(),
                std::mem::offset_of!(virtio_net_hdr_v1, num_buffers),
            )
            .unwrap()
    }
    /// This will let the guest know that about all the `DescriptorChain` object that has been
    /// used to receive a frame from the TAP.
    fn finish_frame(&mut self, rx_queue: &mut Queue) {
        // Publish the pending used elements and reset our bookkeeping.
        rx_queue.advance_next_used(self.used_descriptors);
        self.used_descriptors = 0;
        self.used_bytes = 0;
    }
    /// Return a slice of iovecs for the first slice in the buffer.
    /// Panics if there are no parsed descriptors.
    fn single_chain_slice_mut(&mut self) -> &mut [iovec] {
        let nr_iovecs = self.parsed_descriptors[0].nr_iovecs as usize;
        &mut self.iovec.as_iovec_mut_slice()[..nr_iovecs]
    }
    /// Return a slice of iovecs for all descriptor chains in the buffer.
    fn all_chains_slice_mut(&mut self) -> &mut [iovec] {
        self.iovec.as_iovec_mut_slice()
    }
}
/// VirtIO network device.
///
/// It emulates a network device able to exchange L2 frames between the guest
/// and a host-side tap device.
#[derive(Debug)]
pub struct Net {
    pub(crate) id: String,
    /// The backend for this device: a tap.
    pub tap: Tap,
    // Feature bits offered to / acknowledged by the guest driver.
    pub(crate) avail_features: u64,
    pub(crate) acked_features: u64,
    pub(crate) queues: Vec<Queue>,
    pub(crate) queue_evts: Vec<EventFd>,
    pub(crate) rx_rate_limiter: RateLimiter,
    pub(crate) tx_rate_limiter: RateLimiter,
    // Scratch buffer used for frames produced by the MMDS stack on the RX path.
    rx_frame_buf: [u8; MAX_BUFFER_SIZE],
    // Scratch buffer holding the headers of the TX frame currently processed.
    tx_frame_headers: [u8; frame_hdr_len()],
    pub(crate) config_space: ConfigSpace,
    pub(crate) guest_mac: Option<MacAddr>,
    pub(crate) device_state: DeviceState,
    pub(crate) activate_evt: EventFd,
    /// The MMDS stack corresponding to this interface.
    /// Only if MMDS transport has been associated with it.
    pub mmds_ns: Option<MmdsNetworkStack>,
    pub(crate) metrics: Arc<NetDeviceMetrics>,
    // Scatter-gather view over the guest buffers of the TX frame in flight.
    tx_buffer: IoVecBuffer,
    pub(crate) rx_buffer: RxBuffers,
}
impl Net {
    /// Create a new virtio network device with the given TAP interface.
    ///
    /// Builds the advertised feature set, the config space (MAC, if given)
    /// and the RX/TX queues with their event fds.
    pub fn new_with_tap(
        id: String,
        tap: Tap,
        guest_mac: Option<MacAddr>,
        rx_rate_limiter: RateLimiter,
        tx_rate_limiter: RateLimiter,
    ) -> Result<Self, NetError> {
        let mut avail_features = (1 << VIRTIO_NET_F_GUEST_CSUM)
            | (1 << VIRTIO_NET_F_CSUM)
            | (1 << VIRTIO_NET_F_GUEST_TSO4)
            | (1 << VIRTIO_NET_F_GUEST_TSO6)
            | (1 << VIRTIO_NET_F_GUEST_UFO)
            | (1 << VIRTIO_NET_F_HOST_TSO4)
            | (1 << VIRTIO_NET_F_HOST_TSO6)
            | (1 << VIRTIO_NET_F_HOST_UFO)
            | (1 << VIRTIO_F_VERSION_1)
            | (1 << VIRTIO_NET_F_MRG_RXBUF)
            | (1 << VIRTIO_RING_F_EVENT_IDX);
        let mut config_space = ConfigSpace::default();
        if let Some(mac) = guest_mac {
            config_space.guest_mac = mac;
            // Enabling feature for MAC address configuration
            // If not set, the driver will generate a random MAC address
            avail_features |= 1 << VIRTIO_NET_F_MAC;
        }
        // One event fd per queue, signalled by the guest on queue notification.
        let mut queue_evts = Vec::new();
        let mut queues = Vec::new();
        for size in NET_QUEUE_SIZES {
            queue_evts.push(EventFd::new(libc::EFD_NONBLOCK).map_err(NetError::EventFd)?);
            queues.push(Queue::new(size));
        }
        Ok(Net {
            id: id.clone(),
            tap,
            avail_features,
            acked_features: 0u64,
            queues,
            queue_evts,
            rx_rate_limiter,
            tx_rate_limiter,
            rx_frame_buf: [0u8; MAX_BUFFER_SIZE],
            tx_frame_headers: [0u8; frame_hdr_len()],
            config_space,
            guest_mac,
            device_state: DeviceState::Inactive,
            activate_evt: EventFd::new(libc::EFD_NONBLOCK).map_err(NetError::EventFd)?,
            mmds_ns: None,
            metrics: NetMetricsPerDevice::alloc(id),
            tx_buffer: Default::default(),
            rx_buffer: RxBuffers::new()?,
        })
    }
    /// Create a new virtio network device given the interface name.
    ///
    /// Opens the TAP device, configures its VNET header size, then delegates
    /// to [`Net::new_with_tap`].
    pub fn new(
        id: String,
        tap_if_name: &str,
        guest_mac: Option<MacAddr>,
        rx_rate_limiter: RateLimiter,
        tx_rate_limiter: RateLimiter,
    ) -> Result<Self, NetError> {
        let tap = Tap::open_named(tap_if_name).map_err(NetError::TapOpen)?;
        let vnet_hdr_size = i32::try_from(vnet_hdr_len()).unwrap();
        tap.set_vnet_hdr_size(vnet_hdr_size)
            .map_err(NetError::TapSetVnetHdrSize)?;
        Self::new_with_tap(id, tap, guest_mac, rx_rate_limiter, tx_rate_limiter)
    }
    /// Provides the ID of this net device.
    pub fn id(&self) -> &String {
        &self.id
    }
    /// Provides the MAC of this net device.
    /// `None` if no MAC was explicitly configured.
    pub fn guest_mac(&self) -> Option<&MacAddr> {
        self.guest_mac.as_ref()
    }
    /// Provides the host IFACE name of this net device.
    pub fn iface_name(&self) -> String {
        self.tap.if_name_as_str().to_string()
    }
    /// Provides the MmdsNetworkStack of this net device.
    pub fn mmds_ns(&self) -> Option<&MmdsNetworkStack> {
        self.mmds_ns.as_ref()
    }
    /// Configures the `MmdsNetworkStack` to allow device to forward MMDS requests.
    /// If the device already supports MMDS, updates the IPv4 address.
    pub fn configure_mmds_network_stack(&mut self, ipv4_addr: Ipv4Addr, mmds: Arc<Mutex<Mmds>>) {
        if let Some(mmds_ns) = self.mmds_ns.as_mut() {
            mmds_ns.set_ipv4_addr(ipv4_addr);
        } else {
            self.mmds_ns = Some(MmdsNetworkStack::new_with_defaults(Some(ipv4_addr), mmds))
        }
    }
    /// Disables the `MmdsNetworkStack` to prevent device to forward MMDS requests.
    pub fn disable_mmds_network_stack(&mut self) {
        self.mmds_ns = None
    }
    /// Provides a reference to the configured RX rate limiter.
    pub fn rx_rate_limiter(&self) -> &RateLimiter {
        &self.rx_rate_limiter
    }
    /// Provides a reference to the configured TX rate limiter.
    pub fn tx_rate_limiter(&self) -> &RateLimiter {
        &self.tx_rate_limiter
    }
    /// Trigger queue notification for the guest if we used enough descriptors
    /// for the notification to be enabled.
    /// https://docs.oasis-open.org/virtio/virtio/v1.1/csprd01/virtio-v1.1-csprd01.html#x1-320005
    /// 2.6.7.1 Driver Requirements: Used Buffer Notification Suppression
    fn try_signal_queue(&mut self, queue_type: NetQueue) -> Result<(), DeviceError> {
        let qidx = match queue_type {
            NetQueue::Rx => RX_INDEX,
            NetQueue::Tx => TX_INDEX,
        };
        // Publish pending used entries, then raise the interrupt only if the
        // queue's notification-suppression state says the guest wants one.
        self.queues[qidx].advance_used_ring_idx();
        if self.queues[qidx].prepare_kick() {
            self.interrupt_trigger()
                .trigger(VirtioInterruptType::Queue(qidx.try_into().unwrap()))
                .map_err(|err| {
                    self.metrics.event_fails.inc();
                    DeviceError::FailedSignalingIrq(err)
                })?;
        }
        Ok(())
    }
// Helper function to consume one op with `size` bytes from a rate limiter
fn rate_limiter_consume_op(rate_limiter: &mut RateLimiter, size: u64) -> bool {
if !rate_limiter.consume(1, TokenType::Ops) {
return false;
}
if !rate_limiter.consume(size, TokenType::Bytes) {
rate_limiter.manual_replenish(1, TokenType::Ops);
return false;
}
true
}
// Helper function to replenish one operation with `size` bytes from a rate limiter
fn rate_limiter_replenish_op(rate_limiter: &mut RateLimiter, size: u64) {
rate_limiter.manual_replenish(1, TokenType::Ops);
rate_limiter.manual_replenish(size, TokenType::Bytes);
}
    // Attempts to copy a single frame into the guest if there is enough
    // rate limiting budget.
    // Returns true on successful frame delivery.
    pub fn rate_limited_rx_single_frame(&mut self, frame_size: u32) -> bool {
        let rx_queue = &mut self.queues[RX_INDEX];
        if !Self::rate_limiter_consume_op(&mut self.rx_rate_limiter, frame_size as u64) {
            self.metrics.rx_rate_limiter_throttled.inc();
            return false;
        }
        // Budget available: publish the frame's used descriptors to the guest.
        self.rx_buffer.finish_frame(rx_queue);
        true
    }
/// Returns the minimum size of buffer we expect the guest to provide us depending on the
/// features we have negotiated with it
fn minimum_rx_buffer_size(&self) -> u32 {
if !self.has_feature(VIRTIO_NET_F_MRG_RXBUF as u64) {
if self.has_feature(VIRTIO_NET_F_GUEST_TSO4 as u64)
|| self.has_feature(VIRTIO_NET_F_GUEST_TSO6 as u64)
|| self.has_feature(VIRTIO_NET_F_GUEST_UFO as u64)
{
MAX_BUFFER_SIZE.try_into().unwrap()
} else {
1526
}
} else {
vnet_hdr_len().try_into().unwrap()
}
}
    /// Parse available RX `DescriptorChains` from the queue
    pub fn parse_rx_descriptors(&mut self) -> Result<(), InvalidAvailIdx> {
        // This is safe since we checked in the event handler that the device is activated.
        let mem = &self.device_state.active_state().unwrap().mem;
        let queue = &mut self.queues[RX_INDEX];
        while let Some(head) = queue.pop_or_enable_notification()? {
            let index = head.index;
            // SAFETY: we are only using this `DescriptorChain` here.
            if let Err(err) = unsafe { self.rx_buffer.add_buffer(mem, head) } {
                self.metrics.rx_fails.inc();
                // If guest uses dirty tricks to make us add more descriptors than
                // we can hold, just stop processing.
                if matches!(err, AddRxBufferError::Parsing(IoVecError::IovDequeOverflow)) {
                    error!("net: Could not add an RX descriptor: {err}");
                    queue.undo_pop();
                    break;
                }
                // Any other error (chain too small, parse failure): the chain is
                // unusable, so hand it straight back to the guest with 0 bytes.
                error!("net: Could not parse an RX descriptor: {err}");
                // Add this broken chain to the used_ring. It will be
                // reported to the guest on the next `rx_buffer.finish_frame` call.
                // SAFETY:
                // index is verified on `DescriptorChain` creation.
                queue
                    .write_used_element(self.rx_buffer.used_descriptors, index, 0)
                    .unwrap();
                self.rx_buffer.used_descriptors += 1;
            }
        }
        Ok(())
    }
    // Tries to detour the frame to MMDS and if MMDS doesn't accept it, sends it on the host TAP.
    //
    // Returns whether MMDS consumed the frame.
    //
    // `headers` is a scratch buffer into which the frame's header bytes are
    // copied for inspection; `frame_iovec` holds the complete frame (including
    // the VNET header).
    fn write_to_mmds_or_tap(
        mmds_ns: Option<&mut MmdsNetworkStack>,
        rate_limiter: &mut RateLimiter,
        headers: &mut [u8],
        frame_iovec: &IoVecBuffer,
        tap: &mut Tap,
        guest_mac: Option<MacAddr>,
        net_metrics: &NetDeviceMetrics,
    ) -> Result<bool, NetError> {
        // Read the frame headers from the IoVecBuffer
        let max_header_len = headers.len();
        let header_len = frame_iovec
            .read_volatile_at(&mut &mut *headers, 0, max_header_len)
            .map_err(|err| {
                error!("Received malformed TX buffer: {:?}", err);
                net_metrics.tx_malformed_frames.inc();
                NetError::VnetHeaderMissing
            })?;
        let headers = frame_bytes_from_buf(&headers[..header_len]).inspect_err(|_| {
            error!("VNET headers missing in TX frame");
            net_metrics.tx_malformed_frames.inc();
        })?;
        if let Some(ns) = mmds_ns
            && ns.is_mmds_frame(headers)
        {
            let mut frame = vec![0u8; frame_iovec.len() as usize - vnet_hdr_len()];
            // Ok to unwrap here, because we are passing a buffer that has the exact size
            // of the `IoVecBuffer` minus the VNET headers.
            frame_iovec
                .read_exact_volatile_at(&mut frame, vnet_hdr_len())
                .unwrap();
            let _ = ns.detour_frame(&frame);
            METRICS.mmds.rx_accepted.inc();
            // MMDS frames are not accounted by the rate limiter.
            Self::rate_limiter_replenish_op(rate_limiter, u64::from(frame_iovec.len()));
            // MMDS consumed the frame.
            return Ok(true);
        }
        // This frame goes to the TAP.
        // Check for guest MAC spoofing.
        if let Some(guest_mac) = guest_mac {
            let _ = EthernetFrame::from_bytes(headers).map(|eth_frame| {
                if guest_mac != eth_frame.src_mac() {
                    net_metrics.tx_spoofed_mac_count.inc();
                }
            });
        }
        let _metric = net_metrics.tap_write_agg.record_latency_metrics();
        // A failed TAP write is logged and counted but deliberately not
        // propagated: the packet is dropped, TX processing continues.
        match Self::write_tap(tap, frame_iovec) {
            Ok(_) => {
                let len = u64::from(frame_iovec.len());
                net_metrics.tx_bytes_count.add(len);
                net_metrics.tx_packets_count.inc();
                net_metrics.tx_count.inc();
            }
            Err(err) => {
                error!("Failed to write to tap: {:?}", err);
                net_metrics.tap_write_fails.inc();
            }
        };
        Ok(false)
    }
// We currently prioritize packets from the MMDS over regular network packets.
fn read_from_mmds_or_tap(&mut self) -> Result<Option<u32>, NetError> {
// We only want to read from TAP (or mmds) if we have at least 64K of available capacity as
// this is the max size of 1 packet.
// SAFETY:
// * MAX_BUFFER_SIZE is constant and fits into u32
#[allow(clippy::cast_possible_truncation)]
if self.rx_buffer.capacity() < MAX_BUFFER_SIZE as u32 {
self.parse_rx_descriptors()?;
// If after parsing the RX queue we still don't have enough capacity, stop processing RX
// frames.
if self.rx_buffer.capacity() < MAX_BUFFER_SIZE as u32 {
return Ok(None);
}
}
if let Some(ns) = self.mmds_ns.as_mut()
&& let Some(len) =
ns.write_next_frame(frame_bytes_from_buf_mut(&mut self.rx_frame_buf)?)
{
let len = len.get();
METRICS.mmds.tx_frames.inc();
METRICS.mmds.tx_bytes.add(len as u64);
init_vnet_hdr(&mut self.rx_frame_buf);
self.rx_buffer
.iovec
.write_all_volatile_at(&self.rx_frame_buf[..vnet_hdr_len() + len], 0)?;
// SAFETY:
// * len will never be bigger that u32::MAX because mmds is bound
// by the size of `self.rx_frame_buf` which is MAX_BUFFER_SIZE size.
let len: u32 = (vnet_hdr_len() + len).try_into().unwrap();
// SAFETY:
// * We checked that `rx_buffer` includes at least one `DescriptorChain`
// * `rx_frame_buf` has size of `MAX_BUFFER_SIZE` and all `DescriptorChain` objects are
// at least that big.
unsafe {
self.rx_buffer.mark_used(len, &mut self.queues[RX_INDEX]);
}
return Ok(Some(len));
}
// SAFETY:
// * We ensured that `self.rx_buffer` has at least one DescriptorChain parsed in it.
let len = unsafe { self.read_tap().map_err(NetError::IO) }?;
// SAFETY:
// * len will never be bigger that u32::MAX
let len: u32 = len.try_into().unwrap();
// SAFETY:
// * `rx_buffer` has at least one `DescriptorChain`
// * `read_tap` passes the first `DescriptorChain` to `readv` so we can't have read more
// bytes than its capacity.
unsafe {
self.rx_buffer.mark_used(len, &mut self.queues[RX_INDEX]);
}
Ok(Some(len))
}
/// Read as many frames as possible.
fn process_rx(&mut self) -> Result<(), DeviceError> {
loop {
match self.read_from_mmds_or_tap() {
Ok(None) => {
self.metrics.no_rx_avail_buffer.inc();
break;
}
Ok(Some(bytes)) => {
self.metrics.rx_count.inc();
self.metrics.rx_bytes_count.add(bytes as u64);
self.metrics.rx_packets_count.inc();
if !self.rate_limited_rx_single_frame(bytes) {
break;
}
}
Err(NetError::IO(err)) => {
// The tap device is non-blocking, so any error aside from EAGAIN is
// unexpected.
match err.raw_os_error() {
Some(err) if err == EAGAIN => (),
_ => {
error!("Failed to read tap: {:?}", err);
self.metrics.tap_read_fails.inc();
return Err(DeviceError::FailedReadTap);
}
};
break;
}
Err(NetError::InvalidAvailIdx(err)) => {
return Err(DeviceError::InvalidAvailIdx(err));
}
Err(err) => {
error!("Spurious error in network RX: {:?}", err);
}
}
}
self.try_signal_queue(NetQueue::Rx)
}
fn resume_rx(&mut self) -> Result<(), DeviceError> {
// First try to handle any deferred frame
if self.rx_buffer.used_bytes != 0 {
// If can't finish sending this frame, re-set it as deferred and return; we can't
// process any more frames from the TAP.
if !self.rate_limited_rx_single_frame(self.rx_buffer.used_bytes) {
return Ok(());
}
}
self.process_rx()
}
fn process_tx(&mut self) -> Result<(), DeviceError> {
// This is safe since we checked in the event handler that the device is activated.
let mem = &self.device_state.active_state().unwrap().mem;
// The MMDS network stack works like a state machine, based on synchronous calls, and
// without being added to any event loop. If any frame is accepted by the MMDS, we also
// trigger a process_rx() which checks if there are any new frames to be sent, starting
// with the MMDS network stack.
let mut process_rx_for_mmds = false;
let mut used_any = false;
let tx_queue = &mut self.queues[TX_INDEX];
while let Some(head) = tx_queue.pop_or_enable_notification()? {
self.metrics
.tx_remaining_reqs_count
.add(tx_queue.len().into());
let head_index = head.index;
// Parse IoVecBuffer from descriptor head
// SAFETY: This descriptor chain is only loaded once
// virtio requests are handled sequentially so no two IoVecBuffers
// are live at the same time, meaning this has exclusive ownership over the memory
if unsafe { self.tx_buffer.load_descriptor_chain(mem, head).is_err() } {
self.metrics.tx_fails.inc();
tx_queue.add_used(head_index, 0)?;
continue;
};
// We only handle frames that are up to MAX_BUFFER_SIZE
if self.tx_buffer.len() as usize > MAX_BUFFER_SIZE {
error!("net: received too big frame from driver");
self.metrics.tx_malformed_frames.inc();
tx_queue.add_used(head_index, 0)?;
continue;
}
if !Self::rate_limiter_consume_op(
&mut self.tx_rate_limiter,
u64::from(self.tx_buffer.len()),
) {
tx_queue.undo_pop();
self.metrics.tx_rate_limiter_throttled.inc();
break;
}
let frame_consumed_by_mmds = Self::write_to_mmds_or_tap(
self.mmds_ns.as_mut(),
&mut self.tx_rate_limiter,
&mut self.tx_frame_headers,
&self.tx_buffer,
&mut self.tap,
self.guest_mac,
&self.metrics,
)
.unwrap_or(false);
if frame_consumed_by_mmds && self.rx_buffer.used_bytes == 0 {
// MMDS consumed this frame/request, let's also try to process the response.
process_rx_for_mmds = true;
}
tx_queue.add_used(head_index, 0)?;
used_any = true;
}
if !used_any {
self.metrics.no_tx_avail_buffer.inc();
}
// Cleanup tx_buffer to ensure no two buffers point at the same memory
self.tx_buffer.clear();
self.try_signal_queue(NetQueue::Tx)?;
// An incoming frame for the MMDS may trigger the transmission of a new message.
if process_rx_for_mmds {
self.process_rx()
} else {
Ok(())
}
}
/// Builds the offload features we will setup on the TAP device based on the features that the
/// guest supports.
pub fn build_tap_offload_features(guest_supported_features: u64) -> u32 {
let add_if_supported =
|tap_features: &mut u32, supported_features: u64, tap_flag: u32, virtio_flag: u32| {
if supported_features & (1 << virtio_flag) != 0 {
*tap_features |= tap_flag;
}
};
let mut tap_features: u32 = 0;
add_if_supported(
&mut tap_features,
guest_supported_features,
generated::TUN_F_CSUM,
VIRTIO_NET_F_GUEST_CSUM,
);
add_if_supported(
&mut tap_features,
guest_supported_features,
generated::TUN_F_UFO,
VIRTIO_NET_F_GUEST_UFO,
);
add_if_supported(
&mut tap_features,
guest_supported_features,
generated::TUN_F_TSO4,
VIRTIO_NET_F_GUEST_TSO4,
);
add_if_supported(
&mut tap_features,
guest_supported_features,
generated::TUN_F_TSO6,
VIRTIO_NET_F_GUEST_TSO6,
);
tap_features
}
/// Updates the parameters for the rate limiters
pub fn patch_rate_limiters(
&mut self,
rx_bytes: BucketUpdate,
rx_ops: BucketUpdate,
tx_bytes: BucketUpdate,
tx_ops: BucketUpdate,
) {
self.rx_rate_limiter.update_buckets(rx_bytes, rx_ops);
self.tx_rate_limiter.update_buckets(tx_bytes, tx_ops);
}
/// Reads a frame from the TAP device inside the first descriptor held by `self.rx_buffer`.
///
/// # Safety
///
/// `self.rx_buffer` needs to have at least one descriptor chain parsed
pub unsafe fn read_tap(&mut self) -> std::io::Result<usize> {
let slice = if self.has_feature(VIRTIO_NET_F_MRG_RXBUF as u64) {
self.rx_buffer.all_chains_slice_mut()
} else {
self.rx_buffer.single_chain_slice_mut()
};
self.tap.read_iovec(slice)
}
fn write_tap(tap: &mut Tap, buf: &IoVecBuffer) -> std::io::Result<usize> {
tap.write_iovec(buf)
}
/// Process a single RX queue event.
///
/// This is called by the event manager responding to the guest adding a new
/// buffer in the RX queue.
pub fn process_rx_queue_event(&mut self) {
self.metrics.rx_queue_event_count.inc();
if let Err(err) = self.queue_evts[RX_INDEX].read() {
// rate limiters present but with _very high_ allowed rate
error!("Failed to get rx queue event: {:?}", err);
self.metrics.event_fails.inc();
return;
} else {
self.parse_rx_descriptors().unwrap();
}
if self.rx_rate_limiter.is_blocked() {
self.metrics.rx_rate_limiter_throttled.inc();
} else {
// If the limiter is not blocked, resume the receiving of bytes.
self.resume_rx()
.unwrap_or_else(|err| report_net_event_fail(&self.metrics, err));
}
}
pub fn process_tap_rx_event(&mut self) {
// This is safe since we checked in the event handler that the device is activated.
self.metrics.rx_tap_event_count.inc();
// While limiter is blocked, don't process any more incoming.
if self.rx_rate_limiter.is_blocked() {
self.metrics.rx_rate_limiter_throttled.inc();
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/net/test_utils.rs | src/vmm/src/devices/virtio/net/test_utils.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![doc(hidden)]
use std::fs::File;
use std::mem;
use std::os::raw::c_ulong;
use std::os::unix::io::{AsRawFd, FromRawFd};
use std::process::Command;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use crate::devices::virtio::net::Net;
#[cfg(test)]
use crate::devices::virtio::net::device::vnet_hdr_len;
use crate::devices::virtio::net::generated::net_device_flags;
use crate::devices::virtio::net::tap::{IfReqBuilder, Tap};
use crate::devices::virtio::queue::Queue;
use crate::devices::virtio::test_utils::VirtQueue;
use crate::mmds::data_store::Mmds;
use crate::mmds::ns::MmdsNetworkStack;
use crate::rate_limiter::RateLimiter;
use crate::utils::net::mac::MacAddr;
use crate::vstate::memory::{GuestAddress, GuestMemoryMmap};
static NEXT_INDEX: AtomicUsize = AtomicUsize::new(1);
pub fn default_net() -> Net {
let next_tap = NEXT_INDEX.fetch_add(1, Ordering::SeqCst);
// Id is the firecracker-facing identifier, e.g. local to the FC process. We thus do not need to
// make sure it is globally unique
let tap_device_id = format!("net-device{}", next_tap);
// This is the device name on the host, and thus needs to be unique between all firecracker
// processes. We cannot use the above counter to ensure this uniqueness (as it is
// per-process). Thus, ask the kernel to assign us a number.
let tap_if_name = "net-device%d";
let guest_mac = default_guest_mac();
let mut net = Net::new(
tap_device_id,
tap_if_name,
Some(guest_mac),
RateLimiter::default(),
RateLimiter::default(),
)
.unwrap();
net.configure_mmds_network_stack(
MmdsNetworkStack::default_ipv4_addr(),
Arc::new(Mutex::new(Mmds::default())),
);
enable(&net.tap);
net
}
pub fn default_net_no_mmds() -> Net {
let next_tap = NEXT_INDEX.fetch_add(1, Ordering::SeqCst);
let tap_device_id = format!("net-device{}", next_tap);
let guest_mac = default_guest_mac();
let net = Net::new(
tap_device_id,
"net-device%d",
Some(guest_mac),
RateLimiter::default(),
RateLimiter::default(),
)
.unwrap();
enable(&net.tap);
net
}
#[derive(Debug)]
pub enum NetQueue {
Rx,
Tx,
}
#[derive(Debug)]
pub enum NetEvent {
RxQueue,
RxRateLimiter,
Tap,
TxQueue,
TxRateLimiter,
}
#[derive(Debug)]
pub struct TapTrafficSimulator {
socket: File,
send_addr: libc::sockaddr_ll,
}
impl TapTrafficSimulator {
pub fn new(tap_index: i32) -> Self {
// Create sockaddr_ll struct.
// SAFETY: sockaddr_storage has no invariants and can be safely zeroed.
let mut storage: libc::sockaddr_storage = unsafe { mem::zeroed() };
let send_addr_ptr = &mut storage as *mut libc::sockaddr_storage;
// SAFETY: `sock_addr` is a valid pointer and safe to dereference.
unsafe {
let sock_addr: *mut libc::sockaddr_ll = send_addr_ptr.cast::<libc::sockaddr_ll>();
(*sock_addr).sll_family = libc::sa_family_t::try_from(libc::AF_PACKET).unwrap();
(*sock_addr).sll_protocol = u16::try_from(libc::ETH_P_ALL).unwrap().to_be();
(*sock_addr).sll_halen = u8::try_from(libc::ETH_ALEN).unwrap();
(*sock_addr).sll_ifindex = tap_index;
}
// Bind socket to tap interface.
let socket = create_socket();
// SAFETY: Call is safe because parameters are valid.
let ret = unsafe {
libc::bind(
socket.as_raw_fd(),
send_addr_ptr.cast(),
libc::socklen_t::try_from(mem::size_of::<libc::sockaddr_ll>()).unwrap(),
)
};
if ret == -1 {
panic!("Can't create TapChannel");
}
// Enable nonblocking
// SAFETY: Call is safe because parameters are valid.
let ret = unsafe { libc::fcntl(socket.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK) };
if ret == -1 {
panic!("Couldn't make TapChannel non-blocking");
}
Self {
socket,
// SAFETY: size_of::<libc::sockaddr_storage>() is greater than
// sizeof::<libc::sockaddr_ll>(), so to return an owned value of sockaddr_ll
// from the stack-local libc::sockaddr_storage that we have, we need to
// 1. Create a zeroed out libc::sockaddr_ll,
// 2. Copy over the first size_of::<libc::sockaddr_ll>() bytes into the struct we want
// to return
// We cannot simply return "*(send_addr_ptr as *const libc::sockaddr_ll)", as this
// would return a reference to a variable that lives in the stack frame of the current
// function, and which will no longer be valid after returning.
// transmute_copy does all this for us.
// Note that this is how these structures are intended to be used in C.
send_addr: unsafe { mem::transmute_copy(&storage) },
}
}
pub fn push_tx_packet(&self, buf: &[u8]) {
// SAFETY: The call is safe since the parameters are valid.
let res = unsafe {
libc::sendto(
self.socket.as_raw_fd(),
buf.as_ptr().cast(),
buf.len(),
0,
(&self.send_addr as *const libc::sockaddr_ll).cast(),
libc::socklen_t::try_from(mem::size_of::<libc::sockaddr_ll>()).unwrap(),
)
};
if res == -1 {
panic!("Can't inject tx_packet");
}
}
pub fn pop_rx_packet(&self, buf: &mut [u8]) -> bool {
// SAFETY: The call is safe since the parameters are valid.
let ret = unsafe {
libc::recvfrom(
self.socket.as_raw_fd(),
buf.as_ptr() as *mut _,
buf.len(),
0,
(&mut mem::zeroed() as *mut libc::sockaddr_storage).cast(),
&mut libc::socklen_t::try_from(mem::size_of::<libc::sockaddr_storage>()).unwrap(),
)
};
if ret == -1 {
return false;
}
true
}
}
pub fn create_socket() -> File {
// SAFETY: This is safe since we check the return value.
let socket = unsafe { libc::socket(libc::AF_PACKET, libc::SOCK_RAW, libc::ETH_P_ALL.to_be()) };
if socket < 0 {
panic!("Unable to create tap socket");
}
// SAFETY: This is safe; nothing else will use or hold onto the raw socket fd.
unsafe { File::from_raw_fd(socket) }
}
// Returns handles to virtio queues creation/activation and manipulation.
pub fn virtqueues(mem: &GuestMemoryMmap) -> (VirtQueue<'_>, VirtQueue<'_>) {
let rxq = VirtQueue::new(GuestAddress(0), mem, 16);
let txq = VirtQueue::new(GuestAddress(0x1000), mem, 16);
assert!(rxq.end().0 < txq.start().0);
(rxq, txq)
}
pub fn if_index(tap: &Tap) -> i32 {
let sock = create_socket();
let ifreq = IfReqBuilder::new()
.if_name(&tap.if_name)
.execute(
&sock,
c_ulong::from(super::generated::sockios::SIOCGIFINDEX),
)
.unwrap();
// SAFETY: Using this union variant is safe since `SIOCGIFINDEX` returns an integer.
unsafe { ifreq.ifr_ifru.ifru_ivalue }
}
/// Enable the tap interface.
pub fn enable(tap: &Tap) {
// Disable IPv6 router advertisement requests
Command::new("sh")
.arg("-c")
.arg(format!(
"echo 0 > /proc/sys/net/ipv6/conf/{}/accept_ra",
tap.if_name_as_str()
))
.output()
.unwrap();
let sock = create_socket();
IfReqBuilder::new()
.if_name(&tap.if_name)
.flags(
(net_device_flags::IFF_UP
| net_device_flags::IFF_RUNNING
| net_device_flags::IFF_NOARP)
.try_into()
.unwrap(),
)
.execute(
&sock,
c_ulong::from(super::generated::sockios::SIOCSIFFLAGS),
)
.unwrap();
}
#[cfg(test)]
pub(crate) fn inject_tap_tx_frame(net: &Net, len: usize) -> Vec<u8> {
use std::os::unix::ffi::OsStrExt;
assert!(len >= vnet_hdr_len());
let tap_traffic_simulator = TapTrafficSimulator::new(if_index(&net.tap));
let mut frame = vmm_sys_util::rand::rand_alphanumerics(len - vnet_hdr_len())
.as_bytes()
.to_vec();
tap_traffic_simulator.push_tx_packet(&frame);
frame.splice(0..0, vec![b'\0'; vnet_hdr_len()]);
frame
}
pub fn default_guest_mac() -> MacAddr {
MacAddr::from_str("11:22:33:44:55:66").unwrap()
}
pub fn set_mac(net: &mut Net, mac: MacAddr) {
net.guest_mac = Some(mac);
net.config_space.guest_mac = mac;
}
// Assigns "guest virtio driver" activated queues to the net device.
pub fn assign_queues(net: &mut Net, rxq: Queue, txq: Queue) {
net.queues.clear();
net.queues.push(rxq);
net.queues.push(txq);
}
#[cfg(test)]
#[allow(clippy::cast_possible_truncation)]
#[allow(clippy::undocumented_unsafe_blocks)]
pub mod test {
use std::os::unix::ffi::OsStrExt;
use std::sync::{Arc, Mutex, MutexGuard};
use std::{cmp, fmt};
use event_manager::{EventManager, SubscriberId, SubscriberOps};
use crate::check_metric_after_block;
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::net::device::vnet_hdr_len;
use crate::devices::virtio::net::generated::ETH_HLEN;
use crate::devices::virtio::net::test_utils::{
NetEvent, NetQueue, assign_queues, default_net, inject_tap_tx_frame,
};
use crate::devices::virtio::net::{MAX_BUFFER_SIZE, Net, RX_INDEX, TX_INDEX};
use crate::devices::virtio::queue::{VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE};
use crate::devices::virtio::test_utils::{VirtQueue, VirtqDesc, default_interrupt};
use crate::devices::virtio::transport::VirtioInterruptType;
use crate::logger::IncMetric;
use crate::vstate::memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
pub struct TestHelper<'a> {
pub event_manager: EventManager<Arc<Mutex<Net>>>,
pub subscriber_id: SubscriberId,
pub net: Arc<Mutex<Net>>,
pub mem: &'a GuestMemoryMmap,
pub rxq: VirtQueue<'a>,
pub txq: VirtQueue<'a>,
}
impl fmt::Debug for TestHelper<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TestHelper")
.field("event_manager", &"?")
.field("subscriber_id", &self.subscriber_id)
.field("net", &self.net)
.field("mem", &self.mem)
.field("rxq", &self.rxq)
.field("txq", &self.txq)
.finish()
}
}
impl<'a> TestHelper<'a> {
const QUEUE_SIZE: u16 = 16;
pub fn get_default(mem: &'a GuestMemoryMmap) -> TestHelper<'a> {
let mut event_manager = EventManager::new().unwrap();
let mut net = default_net();
let rxq = VirtQueue::new(GuestAddress(0), mem, Self::QUEUE_SIZE);
let txq = VirtQueue::new(
rxq.end().unchecked_align_up(VirtqDesc::ALIGNMENT),
mem,
Self::QUEUE_SIZE,
);
assign_queues(&mut net, rxq.create_queue(), txq.create_queue());
let net = Arc::new(Mutex::new(net));
let subscriber_id = event_manager.add_subscriber(net.clone());
Self {
event_manager,
subscriber_id,
net,
mem,
rxq,
txq,
}
}
pub fn net(&mut self) -> MutexGuard<'_, Net> {
self.net.lock().unwrap()
}
pub fn activate_net(&mut self) {
let interrupt = default_interrupt();
self.net
.lock()
.unwrap()
.activate(self.mem.clone(), interrupt)
.unwrap();
// Process the activate event.
let ev_count = self.event_manager.run_with_timeout(100).unwrap();
assert_eq!(ev_count, 1);
}
pub fn simulate_event(&mut self, event: NetEvent) {
match event {
NetEvent::RxQueue => self.net().process_rx_queue_event(),
NetEvent::RxRateLimiter => self.net().process_rx_rate_limiter_event(),
NetEvent::Tap => self.net().process_tap_rx_event(),
NetEvent::TxQueue => self.net().process_tx_queue_event(),
NetEvent::TxRateLimiter => self.net().process_tx_rate_limiter_event(),
};
}
pub fn data_addr(&self) -> u64 {
self.txq.end().raw_value()
}
pub fn add_desc_chain(
&mut self,
queue: NetQueue,
addr_offset: u64,
desc_list: &[(u16, u32, u16)],
) {
// Get queue and event_fd.
let net = self.net.lock().unwrap();
let (queue, event_fd) = match queue {
NetQueue::Rx => (&self.rxq, &net.queue_evts[RX_INDEX]),
NetQueue::Tx => (&self.txq, &net.queue_evts[TX_INDEX]),
};
// Create the descriptor chain.
let mut iter = desc_list.iter().peekable();
let mut addr = self.data_addr() + addr_offset;
while let Some(&(index, len, flags)) = iter.next() {
let desc = &queue.dtable[index as usize];
desc.set(addr, len, flags, 0);
if let Some(&&(next_index, _, _)) = iter.peek() {
desc.flags.set(flags | VIRTQ_DESC_F_NEXT);
desc.next.set(next_index);
}
addr += u64::from(len);
// Add small random gaps between descriptor addresses in order to make sure we
// don't blindly read contiguous memory.
addr += u64::from(vmm_sys_util::rand::xor_pseudo_rng_u32()) % 10;
}
// Mark the chain as available.
if let Some(&(index, _, _)) = desc_list.first() {
let ring_index = queue.avail.idx.get();
queue.avail.ring[ring_index as usize].set(index);
queue.avail.idx.set(ring_index + 1);
}
event_fd.write(1).unwrap();
}
/// Generate a tap frame of `frame_len` and check that it is not read and
/// the descriptor chain has been discarded
pub fn check_rx_discarded_buffer(&mut self, frame_len: usize) -> Vec<u8> {
let old_used_descriptors = self.net().rx_buffer.used_descriptors;
// Inject frame to tap and run epoll.
let frame = inject_tap_tx_frame(&self.net(), frame_len);
check_metric_after_block!(
self.net().metrics.rx_packets_count,
0,
self.event_manager.run_with_timeout(100).unwrap()
);
// Check that the descriptor chain has been discarded.
assert_eq!(
self.net().rx_buffer.used_descriptors,
old_used_descriptors + 1
);
assert!(
self.net()
.interrupt_trigger()
.has_pending_interrupt(VirtioInterruptType::Queue(RX_INDEX as u16))
);
frame
}
/// Check that after adding a valid Rx queue descriptor chain a previously deferred frame
/// is eventually received by the guest
pub fn check_rx_queue_resume(&mut self, expected_frame: &[u8]) {
// Need to call this to flush all previous frame
// and advance RX queue.
self.net().finish_frame();
let used_idx = self.rxq.used.idx.get();
// Add a valid Rx avail descriptor chain and run epoll.
self.add_desc_chain(
NetQueue::Rx,
0,
&[(0, MAX_BUFFER_SIZE as u32, VIRTQ_DESC_F_WRITE)],
);
check_metric_after_block!(
self.net().metrics.rx_packets_count,
1,
self.event_manager.run_with_timeout(100).unwrap()
);
// Check that the expected frame was sent to the Rx queue eventually.
assert_eq!(self.rxq.used.idx.get(), used_idx + 1);
assert!(
self.net()
.interrupt_trigger()
.has_pending_interrupt(VirtioInterruptType::Queue(RX_INDEX as u16))
);
self.rxq
.check_used_elem(used_idx, 0, expected_frame.len().try_into().unwrap());
self.rxq.dtable[0].check_data(expected_frame);
}
// Generates a frame of `frame_len` and writes it to the provided descriptor chain.
// Doesn't generate an error if the descriptor chain is longer than `frame_len`.
pub fn write_tx_frame(&self, desc_list: &[(u16, u32, u16)], frame_len: usize) -> Vec<u8> {
let mut frame = vmm_sys_util::rand::rand_alphanumerics(frame_len)
.as_bytes()
.to_vec();
let prefix_len = vnet_hdr_len() + ETH_HLEN as usize;
frame.splice(..prefix_len, vec![0; prefix_len]);
let mut frame_slice = frame.as_slice();
for &(index, len, _) in desc_list {
let chunk_size = cmp::min(frame_slice.len(), len as usize);
self.mem
.write_slice(
&frame_slice[..chunk_size],
GuestAddress::new(self.txq.dtable[index as usize].addr.get()),
)
.unwrap();
frame_slice = &frame_slice[chunk_size..];
}
frame
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/net/mod.rs | src/vmm/src/devices/virtio/net/mod.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Implements a virtio network device.
use std::io;
/// Maximum size of the queue for network device.
pub const NET_QUEUE_MAX_SIZE: u16 = 256;
/// Maximum size of the frame buffers handled by this device.
pub const MAX_BUFFER_SIZE: usize = 65562;
/// The number of queues of the network device.
pub const NET_NUM_QUEUES: usize = 2;
pub const NET_QUEUE_SIZES: [u16; NET_NUM_QUEUES] = [NET_QUEUE_MAX_SIZE; NET_NUM_QUEUES];
/// The index of the rx queue from Net device queues/queues_evts vector.
pub const RX_INDEX: usize = 0;
/// The index of the tx queue from Net device queues/queues_evts vector.
pub const TX_INDEX: usize = 1;
pub mod device;
mod event_handler;
pub mod metrics;
pub mod persist;
mod tap;
pub mod test_utils;
mod generated;
pub use tap::{Tap, TapError};
use vm_memory::VolatileMemoryError;
pub use self::device::Net;
use super::iovec::IoVecError;
use crate::devices::virtio::queue::{InvalidAvailIdx, QueueError};
/// Enum representing the Net device queue types
#[derive(Debug)]
pub enum NetQueue {
/// The RX queue
Rx,
/// The TX queue
Tx,
}
/// Errors the network device can trigger.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum NetError {
/// Open tap device failed: {0}
TapOpen(TapError),
/// Setting vnet header size failed: {0}
TapSetVnetHdrSize(TapError),
/// EventFd error: {0}
EventFd(io::Error),
/// IO error: {0}
IO(io::Error),
/// Error writing in guest memory: {0}
GuestMemoryError(#[from] VolatileMemoryError),
/// The VNET header is missing from the frame
VnetHeaderMissing,
/// IoVecBuffer(Mut) error: {0}
IoVecError(#[from] IoVecError),
/// virtio queue error: {0}
QueueError(#[from] QueueError),
/// {0}
InvalidAvailIdx(#[from] InvalidAvailIdx),
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/net/event_handler.rs | src/vmm/src/devices/virtio/net/event_handler.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use event_manager::{EventOps, Events, MutEventSubscriber};
use vmm_sys_util::epoll::EventSet;
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::net::device::Net;
use crate::devices::virtio::net::{RX_INDEX, TX_INDEX};
use crate::logger::{IncMetric, error, warn};
impl Net {
const PROCESS_ACTIVATE: u32 = 0;
const PROCESS_VIRTQ_RX: u32 = 1;
const PROCESS_VIRTQ_TX: u32 = 2;
const PROCESS_TAP_RX: u32 = 3;
const PROCESS_RX_RATE_LIMITER: u32 = 4;
const PROCESS_TX_RATE_LIMITER: u32 = 5;
fn register_runtime_events(&self, ops: &mut EventOps) {
if let Err(err) = ops.add(Events::with_data(
&self.queue_evts[RX_INDEX],
Self::PROCESS_VIRTQ_RX,
EventSet::IN,
)) {
error!("Failed to register rx queue event: {}", err);
}
if let Err(err) = ops.add(Events::with_data(
&self.queue_evts[TX_INDEX],
Self::PROCESS_VIRTQ_TX,
EventSet::IN,
)) {
error!("Failed to register tx queue event: {}", err);
}
if let Err(err) = ops.add(Events::with_data(
&self.rx_rate_limiter,
Self::PROCESS_RX_RATE_LIMITER,
EventSet::IN,
)) {
error!("Failed to register rx queue event: {}", err);
}
if let Err(err) = ops.add(Events::with_data(
&self.tx_rate_limiter,
Self::PROCESS_TX_RATE_LIMITER,
EventSet::IN,
)) {
error!("Failed to register tx queue event: {}", err);
}
if let Err(err) = ops.add(Events::with_data(
&self.tap,
Self::PROCESS_TAP_RX,
EventSet::IN | EventSet::EDGE_TRIGGERED,
)) {
error!("Failed to register tap event: {}", err);
}
}
fn register_activate_event(&self, ops: &mut EventOps) {
if let Err(err) = ops.add(Events::with_data(
&self.activate_evt,
Self::PROCESS_ACTIVATE,
EventSet::IN,
)) {
error!("Failed to register activate event: {}", err);
}
}
fn process_activate_event(&self, ops: &mut EventOps) {
if let Err(err) = self.activate_evt.read() {
error!("Failed to consume net activate event: {:?}", err);
}
self.register_runtime_events(ops);
if let Err(err) = ops.remove(Events::with_data(
&self.activate_evt,
Self::PROCESS_ACTIVATE,
EventSet::IN,
)) {
error!("Failed to un-register activate event: {}", err);
}
}
}
impl MutEventSubscriber for Net {
fn process(&mut self, event: Events, ops: &mut EventOps) {
let source = event.data();
let event_set = event.event_set();
// TODO: also check for errors. Pending high level discussions on how we want
// to handle errors in devices.
let supported_events = EventSet::IN;
if !supported_events.contains(event_set) {
warn!(
"Received unknown event: {:?} from source: {:?}",
event_set, source
);
return;
}
if self.is_activated() {
match source {
Self::PROCESS_ACTIVATE => self.process_activate_event(ops),
Self::PROCESS_VIRTQ_RX => self.process_rx_queue_event(),
Self::PROCESS_VIRTQ_TX => self.process_tx_queue_event(),
Self::PROCESS_TAP_RX => self.process_tap_rx_event(),
Self::PROCESS_RX_RATE_LIMITER => self.process_rx_rate_limiter_event(),
Self::PROCESS_TX_RATE_LIMITER => self.process_tx_rate_limiter_event(),
_ => {
warn!("Net: Spurious event received: {:?}", source);
self.metrics.event_fails.inc();
}
}
} else {
warn!(
"Net: The device is not yet activated. Spurious event received: {:?}",
source
);
}
}
fn init(&mut self, ops: &mut EventOps) {
// This function can be called during different points in the device lifetime:
// - shortly after device creation,
// - on device activation (is-activated already true at this point),
// - on device restore from snapshot.
if self.is_activated() {
self.register_runtime_events(ops);
} else {
self.register_activate_event(ops);
}
}
}
#[cfg(test)]
pub mod tests {
use crate::devices::virtio::net::test_utils::NetQueue;
use crate::devices::virtio::net::test_utils::test::TestHelper;
use crate::devices::virtio::net::{MAX_BUFFER_SIZE, TX_INDEX};
use crate::test_utils::single_region_mem;
#[test]
fn test_event_handler() {
let mem = single_region_mem(2 * MAX_BUFFER_SIZE);
let mut th = TestHelper::get_default(&mem);
// Push a queue event, use the TX_QUEUE_EVENT in this test.
th.add_desc_chain(NetQueue::Tx, 0, &[(0, 4096, 0)]);
// EventManager should report no events since net has only registered
// its activation event so far (even though there is also a queue event pending).
let ev_count = th.event_manager.run_with_timeout(50).unwrap();
assert_eq!(ev_count, 0);
// Manually force a queue event and check it's ignored pre-activation.
th.net().queue_evts[TX_INDEX].write(1).unwrap();
let ev_count = th.event_manager.run_with_timeout(50).unwrap();
assert_eq!(ev_count, 0);
// Validate there was no queue operation.
assert_eq!(th.txq.used.idx.get(), 0);
// Now activate the device.
th.activate_net();
// Handle the previously pushed queue event through EventManager.
th.event_manager
.run_with_timeout(50)
.expect("Metrics event timeout or error.");
// Make sure the data queue advanced.
assert_eq!(th.txq.used.idx.get(), 1);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/net/metrics.rs | src/vmm/src/devices/virtio/net/metrics.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the metrics system for Network devices.
//!
//! # Metrics format
//! The metrics are flushed in JSON when requested by vmm::logger::metrics::METRICS.write().
//!
//! ## JSON example with metrics:
//! ```json
//! {
//! "net_eth0": {
//! "activate_fails": "SharedIncMetric",
//! "cfg_fails": "SharedIncMetric",
//! "mac_address_updates": "SharedIncMetric",
//! "no_rx_avail_buffer": "SharedIncMetric",
//! "no_tx_avail_buffer": "SharedIncMetric",
//! ...
//! }
//! "net_eth1": {
//! "activate_fails": "SharedIncMetric",
//! "cfg_fails": "SharedIncMetric",
//! "mac_address_updates": "SharedIncMetric",
//! "no_rx_avail_buffer": "SharedIncMetric",
//! "no_tx_avail_buffer": "SharedIncMetric",
//! ...
//! }
//! ...
//! "net_iface_id": {
//! "activate_fails": "SharedIncMetric",
//! "cfg_fails": "SharedIncMetric",
//! "mac_address_updates": "SharedIncMetric",
//! "no_rx_avail_buffer": "SharedIncMetric",
//! "no_tx_avail_buffer": "SharedIncMetric",
//! ...
//! }
//! "net": {
//! "activate_fails": "SharedIncMetric",
//! "cfg_fails": "SharedIncMetric",
//! "mac_address_updates": "SharedIncMetric",
//! "no_rx_avail_buffer": "SharedIncMetric",
//! "no_tx_avail_buffer": "SharedIncMetric",
//! ...
//! }
//! }
//! ```
//! Each `net` field in the example above is a serializable `NetDeviceMetrics` structure
//! collecting metrics such as `activate_fails`, `cfg_fails`, etc. for the network device.
//! `net_eth0` represents metrics for the endpoint "/network-interfaces/eth0",
//! `net_eth1` represents metrics for the endpoint "/network-interfaces/eth1", and
//! `net_iface_id` represents metrics for the endpoint "/network-interfaces/{iface_id}"
//! network device respectively and `net` is the aggregate of all the per device metrics.
//!
//! # Limitations
//! Network devices currently do not have `vmm::logger::metrics::StoreMetrics` so aggregate
//! doesn't consider them.
//!
//! # Design
//! The main design goals of this system are:
//! * To improve network device metrics by logging them at per device granularity.
//! * Continue to provide aggregate net metrics to maintain backward compatibility.
//! * Move NetDeviceMetrics out of logger and decouple it.
//! * Use lockless operations, preferably ones that don't require anything other than simple
//! reads/writes being atomic.
//! * Rely on `serde` to provide the actual serialization for writing the metrics.
//! * Since all metrics start at 0, we implement the `Default` trait via derive for all of them, to
//! avoid having to initialize everything by hand.
//!
//! * Devices could be created in any order i.e. the first device created could either be eth0 or
//! eth1 so if we use a vector for NetDeviceMetrics and call 1st device as net0, then net0 could
//! sometimes point to eth0 and sometimes to eth1 which doesn't help with analysing the metrics.
//! So, use Map instead of Vec to help understand which interface the metrics actually belongs to.
//! * We use "net_$iface_id" for the metrics name instead of "net_$tap_name" to be consistent with
//! the net endpoint "/network-interfaces/{iface_id}".
//!
//! The system implements 1 type of metrics:
//! * Shared Incremental Metrics (SharedIncMetrics) - dedicated for the metrics which need a counter
//! (i.e the number of times an API request failed). These metrics are reset upon flush.
//!
//! We use net::metrics::METRICS instead of adding an entry of NetDeviceMetrics
//! in Net so that metrics are accessible to be flushed even from signal handlers.
use std::collections::BTreeMap;
use std::sync::{Arc, RwLock};
use serde::ser::SerializeMap;
use serde::{Serialize, Serializer};
use crate::logger::{IncMetric, LatencyAggregateMetrics, SharedIncMetric};
/// Map of network interface id to its metrics.
///
/// Must only be accessed through the `METRICS` lock below; the map itself
/// provides no synchronization.
#[derive(Debug)]
pub struct NetMetricsPerDevice {
    /// Per-device metrics, keyed by the interface id used in the
    /// "/network-interfaces/{iface_id}" API endpoint.
    pub metrics: BTreeMap<String, Arc<NetDeviceMetrics>>,
}
impl NetMetricsPerDevice {
    /// Return the `NetDeviceMetrics` handle for the net device with id
    /// `iface_id`, allocating it on first use. An existing entry is returned
    /// as-is, so previously recorded data is never overwritten. The `METRICS`
    /// lock is statically initialized, so it is safe to unwrap the lock
    /// without a check.
    pub fn alloc(iface_id: String) -> Arc<NetDeviceMetrics> {
        let mut pool = METRICS.write().unwrap();
        // `or_default()` inserts `Arc::new(NetDeviceMetrics::default())`
        // only when the id is not present yet.
        let device_metrics = pool.metrics.entry(iface_id).or_default();
        Arc::clone(device_metrics)
    }
}
/// Pool of Network-related metrics per device behind a lock to
/// keep things thread safe. Since the lock is statically initialized here,
/// it is safe to unwrap it without any check (it can only fail if poisoned).
static METRICS: RwLock<NetMetricsPerDevice> = RwLock::new(NetMetricsPerDevice {
    metrics: BTreeMap::new(),
});
/// Serializes every per-device net metric plus the "net" aggregate.
///
/// Serialization of a `SharedIncMetric` flushes (resets) it, so each device's
/// diff is folded into the aggregate *before* that device entry is written.
pub fn flush_metrics<S: Serializer>(serializer: S) -> Result<S::Ok, S::Error> {
    let pool = METRICS.read().unwrap();
    // One extra entry to accommodate the aggregate "net" metrics.
    let mut map = serializer.serialize_map(Some(pool.metrics.len() + 1))?;
    let mut aggregate = NetDeviceMetrics::default();
    for (iface_id, device_metrics) in pool.metrics.iter() {
        let device_metrics: &NetDeviceMetrics = device_metrics;
        // Aggregate first: serializing the entry below resets the counters.
        aggregate.aggregate(device_metrics);
        map.serialize_entry(&format!("net_{}", iface_id), device_metrics)?;
    }
    map.serialize_entry("net", &aggregate)?;
    map.end()
}
/// Network-related metrics.
///
/// All counters are `SharedIncMetric`s: serialization emits the delta since
/// the previous flush (see `flush_metrics`).
#[derive(Default, Debug, Serialize)]
pub struct NetDeviceMetrics {
    /// Number of times when activate failed on a network device.
    pub activate_fails: SharedIncMetric,
    /// Number of times when interacting with the space config of a network device failed.
    pub cfg_fails: SharedIncMetric,
    /// Number of times the mac address was updated through the config space.
    pub mac_address_updates: SharedIncMetric,
    /// No available buffer for the net device rx queue.
    pub no_rx_avail_buffer: SharedIncMetric,
    /// No available buffer for the net device tx queue.
    pub no_tx_avail_buffer: SharedIncMetric,
    /// Number of times when handling events on a network device failed.
    pub event_fails: SharedIncMetric,
    /// Number of events associated with the receiving queue.
    pub rx_queue_event_count: SharedIncMetric,
    /// Number of events associated with the rate limiter installed on the receiving path.
    pub rx_event_rate_limiter_count: SharedIncMetric,
    /// Number of RX rate limiter throttling events.
    pub rx_rate_limiter_throttled: SharedIncMetric,
    /// Number of events received on the associated tap.
    pub rx_tap_event_count: SharedIncMetric,
    /// Number of bytes received.
    pub rx_bytes_count: SharedIncMetric,
    /// Number of packets received.
    pub rx_packets_count: SharedIncMetric,
    /// Number of errors while receiving data.
    pub rx_fails: SharedIncMetric,
    /// Number of successful read operations while receiving data.
    pub rx_count: SharedIncMetric,
    /// Number of times reading from TAP failed.
    pub tap_read_fails: SharedIncMetric,
    /// Number of times writing to TAP failed.
    pub tap_write_fails: SharedIncMetric,
    /// Duration of all tap write operations.
    pub tap_write_agg: LatencyAggregateMetrics,
    /// Number of transmitted bytes.
    pub tx_bytes_count: SharedIncMetric,
    /// Number of malformed TX frames.
    pub tx_malformed_frames: SharedIncMetric,
    /// Number of errors while transmitting data.
    pub tx_fails: SharedIncMetric,
    /// Number of successful write operations while transmitting data.
    pub tx_count: SharedIncMetric,
    /// Number of transmitted packets.
    pub tx_packets_count: SharedIncMetric,
    /// Number of events associated with the transmitting queue.
    pub tx_queue_event_count: SharedIncMetric,
    /// Number of events associated with the rate limiter installed on the transmitting path.
    pub tx_rate_limiter_event_count: SharedIncMetric,
    /// Number of TX rate limiter throttling events.
    pub tx_rate_limiter_throttled: SharedIncMetric,
    /// Number of packets with a spoofed mac, sent by the guest.
    pub tx_spoofed_mac_count: SharedIncMetric,
    /// Number of remaining requests in the TX queue.
    pub tx_remaining_reqs_count: SharedIncMetric,
}
impl NetDeviceMetrics {
    /// Const default construction.
    ///
    /// `tap_write_agg` is built via its own `new()`; every plain counter
    /// starts from its `Default` (zero) value.
    pub fn new() -> Self {
        Self {
            tap_write_agg: LatencyAggregateMetrics::new(),
            ..Default::default()
        }
    }

    /// Net metrics are SharedIncMetric where the diff of current vs
    /// old is serialized i.e. serialize_u64(current-old).
    /// So to have the aggregate serialized in same way we need to
    /// fetch the diff of current vs old metrics and add it to the
    /// aggregate.
    ///
    /// NOTE(review): only `tap_write_agg.sum_us` is folded into the
    /// aggregate; whether `LatencyAggregateMetrics` has other counters
    /// that should be aggregated as well cannot be determined from this
    /// file — verify against its definition in `vmm::logger`.
    pub fn aggregate(&mut self, other: &Self) {
        self.activate_fails.add(other.activate_fails.fetch_diff());
        self.cfg_fails.add(other.cfg_fails.fetch_diff());
        self.mac_address_updates
            .add(other.mac_address_updates.fetch_diff());
        self.no_rx_avail_buffer
            .add(other.no_rx_avail_buffer.fetch_diff());
        self.no_tx_avail_buffer
            .add(other.no_tx_avail_buffer.fetch_diff());
        self.event_fails.add(other.event_fails.fetch_diff());
        self.rx_queue_event_count
            .add(other.rx_queue_event_count.fetch_diff());
        self.rx_event_rate_limiter_count
            .add(other.rx_event_rate_limiter_count.fetch_diff());
        self.rx_rate_limiter_throttled
            .add(other.rx_rate_limiter_throttled.fetch_diff());
        self.rx_tap_event_count
            .add(other.rx_tap_event_count.fetch_diff());
        self.rx_bytes_count.add(other.rx_bytes_count.fetch_diff());
        self.rx_packets_count
            .add(other.rx_packets_count.fetch_diff());
        self.rx_fails.add(other.rx_fails.fetch_diff());
        self.rx_count.add(other.rx_count.fetch_diff());
        self.tap_read_fails.add(other.tap_read_fails.fetch_diff());
        self.tap_write_fails.add(other.tap_write_fails.fetch_diff());
        self.tap_write_agg
            .sum_us
            .add(other.tap_write_agg.sum_us.fetch_diff());
        self.tx_bytes_count.add(other.tx_bytes_count.fetch_diff());
        self.tx_malformed_frames
            .add(other.tx_malformed_frames.fetch_diff());
        self.tx_fails.add(other.tx_fails.fetch_diff());
        self.tx_count.add(other.tx_count.fetch_diff());
        self.tx_packets_count
            .add(other.tx_packets_count.fetch_diff());
        self.tx_queue_event_count
            .add(other.tx_queue_event_count.fetch_diff());
        self.tx_rate_limiter_event_count
            .add(other.tx_rate_limiter_event_count.fetch_diff());
        self.tx_rate_limiter_throttled
            .add(other.tx_rate_limiter_throttled.fetch_diff());
        self.tx_spoofed_mac_count
            .add(other.tx_spoofed_mac_count.fetch_diff());
        self.tx_remaining_reqs_count
            .add(other.tx_remaining_reqs_count.fetch_diff());
    }
}
#[cfg(test)]
pub mod tests {
    use super::*;

    /// Fetch a clone of the metrics handle for net device `name`,
    /// holding the `METRICS` read lock only for the lookup.
    ///
    /// Panics if `name` was never allocated — tests always call
    /// `NetMetricsPerDevice::alloc` first.
    fn dev_metrics(name: &str) -> Arc<NetDeviceMetrics> {
        Arc::clone(METRICS.read().unwrap().metrics.get(name).unwrap())
    }

    #[test]
    fn test_max_net_dev_metrics() {
        // Note: this test has nothing to do with
        // Net structure or IRQs, this is just to allocate
        // metrics for max number of devices that system can have.
        // we have 5-23 irq for net devices so max 19 net devices.
        const MAX_NET_DEVICES: usize = 19;

        // Verify that the lock is not poisoned.
        drop(METRICS.read().unwrap());
        drop(METRICS.write().unwrap());

        for i in 0..MAX_NET_DEVICES {
            let devn = format!("eth{}", i);
            NetMetricsPerDevice::alloc(devn.clone());
            dev_metrics(&devn).activate_fails.inc();
            dev_metrics(&devn).rx_bytes_count.add(10);
            dev_metrics(&devn).tx_bytes_count.add(5);
        }
        for i in 0..MAX_NET_DEVICES {
            let devn = format!("eth{}", i);
            // `>=` for metrics that `test_single_net_dev_metrics` may also
            // bump concurrently (it shares "eth0"); exact equality only for
            // `tx_bytes_count`, which no other test touches.
            assert!(dev_metrics(&devn).activate_fails.count() >= 1);
            assert!(dev_metrics(&devn).rx_bytes_count.count() >= 10);
            assert_eq!(dev_metrics(&devn).tx_bytes_count.count(), 5);
        }
    }

    #[test]
    fn test_single_net_dev_metrics() {
        // Use eth0 so that we can check thread safety with
        // `test_max_net_dev_metrics` which also uses the same name.
        let devn = "eth0";

        // Verify that the lock is not poisoned.
        drop(METRICS.read().unwrap());
        drop(METRICS.write().unwrap());

        NetMetricsPerDevice::alloc(String::from(devn));
        // The entry must exist after alloc.
        METRICS.read().unwrap().metrics.get(devn).unwrap();

        dev_metrics(devn).activate_fails.inc();
        let count = dev_metrics(devn).activate_fails.count();
        assert!(count > 0, "{}", count);
        // we expect only 2 tests (this and test_max_net_dev_metrics)
        // to update activate_fails count for eth0.
        let count = dev_metrics(devn).activate_fails.count();
        assert!(count <= 2, "{}", count);

        dev_metrics(devn).activate_fails.inc();
        dev_metrics(devn).rx_bytes_count.add(5);
        assert!(dev_metrics(devn).rx_bytes_count.count() >= 5);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/net/generated/if_tun.rs | src/vmm/src/devices/virtio/net/generated/if_tun.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// automatically generated by tools/bindgen.sh
#![allow(
non_camel_case_types,
non_upper_case_globals,
dead_code,
non_snake_case,
clippy::ptr_as_ptr,
clippy::undocumented_unsafe_blocks,
missing_debug_implementations,
clippy::tests_outside_test_module,
unsafe_op_in_unsafe_fn,
clippy::redundant_static_lifetimes
)]
pub const ETH_ALEN: u32 = 6;
pub const ETH_TLEN: u32 = 2;
pub const ETH_HLEN: u32 = 14;
pub const ETH_ZLEN: u32 = 60;
pub const ETH_DATA_LEN: u32 = 1500;
pub const ETH_FRAME_LEN: u32 = 1514;
pub const ETH_FCS_LEN: u32 = 4;
pub const ETH_MIN_MTU: u32 = 68;
pub const ETH_MAX_MTU: u32 = 65535;
pub const ETH_P_LOOP: u32 = 96;
pub const ETH_P_PUP: u32 = 512;
pub const ETH_P_PUPAT: u32 = 513;
pub const ETH_P_TSN: u32 = 8944;
pub const ETH_P_ERSPAN2: u32 = 8939;
pub const ETH_P_IP: u32 = 2048;
pub const ETH_P_X25: u32 = 2053;
pub const ETH_P_ARP: u32 = 2054;
pub const ETH_P_BPQ: u32 = 2303;
pub const ETH_P_IEEEPUP: u32 = 2560;
pub const ETH_P_IEEEPUPAT: u32 = 2561;
pub const ETH_P_BATMAN: u32 = 17157;
pub const ETH_P_DEC: u32 = 24576;
pub const ETH_P_DNA_DL: u32 = 24577;
pub const ETH_P_DNA_RC: u32 = 24578;
pub const ETH_P_DNA_RT: u32 = 24579;
pub const ETH_P_LAT: u32 = 24580;
pub const ETH_P_DIAG: u32 = 24581;
pub const ETH_P_CUST: u32 = 24582;
pub const ETH_P_SCA: u32 = 24583;
pub const ETH_P_TEB: u32 = 25944;
pub const ETH_P_RARP: u32 = 32821;
pub const ETH_P_ATALK: u32 = 32923;
pub const ETH_P_AARP: u32 = 33011;
pub const ETH_P_8021Q: u32 = 33024;
pub const ETH_P_ERSPAN: u32 = 35006;
pub const ETH_P_IPX: u32 = 33079;
pub const ETH_P_IPV6: u32 = 34525;
pub const ETH_P_PAUSE: u32 = 34824;
pub const ETH_P_SLOW: u32 = 34825;
pub const ETH_P_WCCP: u32 = 34878;
pub const ETH_P_MPLS_UC: u32 = 34887;
pub const ETH_P_MPLS_MC: u32 = 34888;
pub const ETH_P_ATMMPOA: u32 = 34892;
pub const ETH_P_PPP_DISC: u32 = 34915;
pub const ETH_P_PPP_SES: u32 = 34916;
pub const ETH_P_LINK_CTL: u32 = 34924;
pub const ETH_P_ATMFATE: u32 = 34948;
pub const ETH_P_PAE: u32 = 34958;
pub const ETH_P_PROFINET: u32 = 34962;
pub const ETH_P_REALTEK: u32 = 34969;
pub const ETH_P_AOE: u32 = 34978;
pub const ETH_P_ETHERCAT: u32 = 34980;
pub const ETH_P_8021AD: u32 = 34984;
pub const ETH_P_802_EX1: u32 = 34997;
pub const ETH_P_PREAUTH: u32 = 35015;
pub const ETH_P_TIPC: u32 = 35018;
pub const ETH_P_LLDP: u32 = 35020;
pub const ETH_P_MRP: u32 = 35043;
pub const ETH_P_MACSEC: u32 = 35045;
pub const ETH_P_8021AH: u32 = 35047;
pub const ETH_P_MVRP: u32 = 35061;
pub const ETH_P_1588: u32 = 35063;
pub const ETH_P_NCSI: u32 = 35064;
pub const ETH_P_PRP: u32 = 35067;
pub const ETH_P_CFM: u32 = 35074;
pub const ETH_P_FCOE: u32 = 35078;
pub const ETH_P_IBOE: u32 = 35093;
pub const ETH_P_TDLS: u32 = 35085;
pub const ETH_P_FIP: u32 = 35092;
pub const ETH_P_80221: u32 = 35095;
pub const ETH_P_HSR: u32 = 35119;
pub const ETH_P_NSH: u32 = 35151;
pub const ETH_P_LOOPBACK: u32 = 36864;
pub const ETH_P_QINQ1: u32 = 37120;
pub const ETH_P_QINQ2: u32 = 37376;
pub const ETH_P_QINQ3: u32 = 37632;
pub const ETH_P_EDSA: u32 = 56026;
pub const ETH_P_DSA_8021Q: u32 = 56027;
pub const ETH_P_DSA_A5PSW: u32 = 57345;
pub const ETH_P_IFE: u32 = 60734;
pub const ETH_P_AF_IUCV: u32 = 64507;
pub const ETH_P_802_3_MIN: u32 = 1536;
pub const ETH_P_802_3: u32 = 1;
pub const ETH_P_AX25: u32 = 2;
pub const ETH_P_ALL: u32 = 3;
pub const ETH_P_802_2: u32 = 4;
pub const ETH_P_SNAP: u32 = 5;
pub const ETH_P_DDCMP: u32 = 6;
pub const ETH_P_WAN_PPP: u32 = 7;
pub const ETH_P_PPP_MP: u32 = 8;
pub const ETH_P_LOCALTALK: u32 = 9;
pub const ETH_P_CAN: u32 = 12;
pub const ETH_P_CANFD: u32 = 13;
pub const ETH_P_CANXL: u32 = 14;
pub const ETH_P_PPPTALK: u32 = 16;
pub const ETH_P_TR_802_2: u32 = 17;
pub const ETH_P_MOBITEX: u32 = 21;
pub const ETH_P_CONTROL: u32 = 22;
pub const ETH_P_IRDA: u32 = 23;
pub const ETH_P_ECONET: u32 = 24;
pub const ETH_P_HDLC: u32 = 25;
pub const ETH_P_ARCNET: u32 = 26;
pub const ETH_P_DSA: u32 = 27;
pub const ETH_P_TRAILER: u32 = 28;
pub const ETH_P_PHONET: u32 = 245;
pub const ETH_P_IEEE802154: u32 = 246;
pub const ETH_P_CAIF: u32 = 247;
pub const ETH_P_XDSA: u32 = 248;
pub const ETH_P_MAP: u32 = 249;
pub const ETH_P_MCTP: u32 = 250;
pub const TUN_READQ_SIZE: u32 = 500;
pub const TUN_TYPE_MASK: u32 = 15;
pub const IFF_TAP: u32 = 2;
pub const IFF_NO_PI: u32 = 4096;
pub const IFF_VNET_HDR: u32 = 16384;
pub const IFF_MULTI_QUEUE: u32 = 256;
pub const TUN_TX_TIMESTAMP: u32 = 1;
pub const TUN_F_CSUM: u32 = 1;
pub const TUN_F_TSO4: u32 = 2;
pub const TUN_F_TSO6: u32 = 4;
pub const TUN_F_TSO_ECN: u32 = 8;
pub const TUN_F_UFO: u32 = 16;
pub const TUN_F_USO4: u32 = 32;
pub const TUN_F_USO6: u32 = 64;
pub const TUN_PKT_STRIP: u32 = 1;
pub const TUN_FLT_ALLMULTI: u32 = 1;
// Fixed-width kernel ABI type aliases.
pub type __u8 = ::std::os::raw::c_uchar;
pub type __u16 = ::std::os::raw::c_ushort;
pub type __u32 = ::std::os::raw::c_uint;
/// One classic BPF instruction (kernel `struct sock_filter`); field names
/// match the cBPF opcode/jump-true/jump-false/constant layout.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct sock_filter {
    pub code: __u16,
    pub jt: __u8,
    pub jf: __u8,
    pub k: __u32,
}
// Compile-time layout checks emitted by bindgen: each indexing expression
// fails the build if size/alignment/offset ever drifts from the kernel ABI.
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of sock_filter"][::std::mem::size_of::<sock_filter>() - 8usize];
    ["Alignment of sock_filter"][::std::mem::align_of::<sock_filter>() - 4usize];
    ["Offset of field: sock_filter::code"][::std::mem::offset_of!(sock_filter, code) - 0usize];
    ["Offset of field: sock_filter::jt"][::std::mem::offset_of!(sock_filter, jt) - 2usize];
    ["Offset of field: sock_filter::jf"][::std::mem::offset_of!(sock_filter, jf) - 3usize];
    ["Offset of field: sock_filter::k"][::std::mem::offset_of!(sock_filter, k) - 4usize];
};
/// A BPF program: `len` instructions pointed to by `filter`
/// (kernel `struct sock_fprog`).
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct sock_fprog {
    pub len: ::std::os::raw::c_ushort,
    pub filter: *mut sock_filter,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of sock_fprog"][::std::mem::size_of::<sock_fprog>() - 16usize];
    ["Alignment of sock_fprog"][::std::mem::align_of::<sock_fprog>() - 8usize];
    ["Offset of field: sock_fprog::len"][::std::mem::offset_of!(sock_fprog, len) - 0usize];
    ["Offset of field: sock_fprog::filter"][::std::mem::offset_of!(sock_fprog, filter) - 8usize];
};
impl Default for sock_fprog {
    fn default() -> Self {
        let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
        // SAFETY: all-zero bytes are a valid `sock_fprog` value
        // (`len == 0`, `filter` == null pointer), so after `write_bytes`
        // zero-fills the whole struct, `assume_init` is sound.
        unsafe {
            ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
            s.assume_init()
        }
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/net/generated/sockios.rs | src/vmm/src/devices/virtio/net/generated/sockios.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// automatically generated by tools/bindgen.sh
#![allow(
non_camel_case_types,
non_upper_case_globals,
dead_code,
non_snake_case,
clippy::ptr_as_ptr,
clippy::undocumented_unsafe_blocks,
missing_debug_implementations,
clippy::tests_outside_test_module,
unsafe_op_in_unsafe_fn,
clippy::redundant_static_lifetimes
)]
pub const __BITS_PER_LONG: u32 = 64;
pub const __BITS_PER_LONG_LONG: u32 = 64;
pub const FIOSETOWN: u32 = 35073;
pub const SIOCSPGRP: u32 = 35074;
pub const FIOGETOWN: u32 = 35075;
pub const SIOCGPGRP: u32 = 35076;
pub const SIOCATMARK: u32 = 35077;
pub const SIOCGSTAMP_OLD: u32 = 35078;
pub const SIOCGSTAMPNS_OLD: u32 = 35079;
pub const SOCK_IOC_TYPE: u32 = 137;
pub const SIOCGSTAMP: u32 = 35078;
pub const SIOCGSTAMPNS: u32 = 35079;
pub const SIOCADDRT: u32 = 35083;
pub const SIOCDELRT: u32 = 35084;
pub const SIOCRTMSG: u32 = 35085;
pub const SIOCGIFNAME: u32 = 35088;
pub const SIOCSIFLINK: u32 = 35089;
pub const SIOCGIFCONF: u32 = 35090;
pub const SIOCGIFFLAGS: u32 = 35091;
pub const SIOCSIFFLAGS: u32 = 35092;
pub const SIOCGIFADDR: u32 = 35093;
pub const SIOCSIFADDR: u32 = 35094;
pub const SIOCGIFDSTADDR: u32 = 35095;
pub const SIOCSIFDSTADDR: u32 = 35096;
pub const SIOCGIFBRDADDR: u32 = 35097;
pub const SIOCSIFBRDADDR: u32 = 35098;
pub const SIOCGIFNETMASK: u32 = 35099;
pub const SIOCSIFNETMASK: u32 = 35100;
pub const SIOCGIFMETRIC: u32 = 35101;
pub const SIOCSIFMETRIC: u32 = 35102;
pub const SIOCGIFMEM: u32 = 35103;
pub const SIOCSIFMEM: u32 = 35104;
pub const SIOCGIFMTU: u32 = 35105;
pub const SIOCSIFMTU: u32 = 35106;
pub const SIOCSIFNAME: u32 = 35107;
pub const SIOCSIFHWADDR: u32 = 35108;
pub const SIOCGIFENCAP: u32 = 35109;
pub const SIOCSIFENCAP: u32 = 35110;
pub const SIOCGIFHWADDR: u32 = 35111;
pub const SIOCGIFSLAVE: u32 = 35113;
pub const SIOCSIFSLAVE: u32 = 35120;
pub const SIOCADDMULTI: u32 = 35121;
pub const SIOCDELMULTI: u32 = 35122;
pub const SIOCGIFINDEX: u32 = 35123;
pub const SIOGIFINDEX: u32 = 35123;
pub const SIOCSIFPFLAGS: u32 = 35124;
pub const SIOCGIFPFLAGS: u32 = 35125;
pub const SIOCDIFADDR: u32 = 35126;
pub const SIOCSIFHWBROADCAST: u32 = 35127;
pub const SIOCGIFCOUNT: u32 = 35128;
pub const SIOCGIFBR: u32 = 35136;
pub const SIOCSIFBR: u32 = 35137;
pub const SIOCGIFTXQLEN: u32 = 35138;
pub const SIOCSIFTXQLEN: u32 = 35139;
pub const SIOCETHTOOL: u32 = 35142;
pub const SIOCGMIIPHY: u32 = 35143;
pub const SIOCGMIIREG: u32 = 35144;
pub const SIOCSMIIREG: u32 = 35145;
pub const SIOCWANDEV: u32 = 35146;
pub const SIOCOUTQNSD: u32 = 35147;
pub const SIOCGSKNS: u32 = 35148;
pub const SIOCDARP: u32 = 35155;
pub const SIOCGARP: u32 = 35156;
pub const SIOCSARP: u32 = 35157;
pub const SIOCDRARP: u32 = 35168;
pub const SIOCGRARP: u32 = 35169;
pub const SIOCSRARP: u32 = 35170;
pub const SIOCGIFMAP: u32 = 35184;
pub const SIOCSIFMAP: u32 = 35185;
pub const SIOCADDDLCI: u32 = 35200;
pub const SIOCDELDLCI: u32 = 35201;
pub const SIOCGIFVLAN: u32 = 35202;
pub const SIOCSIFVLAN: u32 = 35203;
pub const SIOCBONDENSLAVE: u32 = 35216;
pub const SIOCBONDRELEASE: u32 = 35217;
pub const SIOCBONDSETHWADDR: u32 = 35218;
pub const SIOCBONDSLAVEINFOQUERY: u32 = 35219;
pub const SIOCBONDINFOQUERY: u32 = 35220;
pub const SIOCBONDCHANGEACTIVE: u32 = 35221;
pub const SIOCBRADDBR: u32 = 35232;
pub const SIOCBRDELBR: u32 = 35233;
pub const SIOCBRADDIF: u32 = 35234;
pub const SIOCBRDELIF: u32 = 35235;
pub const SIOCSHWTSTAMP: u32 = 35248;
pub const SIOCGHWTSTAMP: u32 = 35249;
pub const SIOCDEVPRIVATE: u32 = 35312;
pub const SIOCPROTOPRIVATE: u32 = 35296;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/net/generated/mod.rs | src/vmm/src/devices/virtio/net/generated/mod.rs | // Copyright TUNTAP, 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
#![allow(clippy::all)]
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
// generated with bindgen /usr/include/linux/if.h --no-unstable-rust
// --constified-enum '*' --with-derive-default -- -D __UAPI_DEF_IF_IFNAMSIZ -D
// __UAPI_DEF_IF_NET_DEVICE_FLAGS -D __UAPI_DEF_IF_IFREQ -D __UAPI_DEF_IF_IFMAP
// Name is "iff" to avoid conflicting with "if" keyword.
// Generated against Linux 4.11 to include fix "uapi: fix linux/if.h userspace
// compilation errors".
// Manual fixup of ifrn_name to be of type c_uchar instead of c_char.
pub mod iff;
// generated with bindgen /usr/include/linux/if_tun.h --no-unstable-rust
// --constified-enum '*' --with-derive-default
pub mod if_tun;
// generated with bindgen /usr/include/linux/sockios.h --no-unstable-rust
// --constified-enum '*' --with-derive-default
pub mod sockios;
pub use if_tun::*;
pub use iff::*;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/net/generated/iff.rs | src/vmm/src/devices/virtio/net/generated/iff.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// automatically generated by tools/bindgen.sh
#![allow(
non_camel_case_types,
non_upper_case_globals,
dead_code,
non_snake_case,
clippy::ptr_as_ptr,
clippy::undocumented_unsafe_blocks,
missing_debug_implementations,
clippy::tests_outside_test_module,
unsafe_op_in_unsafe_fn,
clippy::redundant_static_lifetimes
)]
pub const IFNAMSIZ: u32 = 16;
pub const IFALIASZ: u32 = 256;
pub const IF_GET_IFACE: u32 = 1;
pub const IF_GET_PROTO: u32 = 2;
pub const IF_IFACE_V35: u32 = 4096;
pub const IF_IFACE_V24: u32 = 4097;
pub const IF_IFACE_X21: u32 = 4098;
pub const IF_IFACE_T1: u32 = 4099;
pub const IF_IFACE_E1: u32 = 4100;
pub const IF_IFACE_SYNC_SERIAL: u32 = 4101;
pub const IF_IFACE_X21D: u32 = 4102;
pub const IF_PROTO_HDLC: u32 = 8192;
pub const IF_PROTO_PPP: u32 = 8193;
pub const IF_PROTO_CISCO: u32 = 8194;
pub const IF_PROTO_FR: u32 = 8195;
pub const IF_PROTO_FR_ADD_PVC: u32 = 8196;
pub const IF_PROTO_FR_DEL_PVC: u32 = 8197;
pub const IF_PROTO_X25: u32 = 8198;
pub const IF_PROTO_HDLC_ETH: u32 = 8199;
pub const IF_PROTO_FR_ADD_ETH_PVC: u32 = 8200;
pub const IF_PROTO_FR_DEL_ETH_PVC: u32 = 8201;
pub const IF_PROTO_FR_PVC: u32 = 8202;
pub const IF_PROTO_FR_ETH_PVC: u32 = 8203;
pub const IF_PROTO_RAW: u32 = 8204;
pub const IFHWADDRLEN: u32 = 6;
// Address-family discriminator used by `sockaddr` (kernel `sa_family_t`).
pub type sa_family_t = ::std::os::raw::c_ushort;
/// Generic socket address (kernel `struct sockaddr`), 16 bytes on Linux.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct sockaddr {
    pub sa_family: sa_family_t,
    pub sa_data: [::std::os::raw::c_char; 14usize],
}
// Compile-time layout checks emitted by bindgen (build fails on ABI drift).
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of sockaddr"][::std::mem::size_of::<sockaddr>() - 16usize];
    ["Alignment of sockaddr"][::std::mem::align_of::<sockaddr>() - 2usize];
    ["Offset of field: sockaddr::sa_family"][::std::mem::offset_of!(sockaddr, sa_family) - 0usize];
    ["Offset of field: sockaddr::sa_data"][::std::mem::offset_of!(sockaddr, sa_data) - 2usize];
};
// WAN/serial/HDLC protocol-settings structs pulled in via `linux/if.h`
// (presumably from `linux/hdlc/ioctl.h` — TODO confirm against the header
// used by tools/bindgen.sh). Each struct is followed by bindgen's
// compile-time size/alignment/offset checks.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct sync_serial_settings {
    pub clock_rate: ::std::os::raw::c_uint,
    pub clock_type: ::std::os::raw::c_uint,
    pub loopback: ::std::os::raw::c_ushort,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of sync_serial_settings"][::std::mem::size_of::<sync_serial_settings>() - 12usize];
    ["Alignment of sync_serial_settings"][::std::mem::align_of::<sync_serial_settings>() - 4usize];
    ["Offset of field: sync_serial_settings::clock_rate"]
        [::std::mem::offset_of!(sync_serial_settings, clock_rate) - 0usize];
    ["Offset of field: sync_serial_settings::clock_type"]
        [::std::mem::offset_of!(sync_serial_settings, clock_type) - 4usize];
    ["Offset of field: sync_serial_settings::loopback"]
        [::std::mem::offset_of!(sync_serial_settings, loopback) - 8usize];
};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct te1_settings {
    pub clock_rate: ::std::os::raw::c_uint,
    pub clock_type: ::std::os::raw::c_uint,
    pub loopback: ::std::os::raw::c_ushort,
    pub slot_map: ::std::os::raw::c_uint,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of te1_settings"][::std::mem::size_of::<te1_settings>() - 16usize];
    ["Alignment of te1_settings"][::std::mem::align_of::<te1_settings>() - 4usize];
    ["Offset of field: te1_settings::clock_rate"]
        [::std::mem::offset_of!(te1_settings, clock_rate) - 0usize];
    ["Offset of field: te1_settings::clock_type"]
        [::std::mem::offset_of!(te1_settings, clock_type) - 4usize];
    ["Offset of field: te1_settings::loopback"]
        [::std::mem::offset_of!(te1_settings, loopback) - 8usize];
    ["Offset of field: te1_settings::slot_map"]
        [::std::mem::offset_of!(te1_settings, slot_map) - 12usize];
};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct raw_hdlc_proto {
    pub encoding: ::std::os::raw::c_ushort,
    pub parity: ::std::os::raw::c_ushort,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of raw_hdlc_proto"][::std::mem::size_of::<raw_hdlc_proto>() - 4usize];
    ["Alignment of raw_hdlc_proto"][::std::mem::align_of::<raw_hdlc_proto>() - 2usize];
    ["Offset of field: raw_hdlc_proto::encoding"]
        [::std::mem::offset_of!(raw_hdlc_proto, encoding) - 0usize];
    ["Offset of field: raw_hdlc_proto::parity"]
        [::std::mem::offset_of!(raw_hdlc_proto, parity) - 2usize];
};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct fr_proto {
    pub t391: ::std::os::raw::c_uint,
    pub t392: ::std::os::raw::c_uint,
    pub n391: ::std::os::raw::c_uint,
    pub n392: ::std::os::raw::c_uint,
    pub n393: ::std::os::raw::c_uint,
    pub lmi: ::std::os::raw::c_ushort,
    pub dce: ::std::os::raw::c_ushort,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of fr_proto"][::std::mem::size_of::<fr_proto>() - 24usize];
    ["Alignment of fr_proto"][::std::mem::align_of::<fr_proto>() - 4usize];
    ["Offset of field: fr_proto::t391"][::std::mem::offset_of!(fr_proto, t391) - 0usize];
    ["Offset of field: fr_proto::t392"][::std::mem::offset_of!(fr_proto, t392) - 4usize];
    ["Offset of field: fr_proto::n391"][::std::mem::offset_of!(fr_proto, n391) - 8usize];
    ["Offset of field: fr_proto::n392"][::std::mem::offset_of!(fr_proto, n392) - 12usize];
    ["Offset of field: fr_proto::n393"][::std::mem::offset_of!(fr_proto, n393) - 16usize];
    ["Offset of field: fr_proto::lmi"][::std::mem::offset_of!(fr_proto, lmi) - 20usize];
    ["Offset of field: fr_proto::dce"][::std::mem::offset_of!(fr_proto, dce) - 22usize];
};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct fr_proto_pvc {
    pub dlci: ::std::os::raw::c_uint,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of fr_proto_pvc"][::std::mem::size_of::<fr_proto_pvc>() - 4usize];
    ["Alignment of fr_proto_pvc"][::std::mem::align_of::<fr_proto_pvc>() - 4usize];
    ["Offset of field: fr_proto_pvc::dlci"][::std::mem::offset_of!(fr_proto_pvc, dlci) - 0usize];
};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct fr_proto_pvc_info {
    pub dlci: ::std::os::raw::c_uint,
    pub master: [::std::os::raw::c_char; 16usize],
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of fr_proto_pvc_info"][::std::mem::size_of::<fr_proto_pvc_info>() - 20usize];
    ["Alignment of fr_proto_pvc_info"][::std::mem::align_of::<fr_proto_pvc_info>() - 4usize];
    ["Offset of field: fr_proto_pvc_info::dlci"]
        [::std::mem::offset_of!(fr_proto_pvc_info, dlci) - 0usize];
    ["Offset of field: fr_proto_pvc_info::master"]
        [::std::mem::offset_of!(fr_proto_pvc_info, master) - 4usize];
};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct cisco_proto {
    pub interval: ::std::os::raw::c_uint,
    pub timeout: ::std::os::raw::c_uint,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of cisco_proto"][::std::mem::size_of::<cisco_proto>() - 8usize];
    ["Alignment of cisco_proto"][::std::mem::align_of::<cisco_proto>() - 4usize];
    ["Offset of field: cisco_proto::interval"]
        [::std::mem::offset_of!(cisco_proto, interval) - 0usize];
    ["Offset of field: cisco_proto::timeout"]
        [::std::mem::offset_of!(cisco_proto, timeout) - 4usize];
};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct x25_hdlc_proto {
    pub dce: ::std::os::raw::c_ushort,
    pub modulo: ::std::os::raw::c_uint,
    pub window: ::std::os::raw::c_uint,
    pub t1: ::std::os::raw::c_uint,
    pub t2: ::std::os::raw::c_uint,
    pub n2: ::std::os::raw::c_uint,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of x25_hdlc_proto"][::std::mem::size_of::<x25_hdlc_proto>() - 24usize];
    ["Alignment of x25_hdlc_proto"][::std::mem::align_of::<x25_hdlc_proto>() - 4usize];
    ["Offset of field: x25_hdlc_proto::dce"][::std::mem::offset_of!(x25_hdlc_proto, dce) - 0usize];
    ["Offset of field: x25_hdlc_proto::modulo"]
        [::std::mem::offset_of!(x25_hdlc_proto, modulo) - 4usize];
    ["Offset of field: x25_hdlc_proto::window"]
        [::std::mem::offset_of!(x25_hdlc_proto, window) - 8usize];
    ["Offset of field: x25_hdlc_proto::t1"][::std::mem::offset_of!(x25_hdlc_proto, t1) - 12usize];
    ["Offset of field: x25_hdlc_proto::t2"][::std::mem::offset_of!(x25_hdlc_proto, t2) - 16usize];
    ["Offset of field: x25_hdlc_proto::n2"][::std::mem::offset_of!(x25_hdlc_proto, n2) - 20usize];
};
pub mod net_device_flags {
pub type Type = ::std::os::raw::c_uint;
pub const IFF_UP: Type = 1;
pub const IFF_BROADCAST: Type = 2;
pub const IFF_DEBUG: Type = 4;
pub const IFF_LOOPBACK: Type = 8;
pub const IFF_POINTOPOINT: Type = 16;
pub const IFF_NOTRAILERS: Type = 32;
pub const IFF_RUNNING: Type = 64;
pub const IFF_NOARP: Type = 128;
pub const IFF_PROMISC: Type = 256;
pub const IFF_ALLMULTI: Type = 512;
pub const IFF_MASTER: Type = 1024;
pub const IFF_SLAVE: Type = 2048;
pub const IFF_MULTICAST: Type = 4096;
pub const IFF_PORTSEL: Type = 8192;
pub const IFF_AUTOMEDIA: Type = 16384;
pub const IFF_DYNAMIC: Type = 32768;
pub const IFF_LOWER_UP: Type = 65536;
pub const IFF_DORMANT: Type = 131072;
pub const IFF_ECHO: Type = 262144;
}
pub mod _bindgen_ty_4 {
pub type Type = ::std::os::raw::c_uint;
pub const IF_OPER_UNKNOWN: Type = 0;
pub const IF_OPER_NOTPRESENT: Type = 1;
pub const IF_OPER_DOWN: Type = 2;
pub const IF_OPER_LOWERLAYERDOWN: Type = 3;
pub const IF_OPER_TESTING: Type = 4;
pub const IF_OPER_DORMANT: Type = 5;
pub const IF_OPER_UP: Type = 6;
}
pub mod _bindgen_ty_5 {
pub type Type = ::std::os::raw::c_uint;
pub const IF_LINK_MODE_DEFAULT: Type = 0;
pub const IF_LINK_MODE_DORMANT: Type = 1;
pub const IF_LINK_MODE_TESTING: Type = 2;
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct ifmap {
pub mem_start: ::std::os::raw::c_ulong,
pub mem_end: ::std::os::raw::c_ulong,
pub base_addr: ::std::os::raw::c_ushort,
pub irq: ::std::os::raw::c_uchar,
pub dma: ::std::os::raw::c_uchar,
pub port: ::std::os::raw::c_uchar,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of ifmap"][::std::mem::size_of::<ifmap>() - 24usize];
["Alignment of ifmap"][::std::mem::align_of::<ifmap>() - 8usize];
["Offset of field: ifmap::mem_start"][::std::mem::offset_of!(ifmap, mem_start) - 0usize];
["Offset of field: ifmap::mem_end"][::std::mem::offset_of!(ifmap, mem_end) - 8usize];
["Offset of field: ifmap::base_addr"][::std::mem::offset_of!(ifmap, base_addr) - 16usize];
["Offset of field: ifmap::irq"][::std::mem::offset_of!(ifmap, irq) - 18usize];
["Offset of field: ifmap::dma"][::std::mem::offset_of!(ifmap, dma) - 19usize];
["Offset of field: ifmap::port"][::std::mem::offset_of!(ifmap, port) - 20usize];
};
#[repr(C)]
#[derive(Copy, Clone)]
pub struct if_settings {
pub type_: ::std::os::raw::c_uint,
pub size: ::std::os::raw::c_uint,
pub ifs_ifsu: if_settings__bindgen_ty_1,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union if_settings__bindgen_ty_1 {
pub raw_hdlc: *mut raw_hdlc_proto,
pub cisco: *mut cisco_proto,
pub fr: *mut fr_proto,
pub fr_pvc: *mut fr_proto_pvc,
pub fr_pvc_info: *mut fr_proto_pvc_info,
pub x25: *mut x25_hdlc_proto,
pub sync: *mut sync_serial_settings,
pub te1: *mut te1_settings,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of if_settings__bindgen_ty_1"]
[::std::mem::size_of::<if_settings__bindgen_ty_1>() - 8usize];
["Alignment of if_settings__bindgen_ty_1"]
[::std::mem::align_of::<if_settings__bindgen_ty_1>() - 8usize];
["Offset of field: if_settings__bindgen_ty_1::raw_hdlc"]
[::std::mem::offset_of!(if_settings__bindgen_ty_1, raw_hdlc) - 0usize];
["Offset of field: if_settings__bindgen_ty_1::cisco"]
[::std::mem::offset_of!(if_settings__bindgen_ty_1, cisco) - 0usize];
["Offset of field: if_settings__bindgen_ty_1::fr"]
[::std::mem::offset_of!(if_settings__bindgen_ty_1, fr) - 0usize];
["Offset of field: if_settings__bindgen_ty_1::fr_pvc"]
[::std::mem::offset_of!(if_settings__bindgen_ty_1, fr_pvc) - 0usize];
["Offset of field: if_settings__bindgen_ty_1::fr_pvc_info"]
[::std::mem::offset_of!(if_settings__bindgen_ty_1, fr_pvc_info) - 0usize];
["Offset of field: if_settings__bindgen_ty_1::x25"]
[::std::mem::offset_of!(if_settings__bindgen_ty_1, x25) - 0usize];
["Offset of field: if_settings__bindgen_ty_1::sync"]
[::std::mem::offset_of!(if_settings__bindgen_ty_1, sync) - 0usize];
["Offset of field: if_settings__bindgen_ty_1::te1"]
[::std::mem::offset_of!(if_settings__bindgen_ty_1, te1) - 0usize];
};
impl Default for if_settings__bindgen_ty_1 {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of if_settings"][::std::mem::size_of::<if_settings>() - 16usize];
["Alignment of if_settings"][::std::mem::align_of::<if_settings>() - 8usize];
["Offset of field: if_settings::type_"][::std::mem::offset_of!(if_settings, type_) - 0usize];
["Offset of field: if_settings::size"][::std::mem::offset_of!(if_settings, size) - 4usize];
["Offset of field: if_settings::ifs_ifsu"]
[::std::mem::offset_of!(if_settings, ifs_ifsu) - 8usize];
};
impl Default for if_settings {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct ifreq {
pub ifr_ifrn: ifreq__bindgen_ty_1,
pub ifr_ifru: ifreq__bindgen_ty_2,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union ifreq__bindgen_ty_1 {
pub ifrn_name: [::std::os::raw::c_uchar; 16usize],
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of ifreq__bindgen_ty_1"][::std::mem::size_of::<ifreq__bindgen_ty_1>() - 16usize];
["Alignment of ifreq__bindgen_ty_1"][::std::mem::align_of::<ifreq__bindgen_ty_1>() - 1usize];
["Offset of field: ifreq__bindgen_ty_1::ifrn_name"]
[::std::mem::offset_of!(ifreq__bindgen_ty_1, ifrn_name) - 0usize];
};
impl Default for ifreq__bindgen_ty_1 {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union ifreq__bindgen_ty_2 {
pub ifru_addr: sockaddr,
pub ifru_dstaddr: sockaddr,
pub ifru_broadaddr: sockaddr,
pub ifru_netmask: sockaddr,
pub ifru_hwaddr: sockaddr,
pub ifru_flags: ::std::os::raw::c_short,
pub ifru_ivalue: ::std::os::raw::c_int,
pub ifru_mtu: ::std::os::raw::c_int,
pub ifru_map: ifmap,
pub ifru_slave: [::std::os::raw::c_char; 16usize],
pub ifru_newname: [::std::os::raw::c_char; 16usize],
pub ifru_data: *mut ::std::os::raw::c_void,
pub ifru_settings: if_settings,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of ifreq__bindgen_ty_2"][::std::mem::size_of::<ifreq__bindgen_ty_2>() - 24usize];
["Alignment of ifreq__bindgen_ty_2"][::std::mem::align_of::<ifreq__bindgen_ty_2>() - 8usize];
["Offset of field: ifreq__bindgen_ty_2::ifru_addr"]
[::std::mem::offset_of!(ifreq__bindgen_ty_2, ifru_addr) - 0usize];
["Offset of field: ifreq__bindgen_ty_2::ifru_dstaddr"]
[::std::mem::offset_of!(ifreq__bindgen_ty_2, ifru_dstaddr) - 0usize];
["Offset of field: ifreq__bindgen_ty_2::ifru_broadaddr"]
[::std::mem::offset_of!(ifreq__bindgen_ty_2, ifru_broadaddr) - 0usize];
["Offset of field: ifreq__bindgen_ty_2::ifru_netmask"]
[::std::mem::offset_of!(ifreq__bindgen_ty_2, ifru_netmask) - 0usize];
["Offset of field: ifreq__bindgen_ty_2::ifru_hwaddr"]
[::std::mem::offset_of!(ifreq__bindgen_ty_2, ifru_hwaddr) - 0usize];
["Offset of field: ifreq__bindgen_ty_2::ifru_flags"]
[::std::mem::offset_of!(ifreq__bindgen_ty_2, ifru_flags) - 0usize];
["Offset of field: ifreq__bindgen_ty_2::ifru_ivalue"]
[::std::mem::offset_of!(ifreq__bindgen_ty_2, ifru_ivalue) - 0usize];
["Offset of field: ifreq__bindgen_ty_2::ifru_mtu"]
[::std::mem::offset_of!(ifreq__bindgen_ty_2, ifru_mtu) - 0usize];
["Offset of field: ifreq__bindgen_ty_2::ifru_map"]
[::std::mem::offset_of!(ifreq__bindgen_ty_2, ifru_map) - 0usize];
["Offset of field: ifreq__bindgen_ty_2::ifru_slave"]
[::std::mem::offset_of!(ifreq__bindgen_ty_2, ifru_slave) - 0usize];
["Offset of field: ifreq__bindgen_ty_2::ifru_newname"]
[::std::mem::offset_of!(ifreq__bindgen_ty_2, ifru_newname) - 0usize];
["Offset of field: ifreq__bindgen_ty_2::ifru_data"]
[::std::mem::offset_of!(ifreq__bindgen_ty_2, ifru_data) - 0usize];
["Offset of field: ifreq__bindgen_ty_2::ifru_settings"]
[::std::mem::offset_of!(ifreq__bindgen_ty_2, ifru_settings) - 0usize];
};
impl Default for ifreq__bindgen_ty_2 {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of ifreq"][::std::mem::size_of::<ifreq>() - 40usize];
["Alignment of ifreq"][::std::mem::align_of::<ifreq>() - 8usize];
["Offset of field: ifreq::ifr_ifrn"][::std::mem::offset_of!(ifreq, ifr_ifrn) - 0usize];
["Offset of field: ifreq::ifr_ifru"][::std::mem::offset_of!(ifreq, ifr_ifru) - 16usize];
};
impl Default for ifreq {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct ifconf {
pub ifc_len: ::std::os::raw::c_int,
pub ifc_ifcu: ifconf__bindgen_ty_1,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union ifconf__bindgen_ty_1 {
pub ifcu_buf: *mut ::std::os::raw::c_char,
pub ifcu_req: *mut ifreq,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of ifconf__bindgen_ty_1"][::std::mem::size_of::<ifconf__bindgen_ty_1>() - 8usize];
["Alignment of ifconf__bindgen_ty_1"][::std::mem::align_of::<ifconf__bindgen_ty_1>() - 8usize];
["Offset of field: ifconf__bindgen_ty_1::ifcu_buf"]
[::std::mem::offset_of!(ifconf__bindgen_ty_1, ifcu_buf) - 0usize];
["Offset of field: ifconf__bindgen_ty_1::ifcu_req"]
[::std::mem::offset_of!(ifconf__bindgen_ty_1, ifcu_req) - 0usize];
};
impl Default for ifconf__bindgen_ty_1 {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of ifconf"][::std::mem::size_of::<ifconf>() - 16usize];
["Alignment of ifconf"][::std::mem::align_of::<ifconf>() - 8usize];
["Offset of field: ifconf::ifc_len"][::std::mem::offset_of!(ifconf, ifc_len) - 0usize];
["Offset of field: ifconf::ifc_ifcu"][::std::mem::offset_of!(ifconf, ifc_ifcu) - 8usize];
};
impl Default for ifconf {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/generated/virtio_ring.rs | src/vmm/src/devices/virtio/generated/virtio_ring.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// automatically generated by tools/bindgen.sh
#![allow(
non_camel_case_types,
non_upper_case_globals,
dead_code,
non_snake_case,
clippy::ptr_as_ptr,
clippy::undocumented_unsafe_blocks,
missing_debug_implementations,
clippy::tests_outside_test_module,
unsafe_op_in_unsafe_fn,
clippy::redundant_static_lifetimes
)]
pub const VIRTIO_RING_F_EVENT_IDX: u32 = 29;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/generated/virtio_net.rs | src/vmm/src/devices/virtio/generated/virtio_net.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// automatically generated by tools/bindgen.sh
#![allow(
non_camel_case_types,
non_upper_case_globals,
dead_code,
non_snake_case,
clippy::ptr_as_ptr,
clippy::undocumented_unsafe_blocks,
missing_debug_implementations,
clippy::tests_outside_test_module,
unsafe_op_in_unsafe_fn,
clippy::redundant_static_lifetimes
)]
pub const VIRTIO_NET_F_CSUM: u32 = 0;
pub const VIRTIO_NET_F_GUEST_CSUM: u32 = 1;
pub const VIRTIO_NET_F_CTRL_GUEST_OFFLOADS: u32 = 2;
pub const VIRTIO_NET_F_MTU: u32 = 3;
pub const VIRTIO_NET_F_MAC: u32 = 5;
pub const VIRTIO_NET_F_GUEST_TSO4: u32 = 7;
pub const VIRTIO_NET_F_GUEST_TSO6: u32 = 8;
pub const VIRTIO_NET_F_GUEST_ECN: u32 = 9;
pub const VIRTIO_NET_F_GUEST_UFO: u32 = 10;
pub const VIRTIO_NET_F_HOST_TSO4: u32 = 11;
pub const VIRTIO_NET_F_HOST_TSO6: u32 = 12;
pub const VIRTIO_NET_F_HOST_ECN: u32 = 13;
pub const VIRTIO_NET_F_HOST_UFO: u32 = 14;
pub const VIRTIO_NET_F_MRG_RXBUF: u32 = 15;
pub const VIRTIO_NET_F_STATUS: u32 = 16;
pub const VIRTIO_NET_F_CTRL_VQ: u32 = 17;
pub const VIRTIO_NET_F_CTRL_RX: u32 = 18;
pub const VIRTIO_NET_F_CTRL_VLAN: u32 = 19;
pub const VIRTIO_NET_F_CTRL_RX_EXTRA: u32 = 20;
pub const VIRTIO_NET_F_GUEST_ANNOUNCE: u32 = 21;
pub const VIRTIO_NET_F_MQ: u32 = 22;
pub const VIRTIO_NET_F_CTRL_MAC_ADDR: u32 = 23;
pub const VIRTIO_NET_F_DEVICE_STATS: u32 = 50;
pub const VIRTIO_NET_F_VQ_NOTF_COAL: u32 = 52;
pub const VIRTIO_NET_F_NOTF_COAL: u32 = 53;
pub const VIRTIO_NET_F_GUEST_USO4: u32 = 54;
pub const VIRTIO_NET_F_GUEST_USO6: u32 = 55;
pub const VIRTIO_NET_F_HOST_USO: u32 = 56;
pub const VIRTIO_NET_F_HASH_REPORT: u32 = 57;
pub const VIRTIO_NET_F_GUEST_HDRLEN: u32 = 59;
pub const VIRTIO_NET_F_RSS: u32 = 60;
pub const VIRTIO_NET_F_RSC_EXT: u32 = 61;
pub const VIRTIO_NET_F_STANDBY: u32 = 62;
pub const VIRTIO_NET_F_SPEED_DUPLEX: u32 = 63;
pub const VIRTIO_NET_F_GSO: u32 = 6;
pub type __u8 = ::std::os::raw::c_uchar;
pub type __u16 = ::std::os::raw::c_ushort;
pub type __le16 = __u16;
pub type __virtio16 = __u16;
#[repr(C)]
#[derive(Copy, Clone)]
pub struct virtio_net_hdr_v1 {
pub flags: __u8,
pub gso_type: __u8,
pub hdr_len: __virtio16,
pub gso_size: __virtio16,
pub __bindgen_anon_1: virtio_net_hdr_v1__bindgen_ty_1,
pub num_buffers: __virtio16,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union virtio_net_hdr_v1__bindgen_ty_1 {
pub __bindgen_anon_1: virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1,
pub csum: virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2,
pub rsc: virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1 {
pub csum_start: __virtio16,
pub csum_offset: __virtio16,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1"]
[::std::mem::size_of::<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1>() - 4usize];
["Alignment of virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1"]
[::std::mem::align_of::<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1>() - 2usize];
["Offset of field: virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1::csum_start"][::std::mem::offset_of!(
virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1,
csum_start
) - 0usize];
["Offset of field: virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1::csum_offset"][::std::mem::offset_of!(
virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1,
csum_offset
) - 2usize];
};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2 {
pub start: __virtio16,
pub offset: __virtio16,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2"]
[::std::mem::size_of::<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2>() - 4usize];
["Alignment of virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2"]
[::std::mem::align_of::<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2>() - 2usize];
["Offset of field: virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2::start"]
[::std::mem::offset_of!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2, start) - 0usize];
["Offset of field: virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2::offset"]
[::std::mem::offset_of!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2, offset) - 2usize];
};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3 {
pub segments: __le16,
pub dup_acks: __le16,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3"]
[::std::mem::size_of::<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3>() - 4usize];
["Alignment of virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3"]
[::std::mem::align_of::<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3>() - 2usize];
["Offset of field: virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3::segments"]
[::std::mem::offset_of!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3, segments) - 0usize];
["Offset of field: virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3::dup_acks"]
[::std::mem::offset_of!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3, dup_acks) - 2usize];
};
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of virtio_net_hdr_v1__bindgen_ty_1"]
[::std::mem::size_of::<virtio_net_hdr_v1__bindgen_ty_1>() - 4usize];
["Alignment of virtio_net_hdr_v1__bindgen_ty_1"]
[::std::mem::align_of::<virtio_net_hdr_v1__bindgen_ty_1>() - 2usize];
["Offset of field: virtio_net_hdr_v1__bindgen_ty_1::csum"]
[::std::mem::offset_of!(virtio_net_hdr_v1__bindgen_ty_1, csum) - 0usize];
["Offset of field: virtio_net_hdr_v1__bindgen_ty_1::rsc"]
[::std::mem::offset_of!(virtio_net_hdr_v1__bindgen_ty_1, rsc) - 0usize];
};
impl Default for virtio_net_hdr_v1__bindgen_ty_1 {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of virtio_net_hdr_v1"][::std::mem::size_of::<virtio_net_hdr_v1>() - 12usize];
["Alignment of virtio_net_hdr_v1"][::std::mem::align_of::<virtio_net_hdr_v1>() - 2usize];
["Offset of field: virtio_net_hdr_v1::flags"]
[::std::mem::offset_of!(virtio_net_hdr_v1, flags) - 0usize];
["Offset of field: virtio_net_hdr_v1::gso_type"]
[::std::mem::offset_of!(virtio_net_hdr_v1, gso_type) - 1usize];
["Offset of field: virtio_net_hdr_v1::hdr_len"]
[::std::mem::offset_of!(virtio_net_hdr_v1, hdr_len) - 2usize];
["Offset of field: virtio_net_hdr_v1::gso_size"]
[::std::mem::offset_of!(virtio_net_hdr_v1, gso_size) - 4usize];
["Offset of field: virtio_net_hdr_v1::num_buffers"]
[::std::mem::offset_of!(virtio_net_hdr_v1, num_buffers) - 10usize];
};
impl Default for virtio_net_hdr_v1 {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/generated/virtio_blk.rs | src/vmm/src/devices/virtio/generated/virtio_blk.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// automatically generated by tools/bindgen.sh
#![allow(
non_camel_case_types,
non_upper_case_globals,
dead_code,
non_snake_case,
clippy::ptr_as_ptr,
clippy::undocumented_unsafe_blocks,
missing_debug_implementations,
clippy::tests_outside_test_module,
unsafe_op_in_unsafe_fn,
clippy::redundant_static_lifetimes
)]
pub const VIRTIO_BLK_F_SIZE_MAX: u32 = 1;
pub const VIRTIO_BLK_F_SEG_MAX: u32 = 2;
pub const VIRTIO_BLK_F_GEOMETRY: u32 = 4;
pub const VIRTIO_BLK_F_RO: u32 = 5;
pub const VIRTIO_BLK_F_BLK_SIZE: u32 = 6;
pub const VIRTIO_BLK_F_TOPOLOGY: u32 = 10;
pub const VIRTIO_BLK_F_MQ: u32 = 12;
pub const VIRTIO_BLK_F_DISCARD: u32 = 13;
pub const VIRTIO_BLK_F_WRITE_ZEROES: u32 = 14;
pub const VIRTIO_BLK_F_SECURE_ERASE: u32 = 16;
pub const VIRTIO_BLK_F_ZONED: u32 = 17;
pub const VIRTIO_BLK_F_BARRIER: u32 = 0;
pub const VIRTIO_BLK_F_SCSI: u32 = 7;
pub const VIRTIO_BLK_F_FLUSH: u32 = 9;
pub const VIRTIO_BLK_F_CONFIG_WCE: u32 = 11;
pub const VIRTIO_BLK_F_WCE: u32 = 9;
pub const VIRTIO_BLK_ID_BYTES: u32 = 20;
pub const VIRTIO_BLK_T_IN: u32 = 0;
pub const VIRTIO_BLK_T_OUT: u32 = 1;
pub const VIRTIO_BLK_T_SCSI_CMD: u32 = 2;
pub const VIRTIO_BLK_T_FLUSH: u32 = 4;
pub const VIRTIO_BLK_T_GET_ID: u32 = 8;
pub const VIRTIO_BLK_T_DISCARD: u32 = 11;
pub const VIRTIO_BLK_T_WRITE_ZEROES: u32 = 13;
pub const VIRTIO_BLK_T_SECURE_ERASE: u32 = 14;
pub const VIRTIO_BLK_T_ZONE_APPEND: u32 = 15;
pub const VIRTIO_BLK_T_ZONE_REPORT: u32 = 16;
pub const VIRTIO_BLK_T_ZONE_OPEN: u32 = 18;
pub const VIRTIO_BLK_T_ZONE_CLOSE: u32 = 20;
pub const VIRTIO_BLK_T_ZONE_FINISH: u32 = 22;
pub const VIRTIO_BLK_T_ZONE_RESET: u32 = 24;
pub const VIRTIO_BLK_T_ZONE_RESET_ALL: u32 = 26;
pub const VIRTIO_BLK_T_BARRIER: u32 = 2147483648;
pub const VIRTIO_BLK_Z_NONE: u32 = 0;
pub const VIRTIO_BLK_Z_HM: u32 = 1;
pub const VIRTIO_BLK_Z_HA: u32 = 2;
pub const VIRTIO_BLK_ZT_CONV: u32 = 1;
pub const VIRTIO_BLK_ZT_SWR: u32 = 2;
pub const VIRTIO_BLK_ZT_SWP: u32 = 3;
pub const VIRTIO_BLK_ZS_NOT_WP: u32 = 0;
pub const VIRTIO_BLK_ZS_EMPTY: u32 = 1;
pub const VIRTIO_BLK_ZS_IOPEN: u32 = 2;
pub const VIRTIO_BLK_ZS_EOPEN: u32 = 3;
pub const VIRTIO_BLK_ZS_CLOSED: u32 = 4;
pub const VIRTIO_BLK_ZS_RDONLY: u32 = 13;
pub const VIRTIO_BLK_ZS_FULL: u32 = 14;
pub const VIRTIO_BLK_ZS_OFFLINE: u32 = 15;
pub const VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP: u32 = 1;
pub const VIRTIO_BLK_S_OK: u32 = 0;
pub const VIRTIO_BLK_S_IOERR: u32 = 1;
pub const VIRTIO_BLK_S_UNSUPP: u32 = 2;
pub const VIRTIO_BLK_S_ZONE_INVALID_CMD: u32 = 3;
pub const VIRTIO_BLK_S_ZONE_UNALIGNED_WP: u32 = 4;
pub const VIRTIO_BLK_S_ZONE_OPEN_RESOURCE: u32 = 5;
pub const VIRTIO_BLK_S_ZONE_ACTIVE_RESOURCE: u32 = 6;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/generated/mod.rs | src/vmm/src/devices/virtio/generated/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
#![allow(clippy::all)]
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
pub mod virtio_blk;
pub mod virtio_config;
pub mod virtio_ids;
pub mod virtio_mem;
pub mod virtio_net;
pub mod virtio_ring;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/generated/virtio_config.rs | src/vmm/src/devices/virtio/generated/virtio_config.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// automatically generated by tools/bindgen.sh
#![allow(
non_camel_case_types,
non_upper_case_globals,
dead_code,
non_snake_case,
clippy::ptr_as_ptr,
clippy::undocumented_unsafe_blocks,
missing_debug_implementations,
clippy::tests_outside_test_module,
unsafe_op_in_unsafe_fn,
clippy::redundant_static_lifetimes
)]
pub const VIRTIO_F_NOTIFY_ON_EMPTY: u32 = 24;
pub const VIRTIO_F_ANY_LAYOUT: u32 = 27;
pub const VIRTIO_F_VERSION_1: u32 = 32;
pub const VIRTIO_F_ACCESS_PLATFORM: u32 = 33;
pub const VIRTIO_F_IOMMU_PLATFORM: u32 = 33;
pub const VIRTIO_F_RING_PACKED: u32 = 34;
pub const VIRTIO_F_IN_ORDER: u32 = 35;
pub const VIRTIO_F_ORDER_PLATFORM: u32 = 36;
pub const VIRTIO_F_SR_IOV: u32 = 37;
pub const VIRTIO_F_NOTIFICATION_DATA: u32 = 38;
pub const VIRTIO_F_NOTIF_CONFIG_DATA: u32 = 39;
pub const VIRTIO_F_RING_RESET: u32 = 40;
pub const VIRTIO_F_ADMIN_VQ: u32 = 41;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/generated/virtio_mem.rs | src/vmm/src/devices/virtio/generated/virtio_mem.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// automatically generated by tools/bindgen.sh
#![allow(
non_camel_case_types,
non_upper_case_globals,
dead_code,
non_snake_case,
clippy::ptr_as_ptr,
clippy::undocumented_unsafe_blocks,
missing_debug_implementations,
clippy::tests_outside_test_module,
unsafe_op_in_unsafe_fn,
clippy::redundant_static_lifetimes
)]
pub const VIRTIO_MEM_F_ACPI_PXM: u32 = 0;
pub const VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE: u32 = 1;
pub const VIRTIO_MEM_F_PERSISTENT_SUSPEND: u32 = 2;
pub const VIRTIO_MEM_REQ_PLUG: u32 = 0;
pub const VIRTIO_MEM_REQ_UNPLUG: u32 = 1;
pub const VIRTIO_MEM_REQ_UNPLUG_ALL: u32 = 2;
pub const VIRTIO_MEM_REQ_STATE: u32 = 3;
pub const VIRTIO_MEM_RESP_ACK: u32 = 0;
pub const VIRTIO_MEM_RESP_NACK: u32 = 1;
pub const VIRTIO_MEM_RESP_BUSY: u32 = 2;
pub const VIRTIO_MEM_RESP_ERROR: u32 = 3;
pub const VIRTIO_MEM_STATE_PLUGGED: u32 = 0;
pub const VIRTIO_MEM_STATE_UNPLUGGED: u32 = 1;
pub const VIRTIO_MEM_STATE_MIXED: u32 = 2;
pub type __u8 = ::std::os::raw::c_uchar;
pub type __u16 = ::std::os::raw::c_ushort;
pub type __u64 = ::std::os::raw::c_ulonglong;
pub type __le16 = __u16;
pub type __le64 = __u64;
pub type __virtio16 = __u16;
pub type __virtio64 = __u64;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_mem_req_plug {
pub addr: __virtio64,
pub nb_blocks: __virtio16,
pub padding: [__virtio16; 3usize],
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of virtio_mem_req_plug"][::std::mem::size_of::<virtio_mem_req_plug>() - 16usize];
["Alignment of virtio_mem_req_plug"][::std::mem::align_of::<virtio_mem_req_plug>() - 8usize];
["Offset of field: virtio_mem_req_plug::addr"]
[::std::mem::offset_of!(virtio_mem_req_plug, addr) - 0usize];
["Offset of field: virtio_mem_req_plug::nb_blocks"]
[::std::mem::offset_of!(virtio_mem_req_plug, nb_blocks) - 8usize];
["Offset of field: virtio_mem_req_plug::padding"]
[::std::mem::offset_of!(virtio_mem_req_plug, padding) - 10usize];
};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_mem_req_unplug {
pub addr: __virtio64,
pub nb_blocks: __virtio16,
pub padding: [__virtio16; 3usize],
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of virtio_mem_req_unplug"][::std::mem::size_of::<virtio_mem_req_unplug>() - 16usize];
["Alignment of virtio_mem_req_unplug"]
[::std::mem::align_of::<virtio_mem_req_unplug>() - 8usize];
["Offset of field: virtio_mem_req_unplug::addr"]
[::std::mem::offset_of!(virtio_mem_req_unplug, addr) - 0usize];
["Offset of field: virtio_mem_req_unplug::nb_blocks"]
[::std::mem::offset_of!(virtio_mem_req_unplug, nb_blocks) - 8usize];
["Offset of field: virtio_mem_req_unplug::padding"]
[::std::mem::offset_of!(virtio_mem_req_unplug, padding) - 10usize];
};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_mem_req_state {
pub addr: __virtio64,
pub nb_blocks: __virtio16,
pub padding: [__virtio16; 3usize],
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of virtio_mem_req_state"][::std::mem::size_of::<virtio_mem_req_state>() - 16usize];
["Alignment of virtio_mem_req_state"][::std::mem::align_of::<virtio_mem_req_state>() - 8usize];
["Offset of field: virtio_mem_req_state::addr"]
[::std::mem::offset_of!(virtio_mem_req_state, addr) - 0usize];
["Offset of field: virtio_mem_req_state::nb_blocks"]
[::std::mem::offset_of!(virtio_mem_req_state, nb_blocks) - 8usize];
["Offset of field: virtio_mem_req_state::padding"]
[::std::mem::offset_of!(virtio_mem_req_state, padding) - 10usize];
};
#[repr(C)]
#[derive(Copy, Clone)]
pub struct virtio_mem_req {
pub type_: __virtio16,
pub padding: [__virtio16; 3usize],
pub u: virtio_mem_req__bindgen_ty_1,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union virtio_mem_req__bindgen_ty_1 {
pub plug: virtio_mem_req_plug,
pub unplug: virtio_mem_req_unplug,
pub state: virtio_mem_req_state,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of virtio_mem_req__bindgen_ty_1"]
[::std::mem::size_of::<virtio_mem_req__bindgen_ty_1>() - 16usize];
["Alignment of virtio_mem_req__bindgen_ty_1"]
[::std::mem::align_of::<virtio_mem_req__bindgen_ty_1>() - 8usize];
["Offset of field: virtio_mem_req__bindgen_ty_1::plug"]
[::std::mem::offset_of!(virtio_mem_req__bindgen_ty_1, plug) - 0usize];
["Offset of field: virtio_mem_req__bindgen_ty_1::unplug"]
[::std::mem::offset_of!(virtio_mem_req__bindgen_ty_1, unplug) - 0usize];
["Offset of field: virtio_mem_req__bindgen_ty_1::state"]
[::std::mem::offset_of!(virtio_mem_req__bindgen_ty_1, state) - 0usize];
};
impl Default for virtio_mem_req__bindgen_ty_1 {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of virtio_mem_req"][::std::mem::size_of::<virtio_mem_req>() - 24usize];
["Alignment of virtio_mem_req"][::std::mem::align_of::<virtio_mem_req>() - 8usize];
["Offset of field: virtio_mem_req::type_"]
[::std::mem::offset_of!(virtio_mem_req, type_) - 0usize];
["Offset of field: virtio_mem_req::padding"]
[::std::mem::offset_of!(virtio_mem_req, padding) - 2usize];
["Offset of field: virtio_mem_req::u"][::std::mem::offset_of!(virtio_mem_req, u) - 8usize];
};
impl Default for virtio_mem_req {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_mem_resp_state {
pub state: __virtio16,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of virtio_mem_resp_state"][::std::mem::size_of::<virtio_mem_resp_state>() - 2usize];
["Alignment of virtio_mem_resp_state"]
[::std::mem::align_of::<virtio_mem_resp_state>() - 2usize];
["Offset of field: virtio_mem_resp_state::state"]
[::std::mem::offset_of!(virtio_mem_resp_state, state) - 0usize];
};
#[repr(C)]
#[derive(Copy, Clone)]
pub struct virtio_mem_resp {
pub type_: __virtio16,
pub padding: [__virtio16; 3usize],
pub u: virtio_mem_resp__bindgen_ty_1,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union virtio_mem_resp__bindgen_ty_1 {
pub state: virtio_mem_resp_state,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of virtio_mem_resp__bindgen_ty_1"]
[::std::mem::size_of::<virtio_mem_resp__bindgen_ty_1>() - 2usize];
["Alignment of virtio_mem_resp__bindgen_ty_1"]
[::std::mem::align_of::<virtio_mem_resp__bindgen_ty_1>() - 2usize];
["Offset of field: virtio_mem_resp__bindgen_ty_1::state"]
[::std::mem::offset_of!(virtio_mem_resp__bindgen_ty_1, state) - 0usize];
};
impl Default for virtio_mem_resp__bindgen_ty_1 {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of virtio_mem_resp"][::std::mem::size_of::<virtio_mem_resp>() - 10usize];
["Alignment of virtio_mem_resp"][::std::mem::align_of::<virtio_mem_resp>() - 2usize];
["Offset of field: virtio_mem_resp::type_"]
[::std::mem::offset_of!(virtio_mem_resp, type_) - 0usize];
["Offset of field: virtio_mem_resp::padding"]
[::std::mem::offset_of!(virtio_mem_resp, padding) - 2usize];
["Offset of field: virtio_mem_resp::u"][::std::mem::offset_of!(virtio_mem_resp, u) - 8usize];
};
impl Default for virtio_mem_resp {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_mem_config {
pub block_size: __le64,
pub node_id: __le16,
pub padding: [__u8; 6usize],
pub addr: __le64,
pub region_size: __le64,
pub usable_region_size: __le64,
pub plugged_size: __le64,
pub requested_size: __le64,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of virtio_mem_config"][::std::mem::size_of::<virtio_mem_config>() - 56usize];
["Alignment of virtio_mem_config"][::std::mem::align_of::<virtio_mem_config>() - 8usize];
["Offset of field: virtio_mem_config::block_size"]
[::std::mem::offset_of!(virtio_mem_config, block_size) - 0usize];
["Offset of field: virtio_mem_config::node_id"]
[::std::mem::offset_of!(virtio_mem_config, node_id) - 8usize];
["Offset of field: virtio_mem_config::padding"]
[::std::mem::offset_of!(virtio_mem_config, padding) - 10usize];
["Offset of field: virtio_mem_config::addr"]
[::std::mem::offset_of!(virtio_mem_config, addr) - 16usize];
["Offset of field: virtio_mem_config::region_size"]
[::std::mem::offset_of!(virtio_mem_config, region_size) - 24usize];
["Offset of field: virtio_mem_config::usable_region_size"]
[::std::mem::offset_of!(virtio_mem_config, usable_region_size) - 32usize];
["Offset of field: virtio_mem_config::plugged_size"]
[::std::mem::offset_of!(virtio_mem_config, plugged_size) - 40usize];
["Offset of field: virtio_mem_config::requested_size"]
[::std::mem::offset_of!(virtio_mem_config, requested_size) - 48usize];
};
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/generated/virtio_ids.rs | src/vmm/src/devices/virtio/generated/virtio_ids.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// automatically generated by tools/bindgen.sh
#![allow(
non_camel_case_types,
non_upper_case_globals,
dead_code,
non_snake_case,
clippy::ptr_as_ptr,
clippy::undocumented_unsafe_blocks,
missing_debug_implementations,
clippy::tests_outside_test_module,
unsafe_op_in_unsafe_fn,
clippy::redundant_static_lifetimes
)]
pub const VIRTIO_ID_NET: u32 = 1;
pub const VIRTIO_ID_BLOCK: u32 = 2;
pub const VIRTIO_ID_CONSOLE: u32 = 3;
pub const VIRTIO_ID_RNG: u32 = 4;
pub const VIRTIO_ID_BALLOON: u32 = 5;
pub const VIRTIO_ID_IOMEM: u32 = 6;
pub const VIRTIO_ID_RPMSG: u32 = 7;
pub const VIRTIO_ID_SCSI: u32 = 8;
pub const VIRTIO_ID_9P: u32 = 9;
pub const VIRTIO_ID_MAC80211_WLAN: u32 = 10;
pub const VIRTIO_ID_RPROC_SERIAL: u32 = 11;
pub const VIRTIO_ID_CAIF: u32 = 12;
pub const VIRTIO_ID_MEMORY_BALLOON: u32 = 13;
pub const VIRTIO_ID_GPU: u32 = 16;
pub const VIRTIO_ID_CLOCK: u32 = 17;
pub const VIRTIO_ID_INPUT: u32 = 18;
pub const VIRTIO_ID_VSOCK: u32 = 19;
pub const VIRTIO_ID_CRYPTO: u32 = 20;
pub const VIRTIO_ID_SIGNAL_DIST: u32 = 21;
pub const VIRTIO_ID_PSTORE: u32 = 22;
pub const VIRTIO_ID_IOMMU: u32 = 23;
pub const VIRTIO_ID_MEM: u32 = 24;
pub const VIRTIO_ID_SOUND: u32 = 25;
pub const VIRTIO_ID_FS: u32 = 26;
pub const VIRTIO_ID_PMEM: u32 = 27;
pub const VIRTIO_ID_RPMB: u32 = 28;
pub const VIRTIO_ID_MAC80211_HWSIM: u32 = 29;
pub const VIRTIO_ID_VIDEO_ENCODER: u32 = 30;
pub const VIRTIO_ID_VIDEO_DECODER: u32 = 31;
pub const VIRTIO_ID_SCMI: u32 = 32;
pub const VIRTIO_ID_NITRO_SEC_MOD: u32 = 33;
pub const VIRTIO_ID_I2C_ADAPTER: u32 = 34;
pub const VIRTIO_ID_WATCHDOG: u32 = 35;
pub const VIRTIO_ID_CAN: u32 = 36;
pub const VIRTIO_ID_DMABUF: u32 = 37;
pub const VIRTIO_ID_PARAM_SERV: u32 = 38;
pub const VIRTIO_ID_AUDIO_POLICY: u32 = 39;
pub const VIRTIO_ID_BT: u32 = 40;
pub const VIRTIO_ID_GPIO: u32 = 41;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/transport/mod.rs | src/vmm/src/devices/virtio/transport/mod.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::sync::Arc;
use std::sync::atomic::AtomicU32;
use vmm_sys_util::eventfd::EventFd;
use crate::vstate::interrupts::InterruptError;
/// MMIO transport for VirtIO devices
pub mod mmio;
/// PCI transport for VirtIO devices
pub mod pci;
/// Represents the types of interrupts used by VirtIO devices
#[derive(Debug, Clone)]
pub enum VirtioInterruptType {
/// Interrupt for VirtIO configuration changes
Config,
/// Interrupts for new events in a queue.
Queue(u16),
}
/// API of interrupt types used by VirtIO devices
pub trait VirtioInterrupt: std::fmt::Debug + Send + Sync {
/// Trigger a VirtIO interrupt.
fn trigger(&self, interrupt_type: VirtioInterruptType) -> Result<(), InterruptError>;
/// Trigger multiple Virtio interrupts for selected queues.
/// The caller needs to ensure that [`queues`] does not include duplicate entries to
/// avoid sending multiple interrupts for the same queue.
/// This is to allow sending a single interrupt for implementations that don't
/// distinguish different queues, like IrqTrigger, instead of sending multiple same
/// interrupts.
fn trigger_queues(&self, queues: &[u16]) -> Result<(), InterruptError> {
queues
.iter()
.try_for_each(|&qidx| self.trigger(VirtioInterruptType::Queue(qidx)))
}
/// Get the `EventFd` (if any) that backs the underlying interrupt.
fn notifier(&self, _interrupt_type: VirtioInterruptType) -> Option<&EventFd> {
None
}
/// Get the current device interrupt status.
fn status(&self) -> Arc<AtomicU32>;
/// Returns true if there is any pending interrupt
#[cfg(test)]
fn has_pending_interrupt(&self, interrupt_type: VirtioInterruptType) -> bool;
/// Used to acknowledge an interrupt
#[cfg(test)]
fn ack_interrupt(&self, interrupt_type: VirtioInterruptType);
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/transport/mmio.rs | src/vmm/src/devices/virtio/transport/mmio.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::fmt::Debug;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, Barrier, Mutex, MutexGuard};
use vmm_sys_util::eventfd::EventFd;
use super::{VirtioInterrupt, VirtioInterruptType};
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::device_status;
use crate::devices::virtio::queue::Queue;
use crate::logger::{IncMetric, METRICS, error, warn};
use crate::utils::byte_order;
use crate::vstate::bus::BusDevice;
use crate::vstate::interrupts::InterruptError;
use crate::vstate::memory::{GuestAddress, GuestMemoryMmap};
// TODO crosvm uses 0 here, but IIRC virtio specified some other vendor id that should be used
const VENDOR_ID: u32 = 0;
/// Interrupt flags (re: interrupt status & acknowledge registers).
/// See linux/virtio_mmio.h.
pub const VIRTIO_MMIO_INT_VRING: u32 = 0x01;
pub const VIRTIO_MMIO_INT_CONFIG: u32 = 0x02;
// required by the virtio mmio device register layout at offset 0 from base
const MMIO_MAGIC_VALUE: u32 = 0x7472_6976;
// current version specified by the mmio standard (legacy devices used 1 here)
const MMIO_VERSION: u32 = 2;
/// Implements the
/// [MMIO](http://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.html#x1-1090002)
/// transport for virtio devices.
///
/// This requires 3 points of installation to work with a VM:
///
/// 1. Mmio reads and writes must be sent to this device at what is referred to here as MMIO base.
/// 1. `Mmio::queue_evts` must be installed at `virtio::NOTIFY_REG_OFFSET` offset from the MMIO
/// base. Each event in the array must be signaled if the index is written at that offset.
/// 1. `Mmio::interrupt_evt` must signal an interrupt that the guest driver is listening to when it
/// is written to.
///
/// Typically one page (4096 bytes) of MMIO address space is sufficient to handle this transport
/// and inner virtio device.
#[derive(Debug, Clone)]
pub struct MmioTransport {
device: Arc<Mutex<dyn VirtioDevice>>,
// The register where feature bits are stored.
pub(crate) features_select: u32,
// The register where features page is selected.
pub(crate) acked_features_select: u32,
pub(crate) queue_select: u32,
pub(crate) device_status: u32,
pub(crate) config_generation: u32,
mem: GuestMemoryMmap,
pub(crate) interrupt: Arc<IrqTrigger>,
pub is_vhost_user: bool,
}
impl MmioTransport {
/// Constructs a new MMIO transport for the given virtio device.
pub fn new(
mem: GuestMemoryMmap,
interrupt: Arc<IrqTrigger>,
device: Arc<Mutex<dyn VirtioDevice>>,
is_vhost_user: bool,
) -> MmioTransport {
MmioTransport {
device,
features_select: 0,
acked_features_select: 0,
queue_select: 0,
device_status: device_status::INIT,
config_generation: 0,
mem,
interrupt,
is_vhost_user,
}
}
/// Gets the encapsulated locked VirtioDevice.
pub fn locked_device(&self) -> MutexGuard<'_, dyn VirtioDevice + 'static> {
self.device.lock().expect("Poisoned lock")
}
/// Gets the encapsulated VirtioDevice.
pub fn device(&self) -> Arc<Mutex<dyn VirtioDevice>> {
self.device.clone()
}
fn check_device_status(&self, set: u32, clr: u32) -> bool {
self.device_status & (set | clr) == set
}
fn with_queue<U, F>(&self, d: U, f: F) -> U
where
F: FnOnce(&Queue) -> U,
U: Debug,
{
match self
.locked_device()
.queues()
.get(self.queue_select as usize)
{
Some(queue) => f(queue),
None => d,
}
}
fn with_queue_mut<F: FnOnce(&mut Queue)>(&mut self, f: F) -> bool {
if let Some(queue) = self
.locked_device()
.queues_mut()
.get_mut(self.queue_select as usize)
{
f(queue);
true
} else {
false
}
}
fn update_queue_field<F: FnOnce(&mut Queue)>(&mut self, f: F) {
if self.check_device_status(
device_status::FEATURES_OK,
device_status::DRIVER_OK | device_status::FAILED,
) {
self.with_queue_mut(f);
} else {
warn!(
"update virtio queue in invalid state {:#x}",
self.device_status
);
}
}
fn reset(&mut self) {
if self.locked_device().is_activated() {
warn!("reset device while it's still in active state");
}
self.features_select = 0;
self.acked_features_select = 0;
self.queue_select = 0;
self.interrupt.irq_status.store(0, Ordering::SeqCst);
self.device_status = device_status::INIT;
// . Keep interrupt_evt and queue_evts as is. There may be pending notifications in those
// eventfds, but nothing will happen other than supurious wakeups.
// . Do not reset config_generation and keep it monotonically increasing
for queue in self.locked_device().queues_mut() {
*queue = Queue::new(queue.max_size);
}
}
/// Update device status according to the state machine defined by VirtIO Spec 1.0.
/// Please refer to VirtIO Spec 1.0, section 2.1.1 and 3.1.1.
///
/// The driver MUST update device status, setting bits to indicate the completed steps
/// of the driver initialization sequence specified in 3.1. The driver MUST NOT clear
/// a device status bit. If the driver sets the FAILED bit, the driver MUST later reset
/// the device before attempting to re-initialize.
#[allow(unused_assignments)]
fn set_device_status(&mut self, status: u32) {
use device_status::*;
// match changed bits
match !self.device_status & status {
ACKNOWLEDGE if self.device_status == INIT => {
self.device_status = status;
}
DRIVER if self.device_status == ACKNOWLEDGE => {
self.device_status = status;
}
FEATURES_OK if self.device_status == (ACKNOWLEDGE | DRIVER) => {
self.device_status = status;
}
DRIVER_OK if self.device_status == (ACKNOWLEDGE | DRIVER | FEATURES_OK) => {
self.device_status = status;
let mut locked_device = self.device.lock().expect("Poisoned lock");
let device_activated = locked_device.is_activated();
if !device_activated {
// temporary variable needed for borrow checker
let activate_result =
locked_device.activate(self.mem.clone(), self.interrupt.clone());
if let Err(err) = activate_result {
self.device_status |= DEVICE_NEEDS_RESET;
// Section 2.1.2 of the specification states that we need to send a device
// configuration change interrupt
let _ = self.interrupt.trigger(VirtioInterruptType::Config);
error!("Failed to activate virtio device: {}", err)
}
}
}
_ if (status & FAILED) != 0 => {
// TODO: notify backend driver to stop the device
self.device_status |= FAILED;
}
_ if status == 0 => {
{
let mut locked_device = self.device.lock().expect("Poisoned lock");
if locked_device.is_activated() {
let mut device_status = self.device_status;
let reset_result = locked_device.reset();
match reset_result {
Some((_interrupt_evt, mut _queue_evts)) => {}
None => {
device_status |= FAILED;
}
}
self.device_status = device_status;
}
}
// If the backend device driver doesn't support reset,
// just leave the device marked as FAILED.
if self.device_status & FAILED == 0 {
self.reset();
}
}
_ => {
warn!(
"invalid virtio driver status transition: {:#x} -> {:#x}",
self.device_status, status
);
}
}
}
}
impl BusDevice for MmioTransport {
fn read(&mut self, base: u64, offset: u64, data: &mut [u8]) {
match offset {
0x00..=0xff if data.len() == 4 => {
let v = match offset {
0x0 => MMIO_MAGIC_VALUE,
0x04 => MMIO_VERSION,
0x08 => self.locked_device().device_type(),
0x0c => VENDOR_ID, // vendor id
0x10 => {
let mut features = self
.locked_device()
.avail_features_by_page(self.features_select);
if self.features_select == 1 {
features |= 0x1; // enable support of VirtIO Version 1
}
features
}
0x34 => self.with_queue(0, |q| u32::from(q.max_size)),
0x44 => self.with_queue(0, |q| u32::from(q.ready)),
0x60 => {
// For vhost-user backed devices we need some additional
// logic to differentiate between `VIRTIO_MMIO_INT_VRING`
// and `VIRTIO_MMIO_INT_CONFIG` statuses.
// Because backend cannot propagate any interrupt status
// changes to the FC we always try to serve the `VIRTIO_MMIO_INT_VRING`
// status. But in case when backend changes the configuration and
// user triggers the manual notification, FC needs to send
// `VIRTIO_MMIO_INT_CONFIG`. We know that for vhost-user devices the
// interrupt status can only be 0 (no one set any bits) or
// `VIRTIO_MMIO_INT_CONFIG`. Based on this knowledge we can simply
// check if the current interrupt_status is equal to the
// `VIRTIO_MMIO_INT_CONFIG` or not to understand if we need to send
// `VIRTIO_MMIO_INT_CONFIG` or
// `VIRTIO_MMIO_INT_VRING`.
let is = self.interrupt.irq_status.load(Ordering::SeqCst);
if !self.is_vhost_user {
is
} else if is == VIRTIO_MMIO_INT_CONFIG {
VIRTIO_MMIO_INT_CONFIG
} else {
VIRTIO_MMIO_INT_VRING
}
}
0x70 => self.device_status,
0xfc => self.config_generation,
_ => {
warn!("unknown virtio mmio register read: {:#x}", offset);
return;
}
};
byte_order::write_le_u32(data, v);
}
0x100..=0xfff => self.locked_device().read_config(offset - 0x100, data),
_ => {
warn!(
"invalid virtio mmio read: {base:#x}:{offset:#x}:{:#x}",
data.len()
);
}
};
}
fn write(&mut self, base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
fn hi(v: &mut GuestAddress, x: u32) {
*v = (*v & 0xffff_ffff) | (u64::from(x) << 32)
}
fn lo(v: &mut GuestAddress, x: u32) {
*v = (*v & !0xffff_ffff) | u64::from(x)
}
match offset {
0x00..=0xff if data.len() == 4 => {
let v = byte_order::read_le_u32(data);
match offset {
0x14 => self.features_select = v,
0x20 => {
if self.check_device_status(
device_status::DRIVER,
device_status::FEATURES_OK
| device_status::FAILED
| device_status::DEVICE_NEEDS_RESET,
) {
self.locked_device()
.ack_features_by_page(self.acked_features_select, v);
} else {
warn!(
"ack virtio features in invalid state {:#x}",
self.device_status
);
}
}
0x24 => self.acked_features_select = v,
0x30 => self.queue_select = v,
0x38 => self.update_queue_field(|q| q.size = (v & 0xffff) as u16),
0x44 => self.update_queue_field(|q| q.ready = v == 1),
0x64 => {
if self.check_device_status(device_status::DRIVER_OK, 0) {
self.interrupt.irq_status.fetch_and(!v, Ordering::SeqCst);
}
}
0x70 => self.set_device_status(v),
0x80 => self.update_queue_field(|q| lo(&mut q.desc_table_address, v)),
0x84 => self.update_queue_field(|q| hi(&mut q.desc_table_address, v)),
0x90 => self.update_queue_field(|q| lo(&mut q.avail_ring_address, v)),
0x94 => self.update_queue_field(|q| hi(&mut q.avail_ring_address, v)),
0xa0 => self.update_queue_field(|q| lo(&mut q.used_ring_address, v)),
0xa4 => self.update_queue_field(|q| hi(&mut q.used_ring_address, v)),
_ => {
warn!("unknown virtio mmio register write: {:#x}", offset);
}
}
}
0x100..=0xfff => {
if self.check_device_status(
device_status::DRIVER,
device_status::FAILED | device_status::DEVICE_NEEDS_RESET,
) {
self.locked_device().write_config(offset - 0x100, data)
} else {
warn!("can not write to device config data area before driver is ready");
}
}
_ => {
warn!(
"invalid virtio mmio write: {base:#x}:{offset:#x}:{:#x}",
data.len()
);
}
}
None
}
}
/// The 2 types of interrupt sources in MMIO transport.
#[derive(Debug)]
pub enum IrqType {
/// Interrupt triggered by change in config.
Config,
/// Interrupt triggered by used vring buffers.
Vring,
}
impl From<VirtioInterruptType> for IrqType {
fn from(interrupt_type: VirtioInterruptType) -> Self {
match interrupt_type {
VirtioInterruptType::Config => IrqType::Config,
VirtioInterruptType::Queue(_) => IrqType::Vring,
}
}
}
/// Helper struct that is responsible for triggering guest IRQs
#[derive(Debug)]
pub struct IrqTrigger {
pub(crate) irq_status: Arc<AtomicU32>,
pub(crate) irq_evt: EventFd,
}
impl Default for IrqTrigger {
fn default() -> Self {
Self::new()
}
}
impl VirtioInterrupt for IrqTrigger {
fn trigger(&self, interrupt_type: VirtioInterruptType) -> Result<(), InterruptError> {
METRICS.interrupts.triggers.inc();
match interrupt_type {
VirtioInterruptType::Config => self.trigger_irq(IrqType::Config),
VirtioInterruptType::Queue(_) => self.trigger_irq(IrqType::Vring),
}
}
fn trigger_queues(&self, queues: &[u16]) -> Result<(), InterruptError> {
if queues.is_empty() {
Ok(())
} else {
METRICS.interrupts.triggers.inc();
self.trigger_irq(IrqType::Vring)
}
}
fn notifier(&self, _interrupt_type: VirtioInterruptType) -> Option<&EventFd> {
Some(&self.irq_evt)
}
fn status(&self) -> Arc<AtomicU32> {
self.irq_status.clone()
}
#[cfg(test)]
fn has_pending_interrupt(&self, interrupt_type: VirtioInterruptType) -> bool {
if let Ok(num_irqs) = self.irq_evt.read() {
if num_irqs == 0 {
return false;
}
let irq_status = self.irq_status.load(Ordering::SeqCst);
return matches!(
(irq_status, interrupt_type.into()),
(VIRTIO_MMIO_INT_CONFIG, IrqType::Config) | (VIRTIO_MMIO_INT_VRING, IrqType::Vring)
);
}
false
}
#[cfg(test)]
fn ack_interrupt(&self, interrupt_type: VirtioInterruptType) {
let irq = match interrupt_type {
VirtioInterruptType::Config => VIRTIO_MMIO_INT_CONFIG,
VirtioInterruptType::Queue(_) => VIRTIO_MMIO_INT_VRING,
};
self.irq_status.fetch_and(!irq, Ordering::SeqCst);
}
}
impl IrqTrigger {
pub fn new() -> Self {
Self {
irq_status: Arc::new(AtomicU32::new(0)),
irq_evt: EventFd::new(libc::EFD_NONBLOCK)
.expect("Could not create EventFd for IrqTrigger"),
}
}
fn trigger_irq(&self, irq_type: IrqType) -> Result<(), InterruptError> {
let irq = match irq_type {
IrqType::Config => VIRTIO_MMIO_INT_CONFIG,
IrqType::Vring => VIRTIO_MMIO_INT_VRING,
};
self.irq_status.fetch_or(irq, Ordering::SeqCst);
self.irq_evt.write(1).map_err(|err| {
error!("Failed to send irq to the guest: {:?}", err);
err
})?;
Ok(())
}
}
#[cfg(test)]
pub(crate) mod tests {
use std::ops::Deref;
use vmm_sys_util::eventfd::EventFd;
use super::*;
use crate::devices::virtio::ActivateError;
use crate::devices::virtio::device_status::DEVICE_NEEDS_RESET;
use crate::impl_device_type;
use crate::test_utils::single_region_mem;
use crate::utils::byte_order::{read_le_u32, write_le_u32};
use crate::utils::u64_to_usize;
use crate::vstate::memory::GuestMemoryMmap;
#[derive(Debug)]
pub(crate) struct DummyDevice {
acked_features: u64,
avail_features: u64,
interrupt_trigger: Option<Arc<dyn VirtioInterrupt>>,
queue_evts: Vec<EventFd>,
queues: Vec<Queue>,
device_activated: bool,
config_bytes: [u8; 0xeff],
activate_should_error: bool,
}
impl DummyDevice {
pub(crate) fn new() -> Self {
DummyDevice {
acked_features: 0,
avail_features: 0,
interrupt_trigger: None,
queue_evts: vec![
EventFd::new(libc::EFD_NONBLOCK).unwrap(),
EventFd::new(libc::EFD_NONBLOCK).unwrap(),
],
queues: vec![Queue::new(16), Queue::new(32)],
device_activated: false,
config_bytes: [0; 0xeff],
activate_should_error: false,
}
}
pub fn set_avail_features(&mut self, avail_features: u64) {
self.avail_features = avail_features;
}
}
impl VirtioDevice for DummyDevice {
impl_device_type!(123);
fn avail_features(&self) -> u64 {
self.avail_features
}
fn acked_features(&self) -> u64 {
self.acked_features
}
fn set_acked_features(&mut self, acked_features: u64) {
self.acked_features = acked_features;
}
fn queues(&self) -> &[Queue] {
&self.queues
}
fn queues_mut(&mut self) -> &mut [Queue] {
&mut self.queues
}
fn queue_events(&self) -> &[EventFd] {
&self.queue_evts
}
fn interrupt_trigger(&self) -> &dyn VirtioInterrupt {
self.interrupt_trigger
.as_ref()
.expect("Device is not activated")
.deref()
}
fn read_config(&self, offset: u64, data: &mut [u8]) {
data.copy_from_slice(&self.config_bytes[u64_to_usize(offset)..]);
}
fn write_config(&mut self, offset: u64, data: &[u8]) {
for (i, item) in data.iter().enumerate() {
self.config_bytes[u64_to_usize(offset) + i] = *item;
}
}
fn activate(
&mut self,
_: GuestMemoryMmap,
interrupt: Arc<dyn VirtioInterrupt>,
) -> Result<(), ActivateError> {
self.device_activated = true;
self.interrupt_trigger = Some(interrupt);
if self.activate_should_error {
Err(ActivateError::EventFd)
} else {
Ok(())
}
}
fn is_activated(&self) -> bool {
self.device_activated
}
}
fn set_device_status(d: &mut MmioTransport, status: u32) {
let mut buf = [0; 4];
write_le_u32(&mut buf[..], status);
d.write(0x0, 0x70, &buf[..]);
}
#[test]
fn test_new() {
let m = single_region_mem(0x1000);
let interrupt = Arc::new(IrqTrigger::new());
let mut dummy = DummyDevice::new();
// Validate reset is no-op.
assert!(dummy.reset().is_none());
let mut d = MmioTransport::new(m, interrupt, Arc::new(Mutex::new(dummy)), false);
// We just make sure here that the implementation of a mmio device behaves as we expect,
// given a known virtio device implementation (the dummy device).
assert_eq!(d.locked_device().queue_events().len(), 2);
d.queue_select = 0;
assert_eq!(d.with_queue(0, |q| q.max_size), 16);
assert!(d.with_queue_mut(|q| q.size = 16));
assert_eq!(d.locked_device().queues()[d.queue_select as usize].size, 16);
d.queue_select = 1;
assert_eq!(d.with_queue(0, |q| q.max_size), 32);
assert!(d.with_queue_mut(|q| q.size = 16));
assert_eq!(d.locked_device().queues()[d.queue_select as usize].size, 16);
d.queue_select = 2;
assert_eq!(d.with_queue(0, |q| q.max_size), 0);
assert!(!d.with_queue_mut(|q| q.size = 16));
}
#[test]
fn test_bus_device_read() {
let m = single_region_mem(0x1000);
let interrupt = Arc::new(IrqTrigger::new());
let mut d = MmioTransport::new(
m,
interrupt,
Arc::new(Mutex::new(DummyDevice::new())),
false,
);
let mut buf = vec![0xff, 0, 0xfe, 0];
let buf_copy = buf.to_vec();
// The following read shouldn't be valid, because the length of the buf is not 4.
buf.push(0);
d.read(0x0, 0, &mut buf[..]);
assert_eq!(buf[..4], buf_copy[..]);
// the length is ok again
buf.pop();
// Now we test that reading at various predefined offsets works as intended.
d.read(0x0, 0, &mut buf[..]);
assert_eq!(read_le_u32(&buf[..]), MMIO_MAGIC_VALUE);
d.read(0x0, 0x04, &mut buf[..]);
assert_eq!(read_le_u32(&buf[..]), MMIO_VERSION);
d.read(0x0, 0x08, &mut buf[..]);
assert_eq!(read_le_u32(&buf[..]), d.locked_device().device_type());
d.read(0x0, 0x0c, &mut buf[..]);
assert_eq!(read_le_u32(&buf[..]), VENDOR_ID);
d.features_select = 0;
d.read(0x0, 0x10, &mut buf[..]);
assert_eq!(
read_le_u32(&buf[..]),
d.locked_device().avail_features_by_page(0)
);
d.features_select = 1;
d.read(0x0, 0x10, &mut buf[..]);
assert_eq!(
read_le_u32(&buf[..]),
d.locked_device().avail_features_by_page(0) | 0x1
);
d.read(0x0, 0x34, &mut buf[..]);
assert_eq!(read_le_u32(&buf[..]), 16);
d.read(0x0, 0x44, &mut buf[..]);
assert_eq!(read_le_u32(&buf[..]), u32::from(false));
d.interrupt.irq_status.store(111, Ordering::SeqCst);
d.read(0x0, 0x60, &mut buf[..]);
assert_eq!(read_le_u32(&buf[..]), 111);
d.is_vhost_user = true;
d.interrupt.status().store(0, Ordering::SeqCst);
d.read(0x0, 0x60, &mut buf[..]);
assert_eq!(read_le_u32(&buf[..]), VIRTIO_MMIO_INT_VRING);
d.is_vhost_user = true;
d.interrupt
.irq_status
.store(VIRTIO_MMIO_INT_CONFIG, Ordering::SeqCst);
d.read(0x0, 0x60, &mut buf[..]);
assert_eq!(read_le_u32(&buf[..]), VIRTIO_MMIO_INT_CONFIG);
d.read(0x0, 0x70, &mut buf[..]);
assert_eq!(read_le_u32(&buf[..]), 0);
d.config_generation = 5;
d.read(0x0, 0xfc, &mut buf[..]);
assert_eq!(read_le_u32(&buf[..]), 5);
// This read shouldn't do anything, as it's past the readable generic registers, and
// before the device specific configuration space. Btw, reads from the device specific
// conf space are going to be tested a bit later, alongside writes.
buf = buf_copy.to_vec();
d.read(0x0, 0xfd, &mut buf[..]);
assert_eq!(buf[..], buf_copy[..]);
// Read from an invalid address in generic register range.
d.read(0x0, 0xfb, &mut buf[..]);
assert_eq!(buf[..], buf_copy[..]);
// Read from an invalid length in generic register range.
d.read(0x0, 0xfc, &mut buf[..3]);
assert_eq!(buf[..], buf_copy[..]);
}
#[test]
#[allow(clippy::cognitive_complexity)]
fn test_bus_device_write() {
let m = single_region_mem(0x1000);
let interrupt = Arc::new(IrqTrigger::new());
let dummy_dev = Arc::new(Mutex::new(DummyDevice::new()));
let mut d = MmioTransport::new(m, interrupt, dummy_dev.clone(), false);
let mut buf = vec![0; 5];
write_le_u32(&mut buf[..4], 1);
// Nothing should happen, because the slice len > 4.
d.features_select = 0;
d.write(0x0, 0x14, &buf[..]);
assert_eq!(d.features_select, 0);
buf.pop();
assert_eq!(d.device_status, device_status::INIT);
set_device_status(&mut d, device_status::ACKNOWLEDGE);
// Acking features in invalid state shouldn't take effect.
assert_eq!(d.locked_device().acked_features(), 0x0);
d.acked_features_select = 0x0;
write_le_u32(&mut buf[..], 1);
d.write(0x0, 0x20, &buf[..]);
assert_eq!(d.locked_device().acked_features(), 0x0);
// Write to device specific configuration space should be ignored before setting
// device_status::DRIVER
let buf1 = vec![1; 0xeff];
for i in (0..0xeff).rev() {
let mut buf2 = vec![0; 0xeff];
d.write(0x0, 0x100 + i as u64, &buf1[i..]);
d.read(0x0, 0x100, &mut buf2[..]);
for item in buf2.iter().take(0xeff) {
assert_eq!(*item, 0);
}
}
set_device_status(&mut d, device_status::ACKNOWLEDGE | device_status::DRIVER);
assert_eq!(
d.device_status,
device_status::ACKNOWLEDGE | device_status::DRIVER
);
// now writes should work
d.features_select = 0;
write_le_u32(&mut buf[..], 1);
d.write(0x0, 0x14, &buf[..]);
assert_eq!(d.features_select, 1);
// Test acknowledging features on bus.
d.acked_features_select = 0;
write_le_u32(&mut buf[..], 0x124);
// Set the device available features in order to make acknowledging possible.
dummy_dev.lock().unwrap().set_avail_features(0x124);
d.write(0x0, 0x20, &buf[..]);
assert_eq!(d.locked_device().acked_features(), 0x124);
d.acked_features_select = 0;
write_le_u32(&mut buf[..], 2);
d.write(0x0, 0x24, &buf[..]);
assert_eq!(d.acked_features_select, 2);
set_device_status(
&mut d,
device_status::ACKNOWLEDGE | device_status::DRIVER | device_status::FEATURES_OK,
);
// Acking features in invalid state shouldn't take effect.
assert_eq!(d.locked_device().acked_features(), 0x124);
d.acked_features_select = 0x0;
write_le_u32(&mut buf[..], 1);
d.write(0x0, 0x20, &buf[..]);
assert_eq!(d.locked_device().acked_features(), 0x124);
// Setup queues
d.queue_select = 0;
write_le_u32(&mut buf[..], 3);
d.write(0x0, 0x30, &buf[..]);
assert_eq!(d.queue_select, 3);
d.queue_select = 0;
assert_eq!(d.locked_device().queues()[0].size, 16);
write_le_u32(&mut buf[..], 16);
d.write(0x0, 0x38, &buf[..]);
assert_eq!(d.locked_device().queues()[0].size, 16);
assert!(!d.locked_device().queues()[0].ready);
write_le_u32(&mut buf[..], 1);
d.write(0x0, 0x44, &buf[..]);
assert!(d.locked_device().queues()[0].ready);
assert_eq!(d.locked_device().queues()[0].desc_table_address.0, 0);
write_le_u32(&mut buf[..], 123);
d.write(0x0, 0x80, &buf[..]);
assert_eq!(d.locked_device().queues()[0].desc_table_address.0, 123);
d.write(0x0, 0x84, &buf[..]);
assert_eq!(
d.locked_device().queues()[0].desc_table_address.0,
123 + (123 << 32)
);
assert_eq!(d.locked_device().queues()[0].avail_ring_address.0, 0);
write_le_u32(&mut buf[..], 124);
d.write(0x0, 0x90, &buf[..]);
assert_eq!(d.locked_device().queues()[0].avail_ring_address.0, 124);
d.write(0x0, 0x94, &buf[..]);
assert_eq!(
d.locked_device().queues()[0].avail_ring_address.0,
124 + (124 << 32)
);
assert_eq!(d.locked_device().queues()[0].used_ring_address.0, 0);
write_le_u32(&mut buf[..], 125);
d.write(0x0, 0xa0, &buf[..]);
assert_eq!(d.locked_device().queues()[0].used_ring_address.0, 125);
d.write(0x0, 0xa4, &buf[..]);
assert_eq!(
d.locked_device().queues()[0].used_ring_address.0,
125 + (125 << 32)
);
set_device_status(
&mut d,
device_status::ACKNOWLEDGE
| device_status::DRIVER
| device_status::FEATURES_OK
| device_status::DRIVER_OK,
);
d.interrupt.irq_status.store(0b10_1010, Ordering::Relaxed);
write_le_u32(&mut buf[..], 0b111);
d.write(0x0, 0x64, &buf[..]);
assert_eq!(d.interrupt.irq_status.load(Ordering::Relaxed), 0b10_1000);
// Write to an invalid address in generic register range.
write_le_u32(&mut buf[..], 0xf);
d.config_generation = 0;
d.write(0x0, 0xfb, &buf[..]);
assert_eq!(d.config_generation, 0);
// Write to an invalid length in generic register range.
d.write(0x0, 0xfc, &buf[..2]);
assert_eq!(d.config_generation, 0);
// Here we test writes/read into/from the device specific configuration space.
let buf1 = vec![1; 0xeff];
for i in (0..0xeff).rev() {
let mut buf2 = vec![0; 0xeff];
d.write(0x0, 0x100 + i as u64, &buf1[i..]);
d.read(0x0, 0x100, &mut buf2[..]);
for item in buf2.iter().take(i) {
assert_eq!(*item, 0);
}
assert_eq!(buf1[i..], buf2[i..]);
}
}
#[test]
fn test_bus_device_activate() {
let m = single_region_mem(0x1000);
let interrupt = Arc::new(IrqTrigger::new());
let mut d = MmioTransport::new(
m,
interrupt,
Arc::new(Mutex::new(DummyDevice::new())),
false,
);
assert!(!d.locked_device().is_activated());
assert_eq!(d.device_status, device_status::INIT);
set_device_status(&mut d, device_status::ACKNOWLEDGE);
set_device_status(&mut d, device_status::ACKNOWLEDGE | device_status::DRIVER);
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/transport/pci/device.rs | src/vmm/src/devices/virtio/transport/pci/device.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
use std::cmp;
use std::collections::HashMap;
use std::fmt::{Debug, Formatter};
use std::io::{ErrorKind, Write};
use std::sync::atomic::{AtomicBool, AtomicU16, AtomicU32, AtomicUsize, Ordering};
use std::sync::{Arc, Barrier, Mutex};
use kvm_ioctls::{IoEventAddress, NoDatamatch};
use log::warn;
use pci::{
PciBdf, PciCapabilityId, PciClassCode, PciMassStorageSubclass, PciNetworkControllerSubclass,
PciSubclass,
};
use serde::{Deserialize, Serialize};
use thiserror::Error;
use vm_allocator::{AddressAllocator, AllocPolicy, RangeInclusive};
use vm_memory::{Address, ByteValued, GuestAddress, Le32};
use vmm_sys_util::errno;
use vmm_sys_util::eventfd::EventFd;
use crate::Vm;
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::generated::virtio_ids;
use crate::devices::virtio::queue::Queue;
use crate::devices::virtio::transport::pci::common_config::{
VirtioPciCommonConfig, VirtioPciCommonConfigState,
};
use crate::devices::virtio::transport::{VirtioInterrupt, VirtioInterruptType};
use crate::logger::{debug, error};
use crate::pci::configuration::{PciCapability, PciConfiguration, PciConfigurationState};
use crate::pci::msix::{MsixCap, MsixConfig, MsixConfigState};
use crate::pci::{BarReprogrammingParams, DeviceRelocationError, PciDevice};
use crate::snapshot::Persist;
use crate::utils::u64_to_usize;
use crate::vstate::bus::BusDevice;
use crate::vstate::interrupts::{InterruptError, MsixVectorGroup};
use crate::vstate::memory::GuestMemoryMmap;
use crate::vstate::resources::ResourceAllocator;
// VirtIO device status bits (see the VirtIO spec, "Device Status Field"). The driver writes
// these into the common configuration `device_status` register to report its progress through
// the initialization handshake.
const DEVICE_INIT: u8 = 0x00;
const DEVICE_ACKNOWLEDGE: u8 = 0x01;
const DEVICE_DRIVER: u8 = 0x02;
const DEVICE_DRIVER_OK: u8 = 0x04;
const DEVICE_FEATURES_OK: u8 = 0x08;
const DEVICE_FAILED: u8 = 0x80;

/// Vector value used to disable MSI for a queue.
pub const VIRTQ_MSI_NO_VECTOR: u16 = 0xffff;

/// BAR index we are using for VirtIO configuration
const VIRTIO_BAR_INDEX: u8 = 0;
// `cfg_type` values for the VirtIO structure PCI capabilities, mirroring the
// VIRTIO_PCI_CAP_*_CFG constants from the VirtIO specification.
enum PciCapabilityType {
    Common = 1,
    Notify = 2,
    Isr = 3,
    Device = 4,
    Pci = 5,
    SharedMemory = 8,
}
// This offset represents the 2 bytes omitted from the VirtioPciCap structure
// as they are already handled through add_capability(). These 2 bytes are the
// fields cap_vndr (1 byte) and cap_next (1 byte) defined in the virtio spec.
const VIRTIO_PCI_CAP_OFFSET: usize = 2;

// Layout of a VirtIO PCI capability (struct virtio_pci_cap in the VirtIO spec), minus the
// leading cap_vndr/cap_next bytes (see VIRTIO_PCI_CAP_OFFSET above).
#[repr(C, packed)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
struct VirtioPciCap {
    cap_len: u8,      // Generic PCI field: capability length
    cfg_type: u8,     // Identifies the structure.
    pci_bar: u8,      // Where to find it.
    id: u8,           // Multiple capabilities of the same type.
    padding: [u8; 2], // Pad to full dword.
    offset: Le32,     // Offset within bar.
    length: Le32,     // Length of the structure, in bytes.
}

// SAFETY: All members are simple numbers and any value is valid.
unsafe impl ByteValued for VirtioPciCap {}

impl PciCapability for VirtioPciCap {
    fn bytes(&self) -> &[u8] {
        self.as_slice()
    }

    fn id(&self) -> PciCapabilityId {
        PciCapabilityId::VendorSpecific
    }
}

// Number of header bytes (cap_vndr + cap_next) that precede this struct in the full
// capability and must still be accounted for in `cap_len`.
const VIRTIO_PCI_CAP_LEN_OFFSET: u8 = 2;

impl VirtioPciCap {
    /// Create a capability describing `length` bytes starting at `offset` within the VirtIO
    /// BAR.
    pub fn new(cfg_type: PciCapabilityType, offset: u32, length: u32) -> Self {
        VirtioPciCap {
            // `cap_len` covers the whole capability, including the 2 header bytes that are
            // not part of this struct.
            cap_len: u8::try_from(std::mem::size_of::<VirtioPciCap>()).unwrap()
                + VIRTIO_PCI_CAP_LEN_OFFSET,
            cfg_type: cfg_type as u8,
            pci_bar: VIRTIO_BAR_INDEX,
            id: 0,
            padding: [0; 2],
            offset: Le32::from(offset),
            length: Le32::from(length),
        }
    }
}
// VirtIO notification capability (struct virtio_pci_notify_cap): a plain VirtioPciCap
// followed by the notify_off_multiplier field.
#[repr(C, packed)]
#[derive(Clone, Copy, Default)]
struct VirtioPciNotifyCap {
    cap: VirtioPciCap,
    notify_off_multiplier: Le32, // Multiplier applied to a queue's notify_off.
}

// SAFETY: All members are simple numbers and any value is valid.
unsafe impl ByteValued for VirtioPciNotifyCap {}

impl PciCapability for VirtioPciNotifyCap {
    fn bytes(&self) -> &[u8] {
        self.as_slice()
    }

    fn id(&self) -> PciCapabilityId {
        PciCapabilityId::VendorSpecific
    }
}

impl VirtioPciNotifyCap {
    /// Create a notification capability describing `length` bytes at `offset` within the
    /// VirtIO BAR, with the given notification offset multiplier.
    pub fn new(cfg_type: PciCapabilityType, offset: u32, length: u32, multiplier: Le32) -> Self {
        VirtioPciNotifyCap {
            cap: VirtioPciCap {
                // `cap_len` covers the whole capability (including the 2 omitted header
                // bytes), so it is computed from the *outer* struct size.
                cap_len: u8::try_from(std::mem::size_of::<VirtioPciNotifyCap>()).unwrap()
                    + VIRTIO_PCI_CAP_LEN_OFFSET,
                cfg_type: cfg_type as u8,
                pci_bar: VIRTIO_BAR_INDEX,
                id: 0,
                padding: [0; 2],
                offset: Le32::from(offset),
                length: Le32::from(length),
            },
            notify_off_multiplier: multiplier,
        }
    }
}
// VIRTIO_PCI_CAP_PCI_CFG capability (struct virtio_pci_cfg_cap): exposes a 4-byte window
// (`pci_cfg_data`) through which the guest can reach BAR contents via PCI configuration
// space, without mapping the BAR itself.
#[repr(C, packed)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
struct VirtioPciCfgCap {
    cap: VirtioPciCap,
    pci_cfg_data: [u8; 4],
}

// SAFETY: All members are simple numbers and any value is valid.
unsafe impl ByteValued for VirtioPciCfgCap {}

impl PciCapability for VirtioPciCfgCap {
    fn bytes(&self) -> &[u8] {
        self.as_slice()
    }

    fn id(&self) -> PciCapabilityId {
        PciCapabilityId::VendorSpecific
    }
}

impl VirtioPciCfgCap {
    /// Create a zero-initialized PCI_CFG capability; the guest programs `offset`/`length`
    /// at runtime to select the BAR window it wants to access.
    fn new() -> Self {
        VirtioPciCfgCap {
            cap: VirtioPciCap {
                cap_len: u8::try_from(size_of::<Self>()).unwrap() + VIRTIO_PCI_CAP_LEN_OFFSET,
                cfg_type: PciCapabilityType::Pci as u8,
                pci_bar: VIRTIO_BAR_INDEX,
                id: 0,
                padding: [0; 2],
                offset: Le32::from(0),
                length: Le32::from(0),
            },
            ..Default::default()
        }
    }
}

// Book-keeping for the PCI_CFG capability: the capability contents plus the offset at which
// it lives inside the device's PCI configuration space.
#[derive(Debug, Clone, Copy, Default)]
struct VirtioPciCfgCapInfo {
    offset: usize,
    cap: VirtioPciCfgCap,
}
// PCI subclass used for VirtIO devices that don't map to a well-known PCI device class.
#[derive(Debug, Copy, Clone)]
pub enum PciVirtioSubclass {
    NonTransitionalBase = 0xff,
}

impl PciSubclass for PciVirtioSubclass {
    // Raw subclass byte as it appears in PCI configuration space.
    fn get_register_value(&self) -> u8 {
        *self as u8
    }
}
// Allocate one bar for the structs pointed to by the capability structures.
// As per the PCI specification, because the same BAR shares MSI-X and non
// MSI-X structures, it is recommended to use 8KiB alignment for all those
// structures.
//
// Layout of the single VirtIO BAR: every region below starts on an 8KiB boundary.
const COMMON_CONFIG_BAR_OFFSET: u64 = 0x0000;
const COMMON_CONFIG_SIZE: u64 = 56;
const ISR_CONFIG_BAR_OFFSET: u64 = 0x2000;
const ISR_CONFIG_SIZE: u64 = 1;
const DEVICE_CONFIG_BAR_OFFSET: u64 = 0x4000;
const DEVICE_CONFIG_SIZE: u64 = 0x1000;
const NOTIFICATION_BAR_OFFSET: u64 = 0x6000;
const NOTIFICATION_SIZE: u64 = 0x1000;
const MSIX_TABLE_BAR_OFFSET: u64 = 0x8000;
// The size is 256KiB because the table can hold up to 2048 entries, with each
// entry being 128 bits (4 DWORDS).
const MSIX_TABLE_SIZE: u64 = 0x40000;
const MSIX_PBA_BAR_OFFSET: u64 = 0x48000;
// The size is 2KiB because the Pending Bit Array has one bit per vector and it
// can support up to 2048 vectors.
const MSIX_PBA_SIZE: u64 = 0x800;
/// The BAR size must be a power of 2.
pub const CAPABILITY_BAR_SIZE: u64 = 0x80000;

const VIRTIO_COMMON_BAR_INDEX: usize = 0;
const VIRTIO_SHM_BAR_INDEX: usize = 2;

const NOTIFY_OFF_MULTIPLIER: u32 = 4; // A dword per notification address.

const VIRTIO_PCI_VENDOR_ID: u16 = 0x1af4;
const VIRTIO_PCI_DEVICE_ID_BASE: u16 = 0x1040; // Add to device type to get device ID.
// Serializable snapshot of a VirtioPciDevice, used for save/restore.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VirtioPciDeviceState {
    // Bus/device/function the device is attached at.
    pub pci_device_bdf: PciBdf,
    // Whether the device had already been activated when the snapshot was taken.
    pub device_activated: bool,
    // Offset of the VIRTIO_PCI_CAP_PCI_CFG capability in PCI configuration space.
    pub cap_pci_cfg_offset: usize,
    // Raw bytes of the VIRTIO_PCI_CAP_PCI_CFG capability.
    pub cap_pci_cfg: Vec<u8>,
    pub pci_configuration_state: PciConfigurationState,
    pub pci_dev_state: VirtioPciCommonConfigState,
    pub msix_state: MsixConfigState,
    // Guest-physical address of the VirtIO BAR.
    pub bar_address: u64,
}

// Errors that can occur while building a VirtioPciDevice. NOTE: the `///` comments on the
// variants below are Display strings consumed by displaydoc — they are behavior, not docs.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum VirtioPciDeviceError {
    /// Failed creating VirtioPciDevice: {0}
    CreateVirtioPciDevice(#[from] DeviceRelocationError),
    /// Error creating MSI configuration: {0}
    Msi(#[from] InterruptError),
}
/// PCI transport for a VirtIO device.
///
/// Owns the device's PCI configuration space, the VirtIO common configuration registers, the
/// MSI-X interrupt plumbing and a shared reference to the actual VirtIO device implementation.
pub struct VirtioPciDevice {
    id: String,
    // BDF assigned to the device
    pci_device_bdf: PciBdf,
    // PCI configuration registers.
    configuration: PciConfiguration,
    // virtio PCI common configuration
    common_config: VirtioPciCommonConfig,
    // Virtio device reference and status
    device: Arc<Mutex<dyn VirtioDevice>>,
    device_activated: Arc<AtomicBool>,
    // PCI interrupts.
    virtio_interrupt: Option<Arc<VirtioInterruptMsix>>,
    // Guest memory
    memory: GuestMemoryMmap,
    // Add a dedicated structure to hold information about the very specific
    // virtio-pci capability VIRTIO_PCI_CAP_PCI_CFG. This is needed to support
    // the legacy/backward compatible mechanism of letting the guest access the
    // other virtio capabilities without mapping the PCI BARs. This can be
    // needed when the guest tries to early access the virtio configuration of
    // a device.
    cap_pci_cfg_info: VirtioPciCfgCapInfo,
    // Allocated address for the BAR
    pub bar_address: u64,
}

impl Debug for VirtioPciDevice {
    // Keep Debug output minimal: most fields are large or do not implement Debug.
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        f.debug_struct("VirtioPciDevice")
            .field("id", &self.id)
            .finish()
    }
}
impl VirtioPciDevice {
    /// Build the PCI configuration-space header for a VirtIO device.
    ///
    /// Picks the PCI (class, subclass) pair based on the VirtIO device type and creates a
    /// type-0 (non-bridge) configuration space carrying the VirtIO vendor/device IDs and the
    /// MSI-X capability.
    fn pci_configuration(
        virtio_device_type: u32,
        msix_config: &Arc<Mutex<MsixConfig>>,
    ) -> PciConfiguration {
        // Modern VirtIO PCI device IDs are 0x1040 + the VirtIO device type.
        let pci_device_id = VIRTIO_PCI_DEVICE_ID_BASE + u16::try_from(virtio_device_type).unwrap();
        let (class, subclass) = match virtio_device_type {
            virtio_ids::VIRTIO_ID_NET => (
                PciClassCode::NetworkController,
                &PciNetworkControllerSubclass::EthernetController as &dyn PciSubclass,
            ),
            virtio_ids::VIRTIO_ID_BLOCK => (
                PciClassCode::MassStorage,
                &PciMassStorageSubclass::MassStorage as &dyn PciSubclass,
            ),
            _ => (
                PciClassCode::Other,
                &PciVirtioSubclass::NonTransitionalBase as &dyn PciSubclass,
            ),
        };

        PciConfiguration::new_type0(
            VIRTIO_PCI_VENDOR_ID,
            pci_device_id,
            0x1, // For modern virtio-PCI devices
            class,
            subclass,
            VIRTIO_PCI_VENDOR_ID,
            pci_device_id,
            Some(msix_config.clone()),
        )
    }

    /// Allocate the PCI BAR for the VirtIO device and its associated capabilities.
    ///
    /// This must happen only during the creation of a brand new VM. When a VM is restored from a
    /// known state, the BARs are already created with the right content, therefore we don't need
    /// to go through this codepath.
    pub fn allocate_bars(&mut self, mmio64_allocator: &mut AddressAllocator) {
        let device_clone = self.device.clone();
        // Keep the device locked while we set up its BAR. The binding is intentionally named
        // `_device` (the guard itself is not otherwise used) to avoid an unused-variable
        // warning while still holding the lock for the duration of this function.
        let _device = device_clone.lock().unwrap();

        // Allocate the virtio-pci capability BAR.
        // See http://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.html#x1-740004
        let virtio_pci_bar_addr = mmio64_allocator
            .allocate(
                CAPABILITY_BAR_SIZE,
                CAPABILITY_BAR_SIZE,
                AllocPolicy::FirstMatch,
            )
            .unwrap()
            .start();
        self.configuration.add_pci_bar(
            VIRTIO_COMMON_BAR_INDEX,
            virtio_pci_bar_addr,
            CAPABILITY_BAR_SIZE,
        );

        // Once the BARs are allocated, the capabilities can be added to the PCI configuration.
        self.add_pci_capabilities();

        self.bar_address = virtio_pci_bar_addr;
    }

    /// Constructs a new PCI transport for the given virtio device.
    pub fn new(
        id: String,
        memory: GuestMemoryMmap,
        device: Arc<Mutex<dyn VirtioDevice>>,
        msix_vectors: Arc<MsixVectorGroup>,
        pci_device_bdf: u32,
    ) -> Result<Self, VirtioPciDeviceError> {
        let num_queues = device.lock().expect("Poisoned lock").queues().len();

        let msix_config = Arc::new(Mutex::new(MsixConfig::new(
            msix_vectors.clone(),
            pci_device_bdf,
        )));

        let pci_config = Self::pci_configuration(
            device.lock().expect("Poisoned lock").device_type(),
            &msix_config,
        );

        // Fresh device: every common-config register starts at its reset value, and all
        // queues (plus the config event) start unmapped from MSI-X (NO_VECTOR).
        let virtio_common_config = VirtioPciCommonConfig::new(VirtioPciCommonConfigState {
            driver_status: 0,
            config_generation: 0,
            device_feature_select: 0,
            driver_feature_select: 0,
            queue_select: 0,
            msix_config: VIRTQ_MSI_NO_VECTOR,
            msix_queues: vec![VIRTQ_MSI_NO_VECTOR; num_queues],
        });

        let interrupt = Arc::new(VirtioInterruptMsix::new(
            msix_config.clone(),
            virtio_common_config.msix_config.clone(),
            virtio_common_config.msix_queues.clone(),
            msix_vectors,
        ));

        let virtio_pci_device = VirtioPciDevice {
            id,
            pci_device_bdf: pci_device_bdf.into(),
            configuration: pci_config,
            common_config: virtio_common_config,
            device,
            device_activated: Arc::new(AtomicBool::new(false)),
            virtio_interrupt: Some(interrupt),
            memory,
            cap_pci_cfg_info: VirtioPciCfgCapInfo::default(),
            bar_address: 0,
        };

        Ok(virtio_pci_device)
    }

    /// Re-create the PCI transport from a snapshot, re-activating the inner device if it was
    /// active when the snapshot was taken.
    pub fn new_from_state(
        id: String,
        vm: &Arc<Vm>,
        device: Arc<Mutex<dyn VirtioDevice>>,
        state: VirtioPciDeviceState,
    ) -> Result<Self, VirtioPciDeviceError> {
        let msix_config =
            MsixConfig::from_state(state.msix_state, vm.clone(), state.pci_device_bdf.into())?;
        let vectors = msix_config.vectors.clone();
        let msix_config = Arc::new(Mutex::new(msix_config));

        let pci_config = PciConfiguration::type0_from_state(
            state.pci_configuration_state,
            Some(msix_config.clone()),
        );

        let virtio_common_config = VirtioPciCommonConfig::new(state.pci_dev_state);
        let cap_pci_cfg_info = VirtioPciCfgCapInfo {
            offset: state.cap_pci_cfg_offset,
            cap: *VirtioPciCfgCap::from_slice(&state.cap_pci_cfg).unwrap(),
        };

        let interrupt = Arc::new(VirtioInterruptMsix::new(
            msix_config.clone(),
            virtio_common_config.msix_config.clone(),
            virtio_common_config.msix_queues.clone(),
            vectors,
        ));

        let virtio_pci_device = VirtioPciDevice {
            id,
            pci_device_bdf: state.pci_device_bdf,
            configuration: pci_config,
            common_config: virtio_common_config,
            device,
            device_activated: Arc::new(AtomicBool::new(state.device_activated)),
            virtio_interrupt: Some(interrupt),
            memory: vm.guest_memory().clone(),
            cap_pci_cfg_info,
            bar_address: state.bar_address,
        };

        if state.device_activated {
            virtio_pci_device
                .device
                .lock()
                .expect("Poisoned lock")
                .activate(
                    virtio_pci_device.memory.clone(),
                    virtio_pci_device.virtio_interrupt.as_ref().unwrap().clone(),
                );
        }

        Ok(virtio_pci_device)
    }

    /// Returns `true` once the driver has completed the initialization handshake:
    /// ACKNOWLEDGE, DRIVER, FEATURES_OK and DRIVER_OK all set, and FAILED clear.
    fn is_driver_ready(&self) -> bool {
        // NOTE: the original wrapped this expression in parentheses, triggering the
        // `unused_parens` lint; they have been removed.
        let ready_bits =
            DEVICE_ACKNOWLEDGE | DEVICE_DRIVER | DEVICE_DRIVER_OK | DEVICE_FEATURES_OK;
        self.common_config.driver_status == ready_bits
            && self.common_config.driver_status & DEVICE_FAILED == 0
    }

    /// Determines if the driver has requested the device (re)init / reset itself
    fn is_driver_init(&self) -> bool {
        self.common_config.driver_status == DEVICE_INIT
    }

    /// Guest-physical base address of the VirtIO configuration BAR.
    pub fn config_bar_addr(&self) -> u64 {
        self.configuration.get_bar_addr(VIRTIO_BAR_INDEX as usize)
    }

    /// Add the VirtIO PCI capabilities (common, ISR, device, notify, PCI_CFG and MSI-X) to
    /// the device's PCI configuration space. Must run after the BAR has been allocated.
    fn add_pci_capabilities(&mut self) {
        // Add pointers to the different configuration structures from the PCI capabilities.
        let common_cap = VirtioPciCap::new(
            PciCapabilityType::Common,
            COMMON_CONFIG_BAR_OFFSET.try_into().unwrap(),
            COMMON_CONFIG_SIZE.try_into().unwrap(),
        );
        self.configuration.add_capability(&common_cap);

        let isr_cap = VirtioPciCap::new(
            PciCapabilityType::Isr,
            ISR_CONFIG_BAR_OFFSET.try_into().unwrap(),
            ISR_CONFIG_SIZE.try_into().unwrap(),
        );
        self.configuration.add_capability(&isr_cap);

        // TODO(dgreid) - set based on device's configuration size?
        let device_cap = VirtioPciCap::new(
            PciCapabilityType::Device,
            DEVICE_CONFIG_BAR_OFFSET.try_into().unwrap(),
            DEVICE_CONFIG_SIZE.try_into().unwrap(),
        );
        self.configuration.add_capability(&device_cap);

        let notify_cap = VirtioPciNotifyCap::new(
            PciCapabilityType::Notify,
            NOTIFICATION_BAR_OFFSET.try_into().unwrap(),
            NOTIFICATION_SIZE.try_into().unwrap(),
            Le32::from(NOTIFY_OFF_MULTIPLIER),
        );
        self.configuration.add_capability(&notify_cap);

        let configuration_cap = VirtioPciCfgCap::new();
        // Remember where the PCI_CFG capability landed so config-space accesses to it can be
        // intercepted later (see read/write_config_register). The +OFFSET skips the
        // cap_vndr/cap_next header bytes handled by add_capability().
        self.cap_pci_cfg_info.offset =
            self.configuration.add_capability(&configuration_cap) + VIRTIO_PCI_CAP_OFFSET;
        self.cap_pci_cfg_info.cap = configuration_cap;

        if let Some(interrupt) = &self.virtio_interrupt {
            let msix_cap = MsixCap::new(
                VIRTIO_BAR_INDEX,
                interrupt
                    .msix_config
                    .lock()
                    .expect("Poisoned lock")
                    .vectors
                    .num_vectors(),
                MSIX_TABLE_BAR_OFFSET.try_into().unwrap(),
                VIRTIO_BAR_INDEX,
                MSIX_PBA_BAR_OFFSET.try_into().unwrap(),
            );
            self.configuration.add_capability(&msix_cap);
        }
    }

    /// Read from the PCI_CFG capability. Reads of the capability header come straight out of
    /// the cached structure; reads of the `pci_cfg_data` window are forwarded to the BAR
    /// region currently selected by the capability's offset/length fields.
    fn read_cap_pci_cfg(&mut self, offset: usize, mut data: &mut [u8]) {
        let cap_slice = self.cap_pci_cfg_info.cap.as_slice();
        let data_len = data.len();
        let cap_len = cap_slice.len();
        if offset + data_len > cap_len {
            error!("Failed to read cap_pci_cfg from config space");
            return;
        }

        if offset < std::mem::size_of::<VirtioPciCap>() {
            if let Some(end) = offset.checked_add(data_len) {
                // This write can't fail, offset and end are checked against config_len.
                data.write_all(&cap_slice[offset..cmp::min(end, cap_len)])
                    .unwrap();
            }
        } else {
            let bar_offset: u32 = self.cap_pci_cfg_info.cap.cap.offset.into();
            let len = u32::from(self.cap_pci_cfg_info.cap.cap.length) as usize;
            // BAR reads expect that the buffer has the exact size of the field that
            // offset is pointing to. So, do some check that the `length` has a meaningful value
            // and only use the part of the buffer we actually need.
            if len <= 4 {
                self.read_bar(0, bar_offset as u64, &mut data[..len]);
            }
        }
    }

    /// Write to the PCI_CFG capability. Writes to the capability header update the cached
    /// structure (selecting the BAR window); writes to the `pci_cfg_data` window are
    /// forwarded to the selected BAR region.
    fn write_cap_pci_cfg(&mut self, offset: usize, data: &[u8]) -> Option<Arc<Barrier>> {
        let cap_slice = self.cap_pci_cfg_info.cap.as_mut_slice();
        let data_len = data.len();
        let cap_len = cap_slice.len();
        if offset + data_len > cap_len {
            error!("Failed to write cap_pci_cfg to config space");
            return None;
        }

        if offset < std::mem::size_of::<VirtioPciCap>() {
            let (_, right) = cap_slice.split_at_mut(offset);
            right[..data_len].copy_from_slice(data);
            None
        } else {
            let bar_offset: u32 = self.cap_pci_cfg_info.cap.cap.offset.into();
            let len = u32::from(self.cap_pci_cfg_info.cap.cap.length) as usize;
            // BAR writes expect that the buffer has the exact size of the field that
            // offset is pointing to. So, do some check that the `length` has a meaningful value
            // and only use the part of the buffer we actually need.
            if len <= 4 {
                let len = len.min(data.len());
                self.write_bar(0, bar_offset as u64, &data[..len])
            } else {
                None
            }
        }
    }

    /// Shared handle to the inner VirtIO device.
    pub fn virtio_device(&self) -> Arc<Mutex<dyn VirtioDevice>> {
        self.device.clone()
    }

    /// `true` when the driver has finished the handshake but the device has not yet been
    /// activated.
    fn needs_activation(&self) -> bool {
        !self.device_activated.load(Ordering::SeqCst) && self.is_driver_ready()
    }

    /// Register the IoEvent notification for a VirtIO device
    pub fn register_notification_ioevent(&self, vm: &Vm) -> Result<(), errno::Error> {
        let bar_addr = self.config_bar_addr();
        for (i, queue_evt) in self
            .device
            .lock()
            .expect("Poisoned lock")
            .queue_events()
            .iter()
            .enumerate()
        {
            // Each queue notifies at notify_base + queue_index * NOTIFY_OFF_MULTIPLIER.
            let notify_base = bar_addr + NOTIFICATION_BAR_OFFSET;
            let io_addr =
                IoEventAddress::Mmio(notify_base + i as u64 * NOTIFY_OFF_MULTIPLIER as u64);
            vm.fd().register_ioevent(queue_evt, &io_addr, NoDatamatch)?;
        }
        Ok(())
    }

    /// Snapshot the transport state for save/restore.
    pub fn state(&self) -> VirtioPciDeviceState {
        VirtioPciDeviceState {
            pci_device_bdf: self.pci_device_bdf,
            device_activated: self.device_activated.load(Ordering::Acquire),
            cap_pci_cfg_offset: self.cap_pci_cfg_info.offset,
            cap_pci_cfg: self.cap_pci_cfg_info.cap.bytes().to_vec(),
            pci_configuration_state: self.configuration.state(),
            pci_dev_state: self.common_config.state(),
            msix_state: self
                .virtio_interrupt
                .as_ref()
                .unwrap()
                .msix_config
                .lock()
                .expect("Poisoned lock")
                .state(),
            bar_address: self.bar_address,
        }
    }
}
/// MSI-X backed implementation of VirtIO interrupt delivery.
///
/// Shares the vector-mapping state (`config_vector`, `queues_vectors`) with the common
/// configuration registers, so guest writes to the common config are immediately visible
/// here.
pub struct VirtioInterruptMsix {
    // MSI-X table/PBA state, shared with the PCI configuration space.
    msix_config: Arc<Mutex<MsixConfig>>,
    // Vector the guest mapped to configuration-change events.
    config_vector: Arc<AtomicU16>,
    // Per-queue vector mapping, indexed by queue number.
    queues_vectors: Arc<Mutex<Vec<u16>>>,
    // The actual interrupt vectors used to signal the guest.
    vectors: Arc<MsixVectorGroup>,
}

impl std::fmt::Debug for VirtioInterruptMsix {
    // `vectors` is deliberately omitted from the Debug output.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("VirtioInterruptMsix")
            .field("msix_config", &self.msix_config)
            .field("config_vector", &self.config_vector)
            .field("queues_vectors", &self.queues_vectors)
            .finish()
    }
}

impl VirtioInterruptMsix {
    /// Create a new MSI-X interrupt object from the shared vector-mapping state.
    pub fn new(
        msix_config: Arc<Mutex<MsixConfig>>,
        config_vector: Arc<AtomicU16>,
        queues_vectors: Arc<Mutex<Vec<u16>>>,
        vectors: Arc<MsixVectorGroup>,
    ) -> Self {
        VirtioInterruptMsix {
            msix_config,
            config_vector,
            queues_vectors,
            vectors,
        }
    }
}
impl VirtioInterrupt for VirtioInterruptMsix {
    /// Inject an interrupt for the given event, honouring MSI-X masking.
    fn trigger(&self, int_type: VirtioInterruptType) -> Result<(), InterruptError> {
        // Resolve the MSI-X vector the guest mapped to this event.
        let vector = match int_type {
            VirtioInterruptType::Config => self.config_vector.load(Ordering::Acquire),
            VirtioInterruptType::Queue(queue_index) => *self
                .queues_vectors
                .lock()
                .unwrap()
                .get(queue_index as usize)
                .ok_or(InterruptError::InvalidVectorIndex(queue_index as usize))?,
        };

        // The guest did not map this event to any vector; drop the interrupt.
        if vector == VIRTQ_MSI_NO_VECTOR {
            return Ok(());
        }

        let config = &mut self.msix_config.lock().unwrap();
        let entry = &config.table_entries[vector as usize];
        // In case the vector control register associated with the entry
        // has its first bit set, this means the vector is masked and the
        // device should not inject the interrupt.
        // Instead, the Pending Bit Array table is updated to reflect there
        // is a pending interrupt for this specific vector.
        if config.masked || entry.masked() {
            config.set_pba_bit(vector, false);
            return Ok(());
        }

        self.vectors.trigger(vector as usize)
    }

    /// EventFd backing the vector mapped to `int_type`, if any.
    fn notifier(&self, int_type: VirtioInterruptType) -> Option<&EventFd> {
        let vector = match int_type {
            VirtioInterruptType::Config => self.config_vector.load(Ordering::Acquire),
            VirtioInterruptType::Queue(queue_index) => *self
                .queues_vectors
                .lock()
                .unwrap()
                .get(queue_index as usize)?,
        };

        self.vectors.notifier(vector as usize)
    }

    // MSI-X transports do not use the legacy ISR status register; always report 0.
    fn status(&self) -> Arc<AtomicU32> {
        Arc::new(AtomicU32::new(0))
    }

    // Test-only hooks: parameters renamed with a leading underscore to silence
    // `unused_variables` warnings in test builds.
    #[cfg(test)]
    fn has_pending_interrupt(&self, _interrupt_type: VirtioInterruptType) -> bool {
        false
    }

    #[cfg(test)]
    fn ack_interrupt(&self, _interrupt_type: VirtioInterruptType) {
        // Do nothing here
    }
}
impl PciDevice for VirtioPciDevice {
    /// Write to the device's PCI configuration space.
    ///
    /// Accesses falling entirely inside the VIRTIO_PCI_CAP_PCI_CFG capability window are
    /// routed to `write_cap_pci_cfg` (which may forward them into the BAR); everything else
    /// goes to the regular configuration registers.
    fn write_config_register(
        &mut self,
        reg_idx: usize,
        offset: u64,
        data: &[u8],
    ) -> Option<Arc<Barrier>> {
        // Handle the special case where the capability VIRTIO_PCI_CAP_PCI_CFG
        // is accessed. This capability has a special meaning as it allows the
        // guest to access other capabilities without mapping the PCI BAR.
        let base = reg_idx * 4; // configuration registers are 4 bytes wide
        if base + u64_to_usize(offset) >= self.cap_pci_cfg_info.offset
            && base + u64_to_usize(offset) + data.len()
                <= self.cap_pci_cfg_info.offset + self.cap_pci_cfg_info.cap.bytes().len()
        {
            // Rebase the access to be relative to the start of the capability structure.
            let offset = base + u64_to_usize(offset) - self.cap_pci_cfg_info.offset;
            self.write_cap_pci_cfg(offset, data)
        } else {
            self.configuration
                .write_config_register(reg_idx, offset, data);
            None
        }
    }
    /// Read a 32-bit register from the device's PCI configuration space.
    ///
    /// Reads landing inside the VIRTIO_PCI_CAP_PCI_CFG capability window are served by
    /// `read_cap_pci_cfg`; anything else comes from the regular configuration registers.
    fn read_config_register(&mut self, reg_idx: usize) -> u32 {
        // Handle the special case where the capability VIRTIO_PCI_CAP_PCI_CFG
        // is accessed. This capability has a special meaning as it allows the
        // guest to access other capabilities without mapping the PCI BAR.
        let base = reg_idx * 4; // configuration registers are 4 bytes wide
        if base >= self.cap_pci_cfg_info.offset
            && base + 4 <= self.cap_pci_cfg_info.offset + self.cap_pci_cfg_info.cap.bytes().len()
        {
            let offset = base - self.cap_pci_cfg_info.offset;
            let mut data = [0u8; 4];
            let len = u32::from(self.cap_pci_cfg_info.cap.cap.length) as usize;
            // Only forward the read if the guest programmed a meaningful window length;
            // otherwise return 0.
            if len <= 4 {
                self.read_cap_pci_cfg(offset, &mut data[..len]);
                u32::from_le_bytes(data)
            } else {
                0
            }
        } else {
            self.configuration.read_reg(reg_idx)
        }
    }
    /// Detect whether a configuration-space write is relocating one of the device's BARs.
    /// Fully delegated to the PCI configuration object.
    fn detect_bar_reprogramming(
        &mut self,
        reg_idx: usize,
        data: &[u8],
    ) -> Option<BarReprogrammingParams> {
        self.configuration.detect_bar_reprogramming(reg_idx, data)
    }
fn move_bar(&mut self, old_base: u64, new_base: u64) -> Result<(), DeviceRelocationError> {
// We only update our idea of the bar in order to support free_bars() above.
// The majority of the reallocation is done inside DeviceManager.
if self.bar_address == old_base {
self.bar_address = new_base;
}
Ok(())
}
    /// Dispatch a read of the VirtIO BAR to the region the offset falls in
    /// (common config, ISR, device config, notification, MSI-X table or PBA).
    /// Reads outside any known region are silently ignored.
    fn read_bar(&mut self, _base: u64, offset: u64, data: &mut [u8]) {
        match offset {
            o if o < COMMON_CONFIG_BAR_OFFSET + COMMON_CONFIG_SIZE => {
                self.common_config
                    .read(o - COMMON_CONFIG_BAR_OFFSET, data, self.device.clone())
            }
            o if (ISR_CONFIG_BAR_OFFSET..ISR_CONFIG_BAR_OFFSET + ISR_CONFIG_SIZE).contains(&o) => {
                // We don't actually support legacy INT#x interrupts for VirtIO PCI devices
                warn!("pci: read access to unsupported ISR status field");
                data.fill(0);
            }
            o if (DEVICE_CONFIG_BAR_OFFSET..DEVICE_CONFIG_BAR_OFFSET + DEVICE_CONFIG_SIZE)
                .contains(&o) =>
            {
                let device = self.device.lock().unwrap();
                device.read_config(o - DEVICE_CONFIG_BAR_OFFSET, data);
            }
            o if (NOTIFICATION_BAR_OFFSET..NOTIFICATION_BAR_OFFSET + NOTIFICATION_SIZE)
                .contains(&o) =>
            {
                // Handled with ioeventfds.
                warn!("pci: unexpected read to notification BAR. Offset {o:#x}");
            }
            o if (MSIX_TABLE_BAR_OFFSET..MSIX_TABLE_BAR_OFFSET + MSIX_TABLE_SIZE).contains(&o) => {
                if let Some(interrupt) = &self.virtio_interrupt {
                    interrupt
                        .msix_config
                        .lock()
                        .unwrap()
                        .read_table(o - MSIX_TABLE_BAR_OFFSET, data);
                }
            }
            o if (MSIX_PBA_BAR_OFFSET..MSIX_PBA_BAR_OFFSET + MSIX_PBA_SIZE).contains(&o) => {
                if let Some(interrupt) = &self.virtio_interrupt {
                    interrupt
                        .msix_config
                        .lock()
                        .unwrap()
                        .read_pba(o - MSIX_PBA_BAR_OFFSET, data);
                }
            }
            _ => (),
        }
    }
fn write_bar(&mut self, _base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
match offset {
o if o < COMMON_CONFIG_BAR_OFFSET + COMMON_CONFIG_SIZE => {
self.common_config
.write(o - COMMON_CONFIG_BAR_OFFSET, data, self.device.clone())
}
o if (ISR_CONFIG_BAR_OFFSET..ISR_CONFIG_BAR_OFFSET + ISR_CONFIG_SIZE).contains(&o) => {
// We don't actually support legacy INT#x interrupts for VirtIO PCI devices
warn!("pci: access to unsupported ISR status field");
}
o if (DEVICE_CONFIG_BAR_OFFSET..DEVICE_CONFIG_BAR_OFFSET + DEVICE_CONFIG_SIZE)
.contains(&o) =>
{
let mut device = self.device.lock().unwrap();
device.write_config(o - DEVICE_CONFIG_BAR_OFFSET, data);
}
o if (NOTIFICATION_BAR_OFFSET..NOTIFICATION_BAR_OFFSET + NOTIFICATION_SIZE)
.contains(&o) =>
{
// Handled with ioeventfds.
warn!("pci: unexpected write to notification BAR. Offset {o:#x}");
}
o if (MSIX_TABLE_BAR_OFFSET..MSIX_TABLE_BAR_OFFSET + MSIX_TABLE_SIZE).contains(&o) => {
if let Some(interrupt) = &self.virtio_interrupt {
interrupt
.msix_config
.lock()
.unwrap()
.write_table(o - MSIX_TABLE_BAR_OFFSET, data);
}
}
o if (MSIX_PBA_BAR_OFFSET..MSIX_PBA_BAR_OFFSET + MSIX_PBA_SIZE).contains(&o) => {
if let Some(interrupt) = &self.virtio_interrupt {
interrupt
.msix_config
.lock()
.unwrap()
.write_pba(o - MSIX_PBA_BAR_OFFSET, data);
}
}
_ => (),
};
// Try and activate the device if the driver status has changed
if self.needs_activation() {
debug!("Activating device");
let interrupt = Arc::clone(self.virtio_interrupt.as_ref().unwrap());
match self
.virtio_device()
.lock()
.unwrap()
.activate(self.memory.clone(), interrupt.clone())
{
Ok(()) => self.device_activated.store(true, Ordering::SeqCst),
Err(err) => {
error!("Error activating device: {err:?}");
// Section 2.1.2 of the specification states that we need to send a device
// configuration change interrupt
let _ = interrupt.trigger(VirtioInterruptType::Config);
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/transport/pci/common_config.rs | src/vmm/src/devices/virtio/transport/pci/common_config.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
use std::sync::atomic::{AtomicU16, Ordering};
use std::sync::{Arc, Mutex};
use byteorder::{ByteOrder, LittleEndian};
use serde::{Deserialize, Serialize};
use vm_memory::GuestAddress;
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::queue::Queue;
use crate::devices::virtio::transport::pci::device::VIRTQ_MSI_NO_VECTOR;
use crate::logger::warn;
// Identifier for the common configuration component in snapshots.
pub const VIRTIO_PCI_COMMON_CONFIG_ID: &str = "virtio_pci_common_config";

/// Serializable snapshot of `VirtioPciCommonConfig`, used for save/restore.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VirtioPciCommonConfigState {
    pub driver_status: u8,
    pub config_generation: u8,
    pub device_feature_select: u32,
    pub driver_feature_select: u32,
    pub queue_select: u16,
    // Vector mapped to configuration-change events (VIRTQ_MSI_NO_VECTOR if unmapped).
    pub msix_config: u16,
    // Per-queue vector mapping, indexed by queue number.
    pub msix_queues: Vec<u16>,
}
/// Contains the data for reading and writing the common configuration structure of a virtio PCI
/// device.
///
/// * Registers:
///
/// ** About the whole device.
/// le32 device_feature_select; // 0x00 // read-write
/// le32 device_feature; // 0x04 // read-only for driver
/// le32 driver_feature_select; // 0x08 // read-write
/// le32 driver_feature; // 0x0C // read-write
/// le16 msix_config; // 0x10 // read-write
/// le16 num_queues; // 0x12 // read-only for driver
/// u8 device_status; // 0x14 // read-write (driver_status)
/// u8 config_generation; // 0x15 // read-only for driver
///
/// ** About a specific virtqueue.
/// le16 queue_select; // 0x16 // read-write
/// le16 queue_size; // 0x18 // read-write, power of 2, or 0.
/// le16 queue_msix_vector; // 0x1A // read-write
/// le16 queue_enable; // 0x1C // read-write (Ready)
/// le16 queue_notify_off; // 0x1E // read-only for driver
/// le64 queue_desc; // 0x20 // read-write
/// le64 queue_avail; // 0x28 // read-write
/// le64 queue_used; // 0x30 // read-write
#[derive(Debug)]
pub struct VirtioPciCommonConfig {
    pub driver_status: u8,
    pub config_generation: u8,
    pub device_feature_select: u32,
    pub driver_feature_select: u32,
    pub queue_select: u16,
    // The MSI-X mappings are behind Arc so the interrupt object (VirtioInterruptMsix) can
    // observe guest updates to them without going through this struct.
    pub msix_config: Arc<AtomicU16>,
    pub msix_queues: Arc<Mutex<Vec<u16>>>,
}
impl VirtioPciCommonConfig {
pub fn new(state: VirtioPciCommonConfigState) -> Self {
VirtioPciCommonConfig {
driver_status: state.driver_status,
config_generation: state.config_generation,
device_feature_select: state.device_feature_select,
driver_feature_select: state.driver_feature_select,
queue_select: state.queue_select,
msix_config: Arc::new(AtomicU16::new(state.msix_config)),
msix_queues: Arc::new(Mutex::new(state.msix_queues)),
}
}
    /// Snapshot the current register values for save/restore.
    pub fn state(&self) -> VirtioPciCommonConfigState {
        VirtioPciCommonConfigState {
            driver_status: self.driver_status,
            config_generation: self.config_generation,
            device_feature_select: self.device_feature_select,
            driver_feature_select: self.driver_feature_select,
            queue_select: self.queue_select,
            msix_config: self.msix_config.load(Ordering::Acquire),
            msix_queues: self.msix_queues.lock().unwrap().clone(),
        }
    }
    /// Dispatch a read of the common configuration structure based on the access width.
    ///
    /// Only 1-, 2- and 4-byte accesses are served; other lengths (including 8, which passes
    /// the assert) fall through to the warn arm and leave `data` untouched — drivers access
    /// the le64 queue-address fields as two 32-bit halves.
    pub fn read(&mut self, offset: u64, data: &mut [u8], device: Arc<Mutex<dyn VirtioDevice>>) {
        assert!(data.len() <= 8);

        match data.len() {
            1 => {
                let v = self.read_common_config_byte(offset);
                data[0] = v;
            }
            2 => {
                let v = self.read_common_config_word(offset, device.lock().unwrap().queues());
                LittleEndian::write_u16(data, v);
            }
            4 => {
                let v = self.read_common_config_dword(offset, device);
                LittleEndian::write_u32(data, v);
            }
            _ => warn!(
                "pci: invalid data length for virtio read: len {}",
                data.len()
            ),
        }
    }
    /// Dispatch a write to the common configuration structure based on the access width.
    ///
    /// Only 1-, 2- and 4-byte accesses are served; other lengths (including 8, which passes
    /// the assert) fall through to the warn arm and are dropped.
    pub fn write(&mut self, offset: u64, data: &[u8], device: Arc<Mutex<dyn VirtioDevice>>) {
        assert!(data.len() <= 8);

        match data.len() {
            1 => self.write_common_config_byte(offset, data[0]),
            2 => self.write_common_config_word(
                offset,
                LittleEndian::read_u16(data),
                device.lock().unwrap().queues_mut(),
            ),
            4 => self.write_common_config_dword(offset, LittleEndian::read_u32(data), device),
            _ => warn!(
                "pci: invalid data length for virtio write: len {}",
                data.len()
            ),
        }
    }
fn read_common_config_byte(&self, offset: u64) -> u8 {
// The driver is only allowed to do aligned, properly sized access.
match offset {
0x14 => self.driver_status,
0x15 => self.config_generation,
_ => {
warn!("pci: invalid virtio config byte read: 0x{:x}", offset);
0
}
}
}
fn write_common_config_byte(&mut self, offset: u64, value: u8) {
match offset {
0x14 => self.driver_status = value,
_ => {
warn!("pci: invalid virtio config byte write: 0x{:x}", offset);
}
}
}
    /// Serve a 16-bit read of the common configuration structure.
    fn read_common_config_word(&self, offset: u64, queues: &[Queue]) -> u16 {
        match offset {
            0x10 => self.msix_config.load(Ordering::Acquire),
            0x12 => queues.len().try_into().unwrap(), // num_queues
            0x16 => self.queue_select,
            // queue_size of the selected queue, or 0 if queue_select is invalid.
            0x18 => self.with_queue(queues, |q| q.size).unwrap_or(0),
            // If `queue_select` points to an invalid queue we should return NO_VECTOR.
            // Reading from here
            // https://docs.oasis-open.org/virtio/virtio/v1.1/csprd01/virtio-v1.1-csprd01.html#x1-1280005:
            //
            // > The device MUST return vector mapped to a given event, (NO_VECTOR if unmapped) on
            // > read of config_msix_vector/queue_msix_vector.
            0x1a => self
                .msix_queues
                .lock()
                .unwrap()
                .get(self.queue_select as usize)
                .copied()
                .unwrap_or(VIRTQ_MSI_NO_VECTOR),
            // queue_enable: 1 if the selected queue is ready, 0 otherwise.
            0x1c => u16::from(self.with_queue(queues, |q| q.ready).unwrap_or(false)),
            0x1e => self.queue_select, // notify_off
            _ => {
                warn!("pci: invalid virtio register word read: 0x{:x}", offset);
                0
            }
        }
    }
fn write_common_config_word(&mut self, offset: u64, value: u16, queues: &mut [Queue]) {
match offset {
0x10 => {
// Make sure that the guest doesn't select an invalid vector. We are offering
// `num_queues + 1` vectors (plus one for configuration updates). If an invalid
// vector has been selected, we just store the `NO_VECTOR` value.
let mut msix_queues = self.msix_queues.lock().expect("Poisoned lock");
let nr_vectors = msix_queues.len() + 1;
if (value as usize) < nr_vectors {
self.msix_config.store(value, Ordering::Release);
} else {
self.msix_config
.store(VIRTQ_MSI_NO_VECTOR, Ordering::Release);
}
}
0x16 => self.queue_select = value,
0x18 => self.with_queue_mut(queues, |q| q.size = value),
0x1a => {
let mut msix_queues = self.msix_queues.lock().expect("Poisoned lock");
let nr_vectors = msix_queues.len() + 1;
// Make sure that `queue_select` points to a valid queue. If not, we won't do
// anything here and subsequent reads at 0x1a will return `NO_VECTOR`.
if let Some(queue) = msix_queues.get_mut(self.queue_select as usize) {
// Make sure that the guest doesn't select an invalid vector. We are offering
// `num_queues + 1` vectors (plus one for configuration updates). If an invalid
// vector has been selected, we just store the `NO_VECTOR` value.
if (value as usize) < nr_vectors {
*queue = value;
} else {
*queue = VIRTQ_MSI_NO_VECTOR;
}
}
}
0x1c => self.with_queue_mut(queues, |q| {
if value != 0 {
q.ready = value == 1;
}
}),
_ => {
warn!("pci: invalid virtio register word write: 0x{:x}", offset);
}
}
}
fn read_common_config_dword(&self, offset: u64, device: Arc<Mutex<dyn VirtioDevice>>) -> u32 {
match offset {
0x00 => self.device_feature_select,
0x04 => {
let locked_device = device.lock().unwrap();
// Only 64 bits of features (2 pages) are defined for now, so limit
// device_feature_select to avoid shifting by 64 or more bits.
if self.device_feature_select < 2 {
((locked_device.avail_features() >> (self.device_feature_select * 32))
& 0xffff_ffff) as u32
} else {
0
}
}
0x08 => self.driver_feature_select,
0x20 => {
let locked_device = device.lock().unwrap();
self.with_queue(locked_device.queues(), |q| {
(q.desc_table_address.0 & 0xffff_ffff) as u32
})
.unwrap_or_default()
}
0x24 => {
let locked_device = device.lock().unwrap();
self.with_queue(locked_device.queues(), |q| {
(q.desc_table_address.0 >> 32) as u32
})
.unwrap_or_default()
}
0x28 => {
let locked_device = device.lock().unwrap();
self.with_queue(locked_device.queues(), |q| {
(q.avail_ring_address.0 & 0xffff_ffff) as u32
})
.unwrap_or_default()
}
0x2c => {
let locked_device = device.lock().unwrap();
self.with_queue(locked_device.queues(), |q| {
(q.avail_ring_address.0 >> 32) as u32
})
.unwrap_or_default()
}
0x30 => {
let locked_device = device.lock().unwrap();
self.with_queue(locked_device.queues(), |q| {
(q.used_ring_address.0 & 0xffff_ffff) as u32
})
.unwrap_or_default()
}
0x34 => {
let locked_device = device.lock().unwrap();
self.with_queue(locked_device.queues(), |q| {
(q.used_ring_address.0 >> 32) as u32
})
.unwrap_or_default()
}
_ => {
warn!("pci: invalid virtio register dword read: 0x{:x}", offset);
0
}
}
}
fn write_common_config_dword(
&mut self,
offset: u64,
value: u32,
device: Arc<Mutex<dyn VirtioDevice>>,
) {
fn hi(v: &mut GuestAddress, x: u32) {
*v = (*v & 0xffff_ffff) | (u64::from(x) << 32)
}
fn lo(v: &mut GuestAddress, x: u32) {
*v = (*v & !0xffff_ffff) | u64::from(x)
}
let mut locked_device = device.lock().unwrap();
match offset {
0x00 => self.device_feature_select = value,
0x08 => self.driver_feature_select = value,
0x0c => locked_device.ack_features_by_page(self.driver_feature_select, value),
0x20 => self.with_queue_mut(locked_device.queues_mut(), |q| {
lo(&mut q.desc_table_address, value)
}),
0x24 => self.with_queue_mut(locked_device.queues_mut(), |q| {
hi(&mut q.desc_table_address, value)
}),
0x28 => self.with_queue_mut(locked_device.queues_mut(), |q| {
lo(&mut q.avail_ring_address, value)
}),
0x2c => self.with_queue_mut(locked_device.queues_mut(), |q| {
hi(&mut q.avail_ring_address, value)
}),
0x30 => self.with_queue_mut(locked_device.queues_mut(), |q| {
lo(&mut q.used_ring_address, value)
}),
0x34 => self.with_queue_mut(locked_device.queues_mut(), |q| {
hi(&mut q.used_ring_address, value)
}),
_ => {
warn!("pci: invalid virtio register dword write: 0x{:x}", offset);
}
}
}
fn with_queue<U, F>(&self, queues: &[Queue], f: F) -> Option<U>
where
F: FnOnce(&Queue) -> U,
{
queues.get(self.queue_select as usize).map(f)
}
fn with_queue_mut<F: FnOnce(&mut Queue)>(&self, queues: &mut [Queue], f: F) {
if let Some(queue) = queues.get_mut(self.queue_select as usize) {
f(queue);
}
}
}
#[cfg(test)]
mod tests {
use vm_memory::ByteValued;
use super::*;
use crate::devices::virtio::transport::mmio::tests::DummyDevice;
fn default_device() -> Arc<Mutex<DummyDevice>> {
Arc::new(Mutex::new(DummyDevice::new()))
}
fn default_pci_common_config() -> VirtioPciCommonConfig {
VirtioPciCommonConfig {
driver_status: 0,
config_generation: 0,
device_feature_select: 0,
driver_feature_select: 0,
queue_select: 0,
msix_config: Arc::new(AtomicU16::new(0)),
msix_queues: Arc::new(Mutex::new(vec![0u16; 2])),
}
}
#[test]
fn write_base_regs() {
let mut regs = VirtioPciCommonConfig {
driver_status: 0xaa,
config_generation: 0x55,
device_feature_select: 0x0,
driver_feature_select: 0x0,
queue_select: 0xff,
msix_config: Arc::new(AtomicU16::new(0)),
msix_queues: Arc::new(Mutex::new(vec![0; 3])),
};
let dev = Arc::new(Mutex::new(DummyDevice::new()));
// Can set all bits of driver_status.
regs.write(0x14, &[0x55], dev.clone());
let mut read_back = vec![0x00];
regs.read(0x14, &mut read_back, dev.clone());
assert_eq!(read_back[0], 0x55);
// The config generation register is read only.
regs.write(0x15, &[0xaa], dev.clone());
let mut read_back = vec![0x00];
regs.read(0x15, &mut read_back, dev.clone());
assert_eq!(read_back[0], 0x55);
// Device features is read-only and passed through from the device.
regs.write(0x04, &[0, 0, 0, 0], dev.clone());
let mut read_back = vec![0, 0, 0, 0];
regs.read(0x04, &mut read_back, dev.clone());
assert_eq!(LittleEndian::read_u32(&read_back), 0u32);
// Feature select registers are read/write.
regs.write(0x00, &[1, 2, 3, 4], dev.clone());
let mut read_back = vec![0, 0, 0, 0];
regs.read(0x00, &mut read_back, dev.clone());
assert_eq!(LittleEndian::read_u32(&read_back), 0x0403_0201);
regs.write(0x08, &[1, 2, 3, 4], dev.clone());
let mut read_back = vec![0, 0, 0, 0];
regs.read(0x08, &mut read_back, dev.clone());
assert_eq!(LittleEndian::read_u32(&read_back), 0x0403_0201);
// 'queue_select' can be read and written.
regs.write(0x16, &[0xaa, 0x55], dev.clone());
let mut read_back = vec![0x00, 0x00];
regs.read(0x16, &mut read_back, dev.clone());
assert_eq!(read_back[0], 0xaa);
assert_eq!(read_back[1], 0x55);
// Getting the MSI vector when `queue_select` points to an invalid queue should return
// NO_VECTOR (0xffff)
regs.read(0x1a, &mut read_back, dev.clone());
assert_eq!(read_back, [0xff, 0xff]);
// Writing the MSI vector of an invalid `queue_select` does not have any effect.
regs.write(0x1a, &[0x12, 0x13], dev.clone());
assert_eq!(read_back, [0xff, 0xff]);
// Valid `queue_select` though should setup the corresponding MSI-X queue.
regs.write(0x16, &[0x1, 0x0], dev.clone());
assert_eq!(regs.queue_select, 1);
regs.write(0x1a, &[0x1, 0x0], dev.clone());
regs.read(0x1a, &mut read_back, dev);
assert_eq!(LittleEndian::read_u16(&read_back[..2]), 0x1);
}
#[test]
fn test_device_feature() {
let mut config = default_pci_common_config();
let mut device = default_device();
let mut features = 0u32;
device
.lock()
.unwrap()
.set_avail_features(0x0000_1312_0000_1110);
config.read(0x04, features.as_mut_slice(), device.clone());
assert_eq!(features, 0x1110);
// select second page
config.write(0x0, 1u32.as_slice(), device.clone());
config.read(0x04, features.as_mut_slice(), device.clone());
assert_eq!(features, 0x1312);
// Try a third page. It doesn't exist so we should get all 0s
config.write(0x0, 2u32.as_slice(), device.clone());
config.read(0x04, features.as_mut_slice(), device.clone());
assert_eq!(features, 0x0);
}
#[test]
fn test_driver_feature() {
let mut config = default_pci_common_config();
let mut device = default_device();
device
.lock()
.unwrap()
.set_avail_features(0x0000_1312_0000_1110);
// ACK some features of the first page
config.write(0x0c, 0x1100u32.as_slice(), device.clone());
assert_eq!(device.lock().unwrap().acked_features(), 0x1100);
// ACK some features of the second page
config.write(0x08, 1u32.as_slice(), device.clone());
config.write(0x0c, 0x0000_1310u32.as_slice(), device.clone());
assert_eq!(
device.lock().unwrap().acked_features(),
0x0000_1310_0000_1100
);
}
#[test]
fn test_num_queues() {
let mut config = default_pci_common_config();
let mut device = default_device();
let mut num_queues = 0u16;
config.read(0x12, num_queues.as_mut_slice(), device.clone());
assert_eq!(num_queues, 2);
// `num_queues` is read-only
config.write(0x12, 4u16.as_slice(), device.clone());
config.read(0x12, num_queues.as_mut_slice(), device.clone());
assert_eq!(num_queues, 2);
}
#[test]
fn test_device_status() {
let mut config = default_pci_common_config();
let mut device = default_device();
let mut status = 0u8;
config.read(0x14, status.as_mut_slice(), device.clone());
assert_eq!(status, 0);
config.write(0x14, 0x42u8.as_slice(), device.clone());
config.read(0x14, status.as_mut_slice(), device.clone());
assert_eq!(status, 0x42);
}
#[test]
fn test_config_msix_vector() {
let mut config = default_pci_common_config();
let device = default_device();
let mut vector: u16 = 0;
// Our device has 2 queues, so we should be using 3 vectors in total.
// Trying to set a vector bigger than that should fail. Observing the
// failure happens through a subsequent read that should return NO_VECTOR.
config.write(0x10, 3u16.as_slice(), device.clone());
config.read(0x10, vector.as_mut_slice(), device.clone());
assert_eq!(vector, VIRTQ_MSI_NO_VECTOR);
// Any of the 3 valid values should work
for i in 0u16..3 {
config.write(0x10, i.as_slice(), device.clone());
config.read(0x10, vector.as_mut_slice(), device.clone());
assert_eq!(vector, i);
}
}
#[test]
fn test_queue_size() {
let mut config = default_pci_common_config();
let device = default_device();
let mut len = 0u16;
let mut max_size = [0u16; 2];
for queue_id in 0u16..2 {
config.write(0x16, queue_id.as_slice(), device.clone());
config.read(0x18, len.as_mut_slice(), device.clone());
assert_eq!(
len,
device.lock().unwrap().queues()[queue_id as usize].max_size
);
max_size[queue_id as usize] = len;
}
config.write(0x16, 2u16.as_slice(), device.clone());
config.read(0x18, len.as_mut_slice(), device.clone());
assert_eq!(len, 0);
// Setup size smaller than what is the maximum offered
for queue_id in 0u16..2 {
config.write(0x16, queue_id.as_slice(), device.clone());
config.write(
0x18,
(max_size[queue_id as usize] - 1).as_slice(),
device.clone(),
);
config.read(0x18, len.as_mut_slice(), device.clone());
assert_eq!(len, max_size[queue_id as usize] - 1);
}
}
#[test]
fn test_queue_msix_vector() {
let mut config = default_pci_common_config();
let device = default_device();
let mut vector = 0u16;
// Our device has 2 queues, so we should be using 3 vectors in total.
// Trying to set a vector bigger than that should fail. Observing the
// failure happens through a subsequent read that should return NO_VECTOR.
for queue_id in 0u16..2 {
// Select queue
config.write(0x16, queue_id.as_slice(), device.clone());
config.write(0x1a, 3u16.as_slice(), device.clone());
config.read(0x1a, vector.as_mut_slice(), device.clone());
assert_eq!(vector, VIRTQ_MSI_NO_VECTOR);
// Any of the 3 valid values should work
for vector_id in 0u16..3 {
config.write(0x1a, vector_id.as_slice(), device.clone());
config.read(0x1a, vector.as_mut_slice(), device.clone());
assert_eq!(vector, vector_id);
}
}
}
#[test]
fn test_queue_enable() {
let mut config = default_pci_common_config();
let device = default_device();
let mut enabled = 0u16;
for queue_id in 0u16..2 {
config.write(0x16, queue_id.as_slice(), device.clone());
// Initially queue should be disabled
config.read(0x1c, enabled.as_mut_slice(), device.clone());
assert_eq!(enabled, 0);
// Enable queue
config.write(0x1c, 1u16.as_slice(), device.clone());
config.read(0x1c, enabled.as_mut_slice(), device.clone());
assert_eq!(enabled, 1);
// According to the specification "The driver MUST NOT write a 0 to queue_enable."
config.write(0x1c, 0u16.as_slice(), device.clone());
config.read(0x1c, enabled.as_mut_slice(), device.clone());
assert_eq!(enabled, 1);
}
}
#[test]
fn test_queue_notify_off() {
let mut config = default_pci_common_config();
let device = default_device();
let mut offset = 0u16;
// `queue_notify_off` is an offset (index not bytes) from the notification structure
// that helps locate the address of the queue notify within the device's BAR. This is
// a field setup by the device and should be read-only for the driver
for queue_id in 0u16..2 {
config.write(0x16, queue_id.as_slice(), device.clone());
config.read(0x1e, offset.as_mut_slice(), device.clone());
assert_eq!(offset, queue_id);
// Writing to it should not have any effect
config.write(0x1e, 0x42.as_slice(), device.clone());
config.read(0x1e, offset.as_mut_slice(), device.clone());
assert_eq!(offset, queue_id);
}
}
fn write_64bit_field(
config: &mut VirtioPciCommonConfig,
device: Arc<Mutex<DummyDevice>>,
offset: u64,
value: u64,
) {
let lo32 = (value & 0xffff_ffff) as u32;
let hi32 = (value >> 32) as u32;
config.write(offset, lo32.as_slice(), device.clone());
config.write(offset + 4, hi32.as_slice(), device.clone());
}
fn read_64bit_field(
config: &mut VirtioPciCommonConfig,
device: Arc<Mutex<DummyDevice>>,
offset: u64,
) -> u64 {
let mut lo32 = 0u32;
let mut hi32 = 0u32;
config.read(offset, lo32.as_mut_slice(), device.clone());
config.read(offset + 4, hi32.as_mut_slice(), device.clone());
(lo32 as u64) | ((hi32 as u64) << 32)
}
#[test]
fn test_queue_addresses() {
let mut config = default_pci_common_config();
let device = default_device();
let mut reg64bit = 0;
for queue_id in 0u16..2 {
config.write(0x16, queue_id.as_slice(), device.clone());
for offset in [0x20, 0x28, 0x30] {
write_64bit_field(&mut config, device.clone(), offset, 0x0000_1312_0000_1110);
assert_eq!(
read_64bit_field(&mut config, device.clone(), offset),
0x0000_1312_0000_1110
);
}
}
}
#[test]
fn test_bad_width_reads() {
let mut config = default_pci_common_config();
let mut device = default_device();
// According to the VirtIO specification (section 4.1.3.1)
//
// > For device configuration access, the driver MUST use 8-bit wide accesses for 8-bit
// > wide fields, 16-bit wide and aligned accesses for 16-bit wide fields and 32-bit wide
// > and aligned accesses for 32-bit and 64-bit wide fields. For 64-bit fields, the driver
// > MAY access each of the high and low 32-bit parts of the field independently.
// 64-bit fields
device.lock().unwrap().queues_mut()[0].desc_table_address =
GuestAddress(0x0000_1312_0000_1110);
let mut buffer = [0u8; 8];
config.read(0x20, &mut buffer[..1], device.clone());
assert_eq!(buffer, [0u8; 8]);
config.read(0x20, &mut buffer[..2], device.clone());
assert_eq!(buffer, [0u8; 8]);
config.read(0x20, &mut buffer[..8], device.clone());
assert_eq!(buffer, [0u8; 8]);
config.read(0x20, &mut buffer[..4], device.clone());
assert_eq!(LittleEndian::read_u32(&buffer[..4]), 0x1110);
config.read(0x24, &mut buffer[..4], device.clone());
assert_eq!(LittleEndian::read_u32(&buffer[..4]), 0x1312);
// 32-bit fields
config.device_feature_select = 0x42;
let mut buffer = [0u8; 8];
config.read(0, &mut buffer[..1], device.clone());
assert_eq!(buffer, [0u8; 8]);
config.read(0, &mut buffer[..2], device.clone());
assert_eq!(buffer, [0u8; 8]);
config.read(0, &mut buffer[..8], device.clone());
assert_eq!(buffer, [0u8; 8]);
config.read(0, &mut buffer[..4], device.clone());
assert_eq!(LittleEndian::read_u32(&buffer[..4]), 0x42);
// 16-bit fields
let mut buffer = [0u8; 8];
config.queue_select = 0x42;
config.read(0x16, &mut buffer[..1], device.clone());
assert_eq!(buffer, [0u8; 8]);
config.read(0x16, &mut buffer[..4], device.clone());
assert_eq!(buffer, [0u8; 8]);
config.read(0x16, &mut buffer[..8], device.clone());
assert_eq!(buffer, [0u8; 8]);
config.read(0x16, &mut buffer[..2], device.clone());
assert_eq!(LittleEndian::read_u16(&buffer[..2]), 0x42);
// 8-bit fields
let mut buffer = [0u8; 8];
config.driver_status = 0x42;
config.read(0x14, &mut buffer[..2], device.clone());
assert_eq!(buffer, [0u8; 8]);
config.read(0x14, &mut buffer[..4], device.clone());
assert_eq!(buffer, [0u8; 8]);
config.read(0x14, &mut buffer[..8], device.clone());
assert_eq!(buffer, [0u8; 8]);
config.read(0x14, &mut buffer[..1], device.clone());
assert_eq!(buffer[0], 0x42);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/transport/pci/mod.rs | src/vmm/src/devices/virtio/transport/pci/mod.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
pub mod common_config;
pub mod device;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/vsock/packet.rs | src/vmm/src/devices/virtio/vsock/packet.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
//! `VsockPacket` provides a thin wrapper over the buffers exchanged via virtio queues.
//! There are two components to a vsock packet, each using its own descriptor in a
//! virtio queue:
//! - the packet header; and
//! - the packet data/buffer.
//!
//! There is a 1:1 relation between descriptor chains and packets: the first (chain head) holds
//! the header, and an optional second descriptor holds the data. The second descriptor is only
//! present for data packets (VSOCK_OP_RW).
//!
//! `VsockPacket` wraps these two buffers and provides direct access to the data stored
//! in guest memory. This is done to avoid unnecessarily copying data from guest memory
//! to temporary buffers, before passing it on to the vsock backend.
use std::fmt::Debug;
use vm_memory::volatile_memory::Error;
use vm_memory::{GuestMemoryError, ReadVolatile, WriteVolatile};
use super::{VsockError, defs};
use crate::devices::virtio::iovec::{IoVecBuffer, IoVecBufferMut};
use crate::devices::virtio::queue::DescriptorChain;
use crate::vstate::memory::{ByteValued, GuestMemoryMmap};
// The vsock packet header is defined by the C struct:
//
// ```C
// le64 src_cid;
// le64 dst_cid;
// le32 src_port;
// le32 dst_port;
// le32 len;
// le16 type;
// le16 op;
// le32 flags;
// le32 buf_alloc;
// le32 fwd_cnt;
// } __attribute__((packed));
// ```
// We create a rust structure that mirrors it.
// The mirroring struct is only used privately by `VsockPacket`, that offers getter and setter
// methods, for each struct field, that will also handle the correct endianess.
#[repr(C, packed)]
#[derive(Copy, Clone, Debug, Default)]
pub struct VsockPacketHeader {
// Source CID.
src_cid: u64,
// Destination CID.
dst_cid: u64,
// Source port.
src_port: u32,
// Destination port.
dst_port: u32,
// Data length (in bytes) - may be 0, if there is no data buffer.
len: u32,
// Socket type. Currently, only connection-oriented streams are defined by the vsock protocol.
type_: u16,
// Operation ID - one of the VSOCK_OP_* values; e.g.
// - VSOCK_OP_RW: a data packet;
// - VSOCK_OP_REQUEST: connection request;
// - VSOCK_OP_RST: forcefull connection termination;
// etc (see `super::defs::uapi` for the full list).
op: u16,
// Additional options (flags) associated with the current operation (`op`).
// Currently, only used with shutdown requests (VSOCK_OP_SHUTDOWN).
flags: u32,
// Size (in bytes) of the packet sender receive buffer (for the connection to which this packet
// belongs).
buf_alloc: u32,
// Number of bytes the sender has received and consumed (for the connection to which this
// packet belongs). For instance, for our Unix backend, this counter would be the total
// number of bytes we have successfully written to a backing Unix socket.
fwd_cnt: u32,
}
impl VsockPacketHeader {
pub fn src_cid(&self) -> u64 {
u64::from_le(self.src_cid)
}
pub fn set_src_cid(&mut self, cid: u64) -> &mut Self {
self.src_cid = cid.to_le();
self
}
pub fn dst_cid(&self) -> u64 {
u64::from_le(self.dst_cid)
}
pub fn set_dst_cid(&mut self, cid: u64) -> &mut Self {
self.dst_cid = cid.to_le();
self
}
pub fn src_port(&self) -> u32 {
u32::from_le(self.src_port)
}
pub fn set_src_port(&mut self, port: u32) -> &mut Self {
self.src_port = port.to_le();
self
}
pub fn dst_port(&self) -> u32 {
u32::from_le(self.dst_port)
}
pub fn set_dst_port(&mut self, port: u32) -> &mut Self {
self.dst_port = port.to_le();
self
}
pub fn len(&self) -> u32 {
u32::from_le(self.len)
}
pub fn set_len(&mut self, len: u32) -> &mut Self {
self.len = len.to_le();
self
}
pub fn type_(&self) -> u16 {
u16::from_le(self.type_)
}
pub fn set_type(&mut self, type_: u16) -> &mut Self {
self.type_ = type_.to_le();
self
}
pub fn op(&self) -> u16 {
u16::from_le(self.op)
}
pub fn set_op(&mut self, op: u16) -> &mut Self {
self.op = op.to_le();
self
}
pub fn flags(&self) -> u32 {
u32::from_le(self.flags)
}
pub fn set_flags(&mut self, flags: u32) -> &mut Self {
self.flags = flags.to_le();
self
}
pub fn set_flag(&mut self, flag: u32) -> &mut Self {
self.set_flags(self.flags() | flag);
self
}
pub fn buf_alloc(&self) -> u32 {
u32::from_le(self.buf_alloc)
}
pub fn set_buf_alloc(&mut self, buf_alloc: u32) -> &mut Self {
self.buf_alloc = buf_alloc.to_le();
self
}
pub fn fwd_cnt(&self) -> u32 {
u32::from_le(self.fwd_cnt)
}
pub fn set_fwd_cnt(&mut self, fwd_cnt: u32) -> &mut Self {
self.fwd_cnt = fwd_cnt.to_le();
self
}
}
/// The vsock packet header struct size (the struct is packed).
pub const VSOCK_PKT_HDR_SIZE: u32 = 44;
// SAFETY: `VsockPacketHeader` is a POD and contains no padding.
unsafe impl ByteValued for VsockPacketHeader {}
// /// Struct describing a single vsock packet.
// ///
// /// Encapsulates the virtio descriptor chain containing the packet through the `IoVecBuffer[Mut]`
// /// abstractions.
#[derive(Debug, Default)]
pub struct VsockPacketTx {
/// A copy of the vsock packet's 44-byte header, held in hypervisor memory
/// to minimize the number of accesses to guest memory. Can be written back
/// to geust memory using [`VsockPacket::commit_hdr`] (only for RX buffers).
pub hdr: VsockPacketHeader,
/// The raw buffer, as it is contained in guest memory (containing both
/// header and payload)
buffer: IoVecBuffer,
}
impl VsockPacketTx {
/// Create the packet wrapper from a TX virtq chain head.
///
/// ## Errors
/// Returns
/// - [`VsockError::UnreadableDescriptor`] if the provided descriptor chain contains any
/// descriptor not marked as writable.
/// - [`VsockError::DescChainTooShortForHeader`] if the descriptor chain's total buffer length
/// is insufficient to hold the 44 byte vsock header
/// - [`VsockError::InvalidPktLen`] if the contained vsock header describes a vsock packet whose
/// length would exceed [`defs::MAX_PKT_BUR_SIZE`].
/// - [`VsockError::DescChainTooShortForPacket`] if the contained vsock header describes a vsock
/// packet whose length exceeds the descriptor chain's actual total buffer length.
pub fn parse(
&mut self,
mem: &GuestMemoryMmap,
chain: DescriptorChain,
) -> Result<(), VsockError> {
// SAFETY: This descriptor chain is only loaded once
// virtio requests are handled sequentially so no two IoVecBuffers
// are live at the same time, meaning this has exclusive ownership over the memory
unsafe { self.buffer.load_descriptor_chain(mem, chain)? };
let mut hdr = VsockPacketHeader::default();
match self.buffer.read_exact_volatile_at(hdr.as_mut_slice(), 0) {
Ok(()) => (),
Err(Error::PartialBuffer { completed, .. }) => {
return Err(VsockError::DescChainTooShortForHeader(completed));
}
Err(err) => return Err(VsockError::GuestMemoryMmap(err.into())),
}
if hdr.len > defs::MAX_PKT_BUF_SIZE {
return Err(VsockError::InvalidPktLen(hdr.len));
}
if hdr.len > self.buffer.len() - VSOCK_PKT_HDR_SIZE {
return Err(VsockError::DescChainTooShortForPacket(
self.buffer.len(),
hdr.len,
));
}
self.hdr = hdr;
Ok(())
}
pub fn write_from_offset_to<T: WriteVolatile + Debug>(
&self,
dst: &mut T,
offset: u32,
count: u32,
) -> Result<u32, VsockError> {
if count
> self
.buffer
.len()
.saturating_sub(VSOCK_PKT_HDR_SIZE)
.saturating_sub(offset)
{
return Err(VsockError::GuestMemoryBounds);
}
self.buffer
.read_volatile_at(dst, (offset + VSOCK_PKT_HDR_SIZE) as usize, count as usize)
.map_err(|err| VsockError::GuestMemoryMmap(GuestMemoryError::from(err)))
.and_then(|read| read.try_into().map_err(|_| VsockError::DescChainOverflow))
}
/// Returns the total length of this [`VsockPacket`]'s buffer (e.g. the amount of data bytes
/// contained in this packet).
///
/// Return value will equal the total length of the underlying descriptor chain's buffers,
/// minus the length of the vsock header.
pub fn buf_size(&self) -> u32 {
self.buffer.len() - VSOCK_PKT_HDR_SIZE
}
}
/// Struct describing a single vsock packet.
///
/// Encapsulates the virtio descriptor chain containing the packet through the `IoVecBuffer[Mut]`
/// abstractions.
#[derive(Debug)]
pub struct VsockPacketRx {
/// A copy of the vsock packet's 44-byte header, held in hypervisor memory
/// to minimize the number of accesses to guest memory. Can be written back
/// to geust memory using [`VsockPacket::commit_hdr`] (only for RX buffers).
pub hdr: VsockPacketHeader,
/// The raw buffer, as it is contained in guest memory (containing both
/// header and payload)
buffer: IoVecBufferMut,
}
impl VsockPacketRx {
/// Creates new VsockPacketRx.
pub fn new() -> Result<Self, VsockError> {
let buffer = IoVecBufferMut::new().map_err(VsockError::IovDeque)?;
Ok(Self {
hdr: Default::default(),
buffer,
})
}
/// Create the packet wrapper from an RX virtq chain head.
///
/// ## Errors
/// Returns [`VsockError::DescChainTooShortForHeader`] if the descriptor chain's total buffer
/// length is insufficient to hold the 44 byte vsock header
pub fn parse(
&mut self,
mem: &GuestMemoryMmap,
chain: DescriptorChain,
) -> Result<(), VsockError> {
// SAFETY: This descriptor chain is only loaded once
// virtio requests are handled sequentially so no two IoVecBuffers
// are live at the same time, meaning this has exclusive ownership over the memory
unsafe { self.buffer.load_descriptor_chain(mem, chain)? };
if self.buffer.len() < VSOCK_PKT_HDR_SIZE {
return Err(VsockError::DescChainTooShortForHeader(
self.buffer.len() as usize
));
}
self.hdr = VsockPacketHeader::default();
Ok(())
}
/// Writes the local copy of the packet header to the guest memory.
///
/// ## Errors
/// The function returns [`VsockError::UnwritableDescriptor`] if this [`VsockPacket`]
/// contains a guest-to-host (TX) packet. It returned [`VsockError::InvalidPktLen`] if the
/// packet's payload as described by this [`VsockPacket`] would exceed
/// [`defs::MAX_PKT_BUF_SIZE`].
pub fn commit_hdr(&mut self) -> Result<(), VsockError> {
if self.hdr.len > defs::MAX_PKT_BUF_SIZE {
return Err(VsockError::InvalidPktLen(self.hdr.len));
}
self.buffer
.write_all_volatile_at(self.hdr.as_slice(), 0)
.map_err(GuestMemoryError::from)
.map_err(VsockError::GuestMemoryMmap)
}
/// Returns the total length of this [`VsockPacket`]'s buffer (e.g. the amount of data bytes
/// contained in this packet).
///
/// Return value will equal the total length of the underlying descriptor chain's buffers,
/// minus the length of the vsock header.
pub fn buf_size(&self) -> u32 {
self.buffer.len() - VSOCK_PKT_HDR_SIZE
}
pub fn read_at_offset_from<T: ReadVolatile + Debug>(
&mut self,
src: &mut T,
offset: u32,
count: u32,
) -> Result<u32, VsockError> {
if count
> self
.buffer
.len()
.saturating_sub(VSOCK_PKT_HDR_SIZE)
.saturating_sub(offset)
{
return Err(VsockError::GuestMemoryBounds);
}
self.buffer
.write_volatile_at(src, (offset + VSOCK_PKT_HDR_SIZE) as usize, count as usize)
.map_err(|err| VsockError::GuestMemoryMmap(GuestMemoryError::from(err)))
.and_then(|read| read.try_into().map_err(|_| VsockError::DescChainOverflow))
}
}
#[cfg(test)]
mod tests {
use vm_memory::Bytes;
use super::*;
use crate::devices::virtio::queue::VIRTQ_DESC_F_WRITE;
use crate::devices::virtio::test_utils::VirtqDesc as GuestQDesc;
use crate::devices::virtio::vsock::defs::MAX_PKT_BUF_SIZE;
use crate::devices::virtio::vsock::device::{RXQ_INDEX, TXQ_INDEX};
use crate::devices::virtio::vsock::test_utils::TestContext;
use crate::vstate::memory::{GuestAddress, GuestMemoryMmap};
macro_rules! create_context {
($test_ctx:ident, $handler_ctx:ident) => {
let $test_ctx = TestContext::new();
let mut $handler_ctx = $test_ctx.create_event_handler_context();
// For TX packets, hdr.len should be set to a valid value.
set_pkt_len(4096, &$handler_ctx.guest_txvq.dtable[0], &$test_ctx.mem);
};
}
fn set_pkt_len(len: u32, guest_desc: &GuestQDesc, mem: &GuestMemoryMmap) {
let hdr_addr = GuestAddress(guest_desc.addr.get());
let mut hdr: VsockPacketHeader = mem.read_obj(hdr_addr).unwrap();
hdr.len = len.to_le();
mem.write_obj(hdr, hdr_addr).unwrap();
}
#[test]
fn test_packet_hdr_size() {
assert_eq!(
VSOCK_PKT_HDR_SIZE as usize,
std::mem::size_of::<VsockPacketHeader>(),
);
}
#[test]
#[allow(clippy::cognitive_complexity)]
fn test_tx_packet_assembly() {
// Test case: successful TX packet assembly as linux < 6.1 would build them.
{
create_context!(test_ctx, handler_ctx);
let mut pkt = VsockPacketTx::default();
pkt.parse(
&test_ctx.mem,
handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
)
.unwrap();
assert_eq!(
TryInto::<u32>::try_into(pkt.buf_size()).unwrap(),
handler_ctx.guest_txvq.dtable[1].len.get()
);
}
// Test case: error on write-only hdr descriptor.
{
create_context!(test_ctx, handler_ctx);
handler_ctx.guest_txvq.dtable[0]
.flags
.set(VIRTQ_DESC_F_WRITE);
assert!(matches!(
VsockPacketTx::default().parse(
&test_ctx.mem,
handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
),
Err(VsockError::UnreadableDescriptor)
))
}
// Test case: header descriptor has insufficient space to hold the packet header.
{
create_context!(test_ctx, handler_ctx);
handler_ctx.guest_txvq.dtable[0]
.len
.set(VSOCK_PKT_HDR_SIZE - 1);
handler_ctx.guest_txvq.dtable[1].len.set(0);
assert!(matches!(
VsockPacketTx::default().parse(
&test_ctx.mem,
handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
),
Err(VsockError::DescChainTooShortForHeader(_))
))
}
// Test case: zero-length TX packet.
{
create_context!(test_ctx, handler_ctx);
set_pkt_len(0, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
VsockPacketTx::default()
.parse(
&test_ctx.mem,
handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
)
.unwrap();
}
// Test case: TX packet has more data than we can handle.
{
create_context!(test_ctx, handler_ctx);
set_pkt_len(
MAX_PKT_BUF_SIZE + 1,
&handler_ctx.guest_txvq.dtable[0],
&test_ctx.mem,
);
assert!(matches!(
VsockPacketTx::default().parse(
&test_ctx.mem,
handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
),
Err(VsockError::InvalidPktLen(_))
))
}
// Test case:
// - packet header advertises some data length; and
// - the data descriptor is missing.
{
create_context!(test_ctx, handler_ctx);
set_pkt_len(1024, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
handler_ctx.guest_txvq.dtable[0].flags.set(0);
assert!(matches!(
VsockPacketTx::default().parse(
&test_ctx.mem,
handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
),
Err(VsockError::DescChainTooShortForPacket(44, 1024))
))
}
// Test case: error on write-only buf descriptor.
{
create_context!(test_ctx, handler_ctx);
handler_ctx.guest_txvq.dtable[1]
.flags
.set(VIRTQ_DESC_F_WRITE);
assert!(matches!(
VsockPacketTx::default().parse(
&test_ctx.mem,
handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
),
Err(VsockError::UnreadableDescriptor)
))
}
// Test case: the buffer descriptor cannot fit all the data advertised by the
// packet header `len` field.
{
create_context!(test_ctx, handler_ctx);
set_pkt_len(8 * 1024, &handler_ctx.guest_txvq.dtable[0], &test_ctx.mem);
handler_ctx.guest_txvq.dtable[1].len.set(4 * 1024);
assert!(matches!(
VsockPacketTx::default().parse(
&test_ctx.mem,
handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
),
Err(VsockError::DescChainTooShortForPacket(4140, 8192))
))
}
}
#[test]
fn test_rx_packet_assembly() {
// Test case: successful RX packet assembly.
{
create_context!(test_ctx, handler_ctx);
let mut pkt = VsockPacketRx::new().unwrap();
pkt.parse(
&test_ctx.mem,
handler_ctx.device.queues[RXQ_INDEX].pop().unwrap().unwrap(),
)
.unwrap();
assert_eq!(pkt.buf_size(), handler_ctx.guest_rxvq.dtable[1].len.get());
}
// Test case: read-only RX packet header.
{
create_context!(test_ctx, handler_ctx);
handler_ctx.guest_rxvq.dtable[0].flags.set(0);
assert!(matches!(
VsockPacketRx::new().unwrap().parse(
&test_ctx.mem,
handler_ctx.device.queues[RXQ_INDEX].pop().unwrap().unwrap(),
),
Err(VsockError::UnwritableDescriptor)
))
}
// Test case: RX descriptor chain cannot fit packet header
{
create_context!(test_ctx, handler_ctx);
handler_ctx.guest_rxvq.dtable[0]
.len
.set(VSOCK_PKT_HDR_SIZE - 1);
handler_ctx.guest_rxvq.dtable[1].len.set(0);
assert!(matches!(
VsockPacketRx::new().unwrap().parse(
&test_ctx.mem,
handler_ctx.device.queues[RXQ_INDEX].pop().unwrap().unwrap(),
),
Err(VsockError::DescChainTooShortForHeader(_))
))
}
}
#[test]
#[allow(clippy::cognitive_complexity)]
fn test_packet_hdr_accessors() {
const SRC_CID: u64 = 1;
const DST_CID: u64 = 2;
const SRC_PORT: u32 = 3;
const DST_PORT: u32 = 4;
const LEN: u32 = 5;
const TYPE: u16 = 6;
const OP: u16 = 7;
const FLAGS: u32 = 8;
const BUF_ALLOC: u32 = 9;
const FWD_CNT: u32 = 10;
let mut hdr = VsockPacketHeader::default();
assert_eq!(hdr.src_cid(), 0);
assert_eq!(hdr.dst_cid(), 0);
assert_eq!(hdr.src_port(), 0);
assert_eq!(hdr.dst_port(), 0);
assert_eq!(hdr.len(), 0);
assert_eq!(hdr.type_(), 0);
assert_eq!(hdr.op(), 0);
assert_eq!(hdr.flags(), 0);
assert_eq!(hdr.buf_alloc(), 0);
assert_eq!(hdr.fwd_cnt(), 0);
// Test field accessors.
hdr.set_src_cid(SRC_CID)
.set_dst_cid(DST_CID)
.set_src_port(SRC_PORT)
.set_dst_port(DST_PORT)
.set_len(LEN)
.set_type(TYPE)
.set_op(OP)
.set_flags(FLAGS)
.set_buf_alloc(BUF_ALLOC)
.set_fwd_cnt(FWD_CNT);
assert_eq!(hdr.src_cid(), SRC_CID);
assert_eq!(hdr.dst_cid(), DST_CID);
assert_eq!(hdr.src_port(), SRC_PORT);
assert_eq!(hdr.dst_port(), DST_PORT);
assert_eq!(hdr.len(), LEN);
assert_eq!(hdr.type_(), TYPE);
assert_eq!(hdr.op(), OP);
assert_eq!(hdr.flags(), FLAGS);
assert_eq!(hdr.buf_alloc(), BUF_ALLOC);
assert_eq!(hdr.fwd_cnt(), FWD_CNT);
// Test individual flag setting.
let flags = hdr.flags() | 0b1000;
hdr.set_flag(0b1000);
assert_eq!(hdr.flags(), flags);
}
#[test]
fn test_packet_buf() {
create_context!(test_ctx, handler_ctx);
// create_context gives us an rx descriptor chain and a tx descriptor chain pointing to the
// same area of memory. We need both a rx-view and a tx-view into the packet, as tx-queue
// buffers are read only, while rx queue buffers are write-only
let mut pkt = VsockPacketRx::new().unwrap();
pkt.parse(
&test_ctx.mem,
handler_ctx.device.queues[RXQ_INDEX].pop().unwrap().unwrap(),
)
.unwrap();
let mut pkt2 = VsockPacketTx::default();
pkt2.parse(
&test_ctx.mem,
handler_ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap(),
)
.unwrap();
let buf_desc = &mut handler_ctx.guest_rxvq.dtable[1];
assert_eq!(pkt.buf_size(), buf_desc.len.get());
let zeros = vec![0_u8; pkt.buf_size() as usize];
let data: Vec<u8> = (0..pkt.buf_size())
.map(|i| ((i as u64) & 0xff) as u8)
.collect();
for offset in 0..pkt.buf_size() {
buf_desc.set_data(&zeros);
let mut expected_data = zeros[..offset as usize].to_vec();
expected_data.extend_from_slice(&data[..(pkt.buf_size() - offset) as usize]);
pkt.read_at_offset_from(&mut data.as_slice(), offset, pkt.buf_size() - offset)
.unwrap();
buf_desc.check_data(&expected_data);
let mut buf = vec![0; pkt.buf_size() as usize];
pkt2.write_from_offset_to(&mut buf.as_mut_slice(), offset, pkt.buf_size() - offset)
.unwrap();
assert_eq!(
&buf[..(pkt.buf_size() - offset) as usize],
&expected_data[offset as usize..]
);
}
let oob_cases = vec![
(1, pkt.buf_size()),
(pkt.buf_size(), 1),
(u32::MAX, 1),
(1, u32::MAX),
];
let mut buf = vec![0; pkt.buf_size() as usize];
for (offset, count) in oob_cases {
let res = pkt.read_at_offset_from(&mut data.as_slice(), offset, count);
assert!(matches!(res, Err(VsockError::GuestMemoryBounds)));
let res = pkt2.write_from_offset_to(&mut buf.as_mut_slice(), offset, count);
assert!(matches!(res, Err(VsockError::GuestMemoryBounds)));
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/vsock/persist.rs | src/vmm/src/devices/virtio/vsock/persist.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines state and support structures for persisting Vsock devices and backends.
use std::fmt::Debug;
use std::sync::Arc;
use serde::{Deserialize, Serialize};
use super::*;
use crate::devices::virtio::device::{ActiveState, DeviceState};
use crate::devices::virtio::generated::virtio_ids::{self, VIRTIO_ID_VSOCK};
use crate::devices::virtio::persist::VirtioDeviceState;
use crate::devices::virtio::queue::FIRECRACKER_MAX_QUEUE_SIZE;
use crate::devices::virtio::transport::VirtioInterrupt;
use crate::snapshot::Persist;
use crate::vstate::memory::GuestMemoryMmap;
/// The Vsock serializable state.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VsockState {
/// The vsock backend state.
pub backend: VsockBackendState,
/// The vsock frontend state.
pub frontend: VsockFrontendState,
}
/// The Vsock frontend serializable state.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VsockFrontendState {
/// Context Identifier.
pub cid: u64,
pub virtio_state: VirtioDeviceState,
}
/// An enum for the serializable backend state types.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum VsockBackendState {
/// UDS backend state.
Uds(VsockUdsState),
}
/// The Vsock Unix Backend serializable state.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VsockUdsState {
/// The path for the UDS socket.
pub(crate) path: String,
}
/// A helper structure that holds the constructor arguments for VsockUnixBackend
#[derive(Debug)]
pub struct VsockConstructorArgs<B> {
/// Pointer to guest memory.
pub mem: GuestMemoryMmap,
/// The vsock Unix Backend.
pub backend: B,
}
/// A helper structure that holds the constructor arguments for VsockUnixBackend
#[derive(Debug)]
pub struct VsockUdsConstructorArgs {
/// cid available in VsockFrontendState.
pub cid: u64,
}
impl Persist<'_> for VsockUnixBackend {
type State = VsockBackendState;
type ConstructorArgs = VsockUdsConstructorArgs;
type Error = VsockUnixBackendError;
fn save(&self) -> Self::State {
VsockBackendState::Uds(VsockUdsState {
path: self.host_sock_path.clone(),
})
}
fn restore(
constructor_args: Self::ConstructorArgs,
state: &Self::State,
) -> Result<Self, Self::Error> {
match state {
VsockBackendState::Uds(uds_state) => Ok(VsockUnixBackend::new(
constructor_args.cid,
uds_state.path.clone(),
)?),
}
}
}
impl<B> Persist<'_> for Vsock<B>
where
B: VsockBackend + 'static + Debug,
{
type State = VsockFrontendState;
type ConstructorArgs = VsockConstructorArgs<B>;
type Error = VsockError;
fn save(&self) -> Self::State {
VsockFrontendState {
cid: self.cid(),
virtio_state: VirtioDeviceState::from_device(self),
}
}
fn restore(
constructor_args: Self::ConstructorArgs,
state: &Self::State,
) -> Result<Self, Self::Error> {
// Restore queues.
let queues = state
.virtio_state
.build_queues_checked(
&constructor_args.mem,
VIRTIO_ID_VSOCK,
defs::VSOCK_NUM_QUEUES,
FIRECRACKER_MAX_QUEUE_SIZE,
)
.map_err(VsockError::VirtioState)?;
let mut vsock = Self::with_queues(state.cid, constructor_args.backend, queues)?;
vsock.acked_features = state.virtio_state.acked_features;
vsock.avail_features = state.virtio_state.avail_features;
vsock.device_state = DeviceState::Inactive;
Ok(vsock)
}
}
#[cfg(test)]
pub(crate) mod tests {
use super::device::AVAIL_FEATURES;
use super::*;
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::test_utils::default_interrupt;
use crate::devices::virtio::vsock::defs::uapi;
use crate::devices::virtio::vsock::test_utils::{TestBackend, TestContext};
use crate::snapshot::Snapshot;
use crate::utils::byte_order;
impl Persist<'_> for TestBackend {
type State = VsockBackendState;
type ConstructorArgs = VsockUdsConstructorArgs;
type Error = VsockUnixBackendError;
fn save(&self) -> Self::State {
VsockBackendState::Uds(VsockUdsState {
path: "test".to_owned(),
})
}
fn restore(_: Self::ConstructorArgs, state: &Self::State) -> Result<Self, Self::Error> {
match state {
VsockBackendState::Uds(_) => Ok(TestBackend::new()),
}
}
}
#[test]
fn test_persist_uds_backend() {
let ctx = TestContext::new();
let device_features = AVAIL_FEATURES;
let driver_features: u64 = AVAIL_FEATURES | 1 | (1 << 32);
let device_pages = [
(device_features & 0xffff_ffff) as u32,
(device_features >> 32) as u32,
];
let driver_pages = [
(driver_features & 0xffff_ffff) as u32,
(driver_features >> 32) as u32,
];
// Test serialization
let mut mem = vec![0; 4096];
// Save backend and device state separately.
let state = VsockState {
backend: ctx.device.backend().save(),
frontend: ctx.device.save(),
};
Snapshot::new(&state).save(&mut mem.as_mut_slice()).unwrap();
let restored_state: VsockState = Snapshot::load_without_crc_check(mem.as_slice())
.unwrap()
.data;
let mut restored_device = Vsock::restore(
VsockConstructorArgs {
mem: ctx.mem.clone(),
backend: match restored_state.backend {
VsockBackendState::Uds(uds_state) => {
assert_eq!(uds_state.path, "test".to_owned());
TestBackend::new()
}
},
},
&restored_state.frontend,
)
.unwrap();
assert_eq!(restored_device.device_type(), VIRTIO_ID_VSOCK);
assert_eq!(restored_device.avail_features_by_page(0), device_pages[0]);
assert_eq!(restored_device.avail_features_by_page(1), device_pages[1]);
assert_eq!(restored_device.avail_features_by_page(2), 0);
restored_device.ack_features_by_page(0, driver_pages[0]);
restored_device.ack_features_by_page(1, driver_pages[1]);
restored_device.ack_features_by_page(2, 0);
restored_device.ack_features_by_page(0, !driver_pages[0]);
assert_eq!(
restored_device.acked_features(),
device_features & driver_features
);
// Test reading 32-bit chunks.
let mut data = [0u8; 8];
restored_device.read_config(0, &mut data[..4]);
assert_eq!(
u64::from(byte_order::read_le_u32(&data[..])),
ctx.cid & 0xffff_ffff
);
restored_device.read_config(4, &mut data[4..]);
assert_eq!(
u64::from(byte_order::read_le_u32(&data[4..])),
(ctx.cid >> 32) & 0xffff_ffff
);
// Test reading 64-bit.
let mut data = [0u8; 8];
restored_device.read_config(0, &mut data);
assert_eq!(byte_order::read_le_u64(&data), ctx.cid);
// Check that out-of-bounds reading doesn't mutate the destination buffer.
let mut data = [0u8, 1, 2, 3, 4, 5, 6, 7];
restored_device.read_config(2, &mut data);
assert_eq!(data, [0u8, 1, 2, 3, 4, 5, 6, 7]);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/vsock/device.rs | src/vmm/src/devices/virtio/vsock/device.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! This is the `VirtioDevice` implementation for our vsock device. It handles the virtio-level
//! device logic: feature negotiation, device configuration, and device activation.
//!
//! We aim to conform to the VirtIO v1.1 spec:
//! https://docs.oasis-open.org/virtio/virtio/v1.1/virtio-v1.1.html
//!
//! The vsock device has two input parameters: a CID to identify the device, and a
//! `VsockBackend` to use for offloading vsock traffic.
//!
//! Upon its activation, the vsock device registers handlers for the following events/FDs:
//! - an RX queue FD;
//! - a TX queue FD;
//! - an event queue FD; and
//! - a backend FD.
use std::fmt::Debug;
use std::ops::Deref;
use std::sync::Arc;
use log::{error, info, warn};
use vmm_sys_util::eventfd::EventFd;
use super::super::super::DeviceError;
use super::defs::uapi;
use super::packet::{VSOCK_PKT_HDR_SIZE, VsockPacketRx, VsockPacketTx};
use super::{VsockBackend, defs};
use crate::devices::virtio::ActivateError;
use crate::devices::virtio::device::{ActiveState, DeviceState, VirtioDevice};
use crate::devices::virtio::generated::virtio_config::{VIRTIO_F_IN_ORDER, VIRTIO_F_VERSION_1};
use crate::devices::virtio::generated::virtio_ids::VIRTIO_ID_VSOCK;
use crate::devices::virtio::queue::{InvalidAvailIdx, Queue as VirtQueue};
use crate::devices::virtio::transport::{VirtioInterrupt, VirtioInterruptType};
use crate::devices::virtio::vsock::VsockError;
use crate::devices::virtio::vsock::metrics::METRICS;
use crate::impl_device_type;
use crate::logger::IncMetric;
use crate::utils::byte_order;
use crate::vstate::memory::{Bytes, GuestMemoryMmap};
pub(crate) const RXQ_INDEX: usize = 0;
pub(crate) const TXQ_INDEX: usize = 1;
pub(crate) const EVQ_INDEX: usize = 2;
pub(crate) const VIRTIO_VSOCK_EVENT_TRANSPORT_RESET: u32 = 0;
/// The virtio features supported by our vsock device:
/// - VIRTIO_F_VERSION_1: the device conforms to at least version 1.0 of the VirtIO spec.
/// - VIRTIO_F_IN_ORDER: the device returns used buffers in the same order that the driver makes
/// them available.
pub(crate) const AVAIL_FEATURES: u64 =
(1 << VIRTIO_F_VERSION_1 as u64) | (1 << VIRTIO_F_IN_ORDER as u64);
/// Structure representing the vsock device.
#[derive(Debug)]
pub struct Vsock<B> {
cid: u64,
pub(crate) queues: Vec<VirtQueue>,
pub(crate) queue_events: Vec<EventFd>,
pub(crate) backend: B,
pub(crate) avail_features: u64,
pub(crate) acked_features: u64,
// This EventFd is the only one initially registered for a vsock device, and is used to convert
// a VirtioDevice::activate call into an EventHandler read event which allows the other events
// (queue and backend related) to be registered post virtio device activation. That's
// mostly something we wanted to happen for the backend events, to prevent (potentially)
// continuous triggers from happening before the device gets activated.
pub(crate) activate_evt: EventFd,
pub(crate) device_state: DeviceState,
pub rx_packet: VsockPacketRx,
pub tx_packet: VsockPacketTx,
}
// TODO: Detect / handle queue deadlock:
// 1. If the driver halts RX queue processing, we'll need to notify `self.backend`, so that it can
// unregister any EPOLLIN listeners, since otherwise it will keep spinning, unable to consume its
// EPOLLIN events.
impl<B> Vsock<B>
where
B: VsockBackend + Debug,
{
/// Auxiliary function for creating a new virtio-vsock device with the given VM CID, vsock
/// backend and empty virtio queues.
pub fn with_queues(
cid: u64,
backend: B,
queues: Vec<VirtQueue>,
) -> Result<Vsock<B>, VsockError> {
let mut queue_events = Vec::new();
for _ in 0..queues.len() {
queue_events.push(EventFd::new(libc::EFD_NONBLOCK).map_err(VsockError::EventFd)?);
}
Ok(Vsock {
cid,
queues,
queue_events,
backend,
avail_features: AVAIL_FEATURES,
acked_features: 0,
activate_evt: EventFd::new(libc::EFD_NONBLOCK).map_err(VsockError::EventFd)?,
device_state: DeviceState::Inactive,
rx_packet: VsockPacketRx::new()?,
tx_packet: VsockPacketTx::default(),
})
}
/// Create a new virtio-vsock device with the given VM CID and vsock backend.
pub fn new(cid: u64, backend: B) -> Result<Vsock<B>, VsockError> {
let queues: Vec<VirtQueue> = defs::VSOCK_QUEUE_SIZES
.iter()
.map(|&max_size| VirtQueue::new(max_size))
.collect();
Self::with_queues(cid, backend, queues)
}
/// Provides the ID of this vsock device as used in MMIO device identification.
pub fn id(&self) -> &str {
defs::VSOCK_DEV_ID
}
/// Retrieve the cid associated with this vsock device.
pub fn cid(&self) -> u64 {
self.cid
}
/// Access the backend behind the device.
pub fn backend(&self) -> &B {
&self.backend
}
/// Signal the guest driver that we've used some virtio buffers that it had previously made
/// available.
pub fn signal_used_queue(&self, qidx: usize) -> Result<(), DeviceError> {
self.device_state
.active_state()
.expect("Device is not initialized")
.interrupt
.trigger(VirtioInterruptType::Queue(qidx.try_into().unwrap_or_else(
|_| panic!("vsock: invalid queue index: {qidx}"),
)))
.map_err(DeviceError::FailedSignalingIrq)
}
/// Signal the guest which queues are ready to be consumed
pub fn signal_used_queues(&self, used_queues: &[u16]) -> Result<(), DeviceError> {
self.device_state
.active_state()
.expect("Device is not initialized")
.interrupt
.trigger_queues(used_queues)
.map_err(DeviceError::FailedSignalingIrq)
}
/// Walk the driver-provided RX queue buffers and attempt to fill them up with any data that we
/// have pending. Return `true` if descriptors have been added to the used ring, and `false`
/// otherwise.
pub fn process_rx(&mut self) -> Result<bool, InvalidAvailIdx> {
// This is safe since we checked in the event handler that the device is activated.
let mem = &self.device_state.active_state().unwrap().mem;
let queue = &mut self.queues[RXQ_INDEX];
let mut have_used = false;
while let Some(head) = queue.pop()? {
let index = head.index;
let used_len = match self.rx_packet.parse(mem, head) {
Ok(()) => {
if self.backend.recv_pkt(&mut self.rx_packet).is_ok() {
match self.rx_packet.commit_hdr() {
// This addition cannot overflow, because packet length
// is previously validated against `MAX_PKT_BUF_SIZE`
// bound as part of `commit_hdr()`.
Ok(()) => VSOCK_PKT_HDR_SIZE + self.rx_packet.hdr.len(),
Err(err) => {
warn!(
"vsock: Error writing packet header to guest memory: \
{:?}.Discarding the package.",
err
);
0
}
}
} else {
// We are using a consuming iterator over the virtio buffers, so, if we
// can't fill in this buffer, we'll need to undo the
// last iterator step.
queue.undo_pop();
break;
}
}
Err(err) => {
warn!("vsock: RX queue error: {:?}. Discarding the package.", err);
0
}
};
have_used = true;
queue.add_used(index, used_len).unwrap_or_else(|err| {
error!("Failed to add available descriptor {}: {}", index, err)
});
}
queue.advance_used_ring_idx();
Ok(have_used)
}
/// Walk the driver-provided TX queue buffers, package them up as vsock packets, and send them
/// to the backend for processing. Return `true` if descriptors have been added to the used
/// ring, and `false` otherwise.
pub fn process_tx(&mut self) -> Result<bool, InvalidAvailIdx> {
// This is safe since we checked in the event handler that the device is activated.
let mem = &self.device_state.active_state().unwrap().mem;
let queue = &mut self.queues[TXQ_INDEX];
let mut have_used = false;
while let Some(head) = queue.pop()? {
let index = head.index;
// let pkt = match VsockPacket::from_tx_virtq_head(mem, head) {
match self.tx_packet.parse(mem, head) {
Ok(()) => (),
Err(err) => {
error!("vsock: error reading TX packet: {:?}", err);
have_used = true;
queue.add_used(index, 0).unwrap_or_else(|err| {
error!("Failed to add available descriptor {}: {}", index, err);
});
continue;
}
};
if self.backend.send_pkt(&self.tx_packet).is_err() {
queue.undo_pop();
break;
}
have_used = true;
queue.add_used(index, 0).unwrap_or_else(|err| {
error!("Failed to add available descriptor {}: {}", index, err);
});
}
queue.advance_used_ring_idx();
Ok(have_used)
}
// Send TRANSPORT_RESET_EVENT to driver. According to specs, the driver shuts down established
// connections and the guest_cid configuration field is fetched again. Existing listen sockets
// remain but their CID is updated to reflect the current guest_cid.
pub fn send_transport_reset_event(&mut self) -> Result<(), DeviceError> {
// This is safe since we checked in the caller function that the device is activated.
let mem = &self.device_state.active_state().unwrap().mem;
let queue = &mut self.queues[EVQ_INDEX];
let head = queue.pop()?.ok_or_else(|| {
METRICS.ev_queue_event_fails.inc();
DeviceError::VsockError(VsockError::EmptyQueue)
})?;
mem.write_obj::<u32>(VIRTIO_VSOCK_EVENT_TRANSPORT_RESET, head.addr)
.unwrap_or_else(|err| error!("Failed to write virtio vsock reset event: {:?}", err));
queue.add_used(head.index, head.len).unwrap_or_else(|err| {
error!("Failed to add used descriptor {}: {}", head.index, err);
});
queue.advance_used_ring_idx();
self.signal_used_queue(EVQ_INDEX)?;
Ok(())
}
}
impl<B> VirtioDevice for Vsock<B>
where
B: VsockBackend + Debug + 'static,
{
impl_device_type!(VIRTIO_ID_VSOCK);
fn avail_features(&self) -> u64 {
self.avail_features
}
fn acked_features(&self) -> u64 {
self.acked_features
}
fn set_acked_features(&mut self, acked_features: u64) {
self.acked_features = acked_features
}
fn queues(&self) -> &[VirtQueue] {
&self.queues
}
fn queues_mut(&mut self) -> &mut [VirtQueue] {
&mut self.queues
}
fn queue_events(&self) -> &[EventFd] {
&self.queue_events
}
fn interrupt_trigger(&self) -> &dyn VirtioInterrupt {
self.device_state
.active_state()
.expect("Device is not initialized")
.interrupt
.deref()
}
fn read_config(&self, offset: u64, data: &mut [u8]) {
match offset {
0 if data.len() == 8 => byte_order::write_le_u64(data, self.cid()),
0 if data.len() == 4 => {
byte_order::write_le_u32(data, (self.cid() & 0xffff_ffff) as u32)
}
4 if data.len() == 4 => {
byte_order::write_le_u32(data, ((self.cid() >> 32) & 0xffff_ffff) as u32)
}
_ => {
METRICS.cfg_fails.inc();
warn!(
"vsock: virtio-vsock received invalid read request of {} bytes at offset {}",
data.len(),
offset
)
}
}
}
fn write_config(&mut self, offset: u64, data: &[u8]) {
METRICS.cfg_fails.inc();
warn!(
"vsock: guest driver attempted to write device config (offset={:#x}, len={:#x})",
offset,
data.len()
);
}
fn activate(
&mut self,
mem: GuestMemoryMmap,
interrupt: Arc<dyn VirtioInterrupt>,
) -> Result<(), ActivateError> {
for q in self.queues.iter_mut() {
q.initialize(&mem)
.map_err(ActivateError::QueueMemoryError)?;
}
if self.queues.len() != defs::VSOCK_NUM_QUEUES {
METRICS.activate_fails.inc();
return Err(ActivateError::QueueMismatch {
expected: defs::VSOCK_NUM_QUEUES,
got: self.queues.len(),
});
}
if self.activate_evt.write(1).is_err() {
METRICS.activate_fails.inc();
return Err(ActivateError::EventFd);
}
self.device_state = DeviceState::Activated(ActiveState { mem, interrupt });
Ok(())
}
fn is_activated(&self) -> bool {
self.device_state.is_activated()
}
fn kick(&mut self) {
// Vsock has complicated protocol that isn't resilient to any packet loss,
// so for Vsock we don't support connection persistence through snapshot.
// Any in-flight packets or events are simply lost.
// Vsock is restored 'empty'.
// The only reason we still `kick` it is to make guest process
// `TRANSPORT_RESET_EVENT` event we sent during snapshot creation.
if self.is_activated() {
info!("kick vsock {}.", self.id());
self.signal_used_queue(EVQ_INDEX).unwrap();
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::devices::virtio::vsock::defs::uapi;
use crate::devices::virtio::vsock::test_utils::TestContext;
#[test]
fn test_virtio_device() {
let mut ctx = TestContext::new();
let device_features = AVAIL_FEATURES;
let driver_features: u64 = AVAIL_FEATURES | 1 | (1 << 32);
let device_pages = [
(device_features & 0xffff_ffff) as u32,
(device_features >> 32) as u32,
];
let driver_pages = [
(driver_features & 0xffff_ffff) as u32,
(driver_features >> 32) as u32,
];
assert_eq!(ctx.device.device_type(), VIRTIO_ID_VSOCK);
assert_eq!(ctx.device.avail_features_by_page(0), device_pages[0]);
assert_eq!(ctx.device.avail_features_by_page(1), device_pages[1]);
assert_eq!(ctx.device.avail_features_by_page(2), 0);
// Ack device features, page 0.
ctx.device.ack_features_by_page(0, driver_pages[0]);
// Ack device features, page 1.
ctx.device.ack_features_by_page(1, driver_pages[1]);
// Ack some bogus page (i.e. 2). This should have no side effect.
ctx.device.ack_features_by_page(2, 0);
// Attempt to un-ack the first feature page. This should have no side effect.
ctx.device.ack_features_by_page(0, !driver_pages[0]);
// Check that no side effect are present, and that the acked features are exactly the same
// as the device features.
assert_eq!(ctx.device.acked_features, device_features & driver_features);
// Test reading 32-bit chunks.
let mut data = [0u8; 8];
ctx.device.read_config(0, &mut data[..4]);
assert_eq!(
u64::from(byte_order::read_le_u32(&data[..])),
ctx.cid & 0xffff_ffff
);
ctx.device.read_config(4, &mut data[4..]);
assert_eq!(
u64::from(byte_order::read_le_u32(&data[4..])),
(ctx.cid >> 32) & 0xffff_ffff
);
// Test reading 64-bit.
let mut data = [0u8; 8];
ctx.device.read_config(0, &mut data);
assert_eq!(byte_order::read_le_u64(&data), ctx.cid);
// Check that out-of-bounds reading doesn't mutate the destination buffer.
let mut data = [0u8, 1, 2, 3, 4, 5, 6, 7];
ctx.device.read_config(2, &mut data);
assert_eq!(data, [0u8, 1, 2, 3, 4, 5, 6, 7]);
// Just covering lines here, since the vsock device has no writable config.
// A warning is, however, logged, if the guest driver attempts to write any config data.
ctx.device.write_config(0, &data[..4]);
// Test a bad activation.
// let bad_activate = ctx.device.activate(
// ctx.mem.clone(),
// );
// match bad_activate {
// Err(ActivateError::BadActivate) => (),
// other => panic!("{:?}", other),
// }
// Test a correct activation.
ctx.device
.activate(ctx.mem.clone(), ctx.interrupt.clone())
.unwrap();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/vsock/test_utils.rs | src/vmm/src/devices/virtio/vsock/test_utils.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![cfg(test)]
#![doc(hidden)]
use std::os::unix::io::{AsRawFd, RawFd};
use std::sync::Arc;
use vmm_sys_util::epoll::EventSet;
use vmm_sys_util::eventfd::EventFd;
use super::packet::{VsockPacketRx, VsockPacketTx};
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::queue::{VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE};
use crate::devices::virtio::test_utils::{VirtQueue as GuestQ, default_interrupt};
use crate::devices::virtio::transport::VirtioInterrupt;
use crate::devices::virtio::vsock::device::{RXQ_INDEX, TXQ_INDEX};
use crate::devices::virtio::vsock::packet::VSOCK_PKT_HDR_SIZE;
use crate::devices::virtio::vsock::{
Vsock, VsockBackend, VsockChannel, VsockEpollListener, VsockError,
};
use crate::test_utils::single_region_mem;
use crate::vstate::memory::{GuestAddress, GuestMemoryMmap};
#[derive(Debug)]
pub struct TestBackend {
pub evfd: EventFd,
pub rx_err: Option<VsockError>,
pub tx_err: Option<VsockError>,
pub pending_rx: bool,
pub rx_ok_cnt: usize,
pub tx_ok_cnt: usize,
pub evset: Option<EventSet>,
}
impl TestBackend {
pub fn new() -> Self {
Self {
evfd: EventFd::new(libc::EFD_NONBLOCK).unwrap(),
rx_err: None,
tx_err: None,
pending_rx: false,
rx_ok_cnt: 0,
tx_ok_cnt: 0,
evset: None,
}
}
pub fn set_rx_err(&mut self, err: Option<VsockError>) {
self.rx_err = err;
}
pub fn set_tx_err(&mut self, err: Option<VsockError>) {
self.tx_err = err;
}
pub fn set_pending_rx(&mut self, prx: bool) {
self.pending_rx = prx;
}
}
impl Default for TestBackend {
fn default() -> Self {
Self::new()
}
}
impl VsockChannel for TestBackend {
fn recv_pkt(&mut self, pkt: &mut VsockPacketRx) -> Result<(), VsockError> {
let cool_buf = [0xDu8, 0xE, 0xA, 0xD, 0xB, 0xE, 0xE, 0xF];
match self.rx_err.take() {
None => {
let buf_size = pkt.buf_size();
if buf_size > 0 {
let buf: Vec<u8> = (0..buf_size)
.map(|i| cool_buf[i as usize % cool_buf.len()])
.collect();
pkt.read_at_offset_from(&mut buf.as_slice(), 0, buf_size)
.unwrap();
}
self.rx_ok_cnt += 1;
Ok(())
}
Some(err) => Err(err),
}
}
fn send_pkt(&mut self, _pkt: &VsockPacketTx) -> Result<(), VsockError> {
match self.tx_err.take() {
None => {
self.tx_ok_cnt += 1;
Ok(())
}
Some(err) => Err(err),
}
}
fn has_pending_rx(&self) -> bool {
self.pending_rx
}
}
impl AsRawFd for TestBackend {
fn as_raw_fd(&self) -> RawFd {
self.evfd.as_raw_fd()
}
}
impl VsockEpollListener for TestBackend {
fn get_polled_evset(&self) -> EventSet {
EventSet::IN
}
fn notify(&mut self, evset: EventSet) {
self.evset = Some(evset);
}
}
impl VsockBackend for TestBackend {}
#[derive(Debug)]
pub struct TestContext {
pub cid: u64,
pub mem: GuestMemoryMmap,
pub interrupt: Arc<dyn VirtioInterrupt>,
pub mem_size: usize,
pub device: Vsock<TestBackend>,
}
impl TestContext {
pub fn new() -> Self {
const CID: u64 = 52;
const MEM_SIZE: usize = 1024 * 1024 * 128;
let mem = single_region_mem(MEM_SIZE);
let mut device = Vsock::new(CID, TestBackend::new()).unwrap();
for q in device.queues_mut() {
q.ready = true;
q.size = q.max_size;
}
Self {
cid: CID,
mem,
interrupt: default_interrupt(),
mem_size: MEM_SIZE,
device,
}
}
pub fn create_event_handler_context(&self) -> EventHandlerContext<'_> {
const QSIZE: u16 = 256;
let guest_rxvq = GuestQ::new(GuestAddress(0x0010_0000), &self.mem, QSIZE);
let guest_txvq = GuestQ::new(GuestAddress(0x0020_0000), &self.mem, QSIZE);
let guest_evvq = GuestQ::new(GuestAddress(0x0030_0000), &self.mem, QSIZE);
let rxvq = guest_rxvq.create_queue();
let txvq = guest_txvq.create_queue();
let evvq = guest_evvq.create_queue();
// Set up one available descriptor in the RX queue.
guest_rxvq.dtable[0].set(
0x0040_0000,
VSOCK_PKT_HDR_SIZE,
VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_NEXT,
1,
);
guest_rxvq.dtable[1].set(0x0040_1000, 4096, VIRTQ_DESC_F_WRITE, 0);
guest_rxvq.avail.ring[0].set(0);
guest_rxvq.avail.idx.set(1);
// Set up one available descriptor in the TX queue.
guest_txvq.dtable[0].set(0x0040_0000, VSOCK_PKT_HDR_SIZE, VIRTQ_DESC_F_NEXT, 1);
guest_txvq.dtable[1].set(0x0040_1000, 4096, 0, 0);
guest_txvq.avail.ring[0].set(0);
guest_txvq.avail.idx.set(1);
// Both descriptors above point to the same area of guest memory, to work around
// the fact that through the TX queue, the memory is read-only, and through the RX queue,
// the memory is write-only.
let queues = vec![rxvq, txvq, evvq];
EventHandlerContext {
guest_rxvq,
guest_txvq,
guest_evvq,
device: Vsock::with_queues(self.cid, TestBackend::new(), queues).unwrap(),
}
}
}
impl Default for TestContext {
fn default() -> Self {
Self::new()
}
}
#[derive(Debug)]
pub struct EventHandlerContext<'a> {
pub device: Vsock<TestBackend>,
pub guest_rxvq: GuestQ<'a>,
pub guest_txvq: GuestQ<'a>,
pub guest_evvq: GuestQ<'a>,
}
impl EventHandlerContext<'_> {
pub fn mock_activate(&mut self, mem: GuestMemoryMmap, interrupt: Arc<dyn VirtioInterrupt>) {
// Artificially activate the device.
self.device.activate(mem, interrupt).unwrap();
}
pub fn signal_txq_event(&mut self) {
self.device.queue_events[TXQ_INDEX].write(1).unwrap();
self.device.handle_txq_event(EventSet::IN);
}
pub fn signal_rxq_event(&mut self) {
self.device.queue_events[RXQ_INDEX].write(1).unwrap();
self.device.handle_rxq_event(EventSet::IN);
}
}
#[cfg(test)]
pub fn read_packet_data(pkt: &VsockPacketTx, how_much: u32) -> Vec<u8> {
let mut buf = vec![0; how_much as usize];
pkt.write_from_offset_to(&mut buf.as_mut_slice(), 0, how_much)
.unwrap();
buf
}
impl<B> Vsock<B>
where
B: VsockBackend,
{
pub fn write_element_in_queue(vsock: &Vsock<B>, idx: usize, val: u64) {
if idx > vsock.queue_events.len() - 1 {
panic!("Index bigger than the number of queues of this device");
}
vsock.queue_events[idx].write(val).unwrap();
}
pub fn get_element_from_interest_list(vsock: &Vsock<B>, idx: usize) -> u64 {
match idx {
0..=2 => u64::try_from(vsock.queue_events[idx].as_raw_fd()).unwrap(),
3 => u64::try_from(vsock.backend.as_raw_fd()).unwrap(),
4 => u64::try_from(vsock.activate_evt.as_raw_fd()).unwrap(),
_ => panic!("Index bigger than interest list"),
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/vsock/mod.rs | src/vmm/src/devices/virtio/vsock/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! The Firecracker vsock device aims to provide full virtio-vsock support to
//! software running inside the guest VM, while bypassing vhost kernel code on the
//! host. To that end, Firecracker implements the virtio-vsock device model, and
//! mediates communication between AF_UNIX sockets (on the host end) and AF_VSOCK
//! sockets (on the guest end).
mod csm;
mod device;
mod event_handler;
pub mod metrics;
mod packet;
pub mod persist;
pub mod test_utils;
mod unix;
use std::os::unix::io::AsRawFd;
use vm_memory::GuestMemoryError;
use vmm_sys_util::epoll::EventSet;
pub use self::defs::VSOCK_DEV_ID;
pub use self::device::Vsock;
use self::packet::{VsockPacketRx, VsockPacketTx};
pub use self::unix::{VsockUnixBackend, VsockUnixBackendError};
use super::iov_deque::IovDequeError;
use crate::devices::virtio::iovec::IoVecError;
use crate::devices::virtio::persist::PersistError as VirtioStateError;
mod defs {
use crate::devices::virtio::queue::FIRECRACKER_MAX_QUEUE_SIZE;
/// Device ID used in MMIO device identification.
/// Because Vsock is unique per-vm, this ID can be hardcoded.
pub const VSOCK_DEV_ID: &str = "vsock";
/// Number of virtio queues.
pub const VSOCK_NUM_QUEUES: usize = 3;
/// Virtio queue sizes, in number of descriptor chain heads.
/// There are 3 queues for a virtio device (in this order): RX, TX, Event
pub const VSOCK_QUEUE_SIZES: [u16; VSOCK_NUM_QUEUES] = [
FIRECRACKER_MAX_QUEUE_SIZE,
FIRECRACKER_MAX_QUEUE_SIZE,
FIRECRACKER_MAX_QUEUE_SIZE,
];
/// Max vsock packet data/buffer size.
pub const MAX_PKT_BUF_SIZE: u32 = 64 * 1024;
pub mod uapi {
/// Vsock packet operation IDs.
/// Defined in `/include/uapi/linux/virtio_vsock.h`.
///
/// Connection request.
pub const VSOCK_OP_REQUEST: u16 = 1;
/// Connection response.
pub const VSOCK_OP_RESPONSE: u16 = 2;
/// Connection reset.
pub const VSOCK_OP_RST: u16 = 3;
/// Connection clean shutdown.
pub const VSOCK_OP_SHUTDOWN: u16 = 4;
/// Connection data (read/write).
pub const VSOCK_OP_RW: u16 = 5;
/// Flow control credit update.
pub const VSOCK_OP_CREDIT_UPDATE: u16 = 6;
/// Flow control credit update request.
pub const VSOCK_OP_CREDIT_REQUEST: u16 = 7;
/// Vsock packet flags.
/// Defined in `/include/uapi/linux/virtio_vsock.h`.
///
/// Valid with a VSOCK_OP_SHUTDOWN packet: the packet sender will receive no more data.
pub const VSOCK_FLAGS_SHUTDOWN_RCV: u32 = 1;
/// Valid with a VSOCK_OP_SHUTDOWN packet: the packet sender will send no more data.
pub const VSOCK_FLAGS_SHUTDOWN_SEND: u32 = 2;
/// Vsock packet type.
/// Defined in `/include/uapi/linux/virtio_vsock.h`.
///
/// Stream / connection-oriented packet (the only currently valid type).
pub const VSOCK_TYPE_STREAM: u16 = 1;
pub const VSOCK_HOST_CID: u64 = 2;
}
}
/// Vsock device related errors.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
#[rustfmt::skip]
pub enum VsockError {
/** The total length of the descriptor chain ({0}) is too short to hold a packet of length {1} + header */
DescChainTooShortForPacket(u32, u32),
/// Empty queue
EmptyQueue,
/// EventFd error: {0}
EventFd(std::io::Error),
/// Chained GuestMemoryMmap error: {0}
GuestMemoryMmap(GuestMemoryError),
/// Bounds check failed on guest memory pointer.
GuestMemoryBounds,
/** The total length of the descriptor chain ({0}) is less than the number of bytes required\
to hold a vsock packet header.*/
DescChainTooShortForHeader(usize),
/// The descriptor chain length was greater than the max ([u32::MAX])
DescChainOverflow,
/// The vsock header `len` field holds an invalid value: {0}
InvalidPktLen(u32),
/// A data fetch was attempted when no data was available.
NoData,
/// A data buffer was expected for the provided packet, but it is missing.
PktBufMissing,
/// Encountered an unexpected write-only virtio descriptor.
UnreadableDescriptor,
/// Encountered an unexpected read-only virtio descriptor.
UnwritableDescriptor,
/// Invalid virtio configuration: {0}
VirtioState(VirtioStateError),
/// Vsock uds backend error: {0}
VsockUdsBackend(VsockUnixBackendError),
/// Underlying IovDeque error: {0}
IovDeque(IovDequeError),
/// Tried to push to full IovDeque.
IovDequeOverflow,
}
impl From<IoVecError> for VsockError {
fn from(value: IoVecError) -> Self {
match value {
IoVecError::WriteOnlyDescriptor => VsockError::UnreadableDescriptor,
IoVecError::ReadOnlyDescriptor => VsockError::UnwritableDescriptor,
IoVecError::GuestMemory(err) => VsockError::GuestMemoryMmap(err),
IoVecError::OverflowedDescriptor => VsockError::DescChainOverflow,
IoVecError::IovDeque(err) => VsockError::IovDeque(err),
IoVecError::IovDequeOverflow => VsockError::IovDequeOverflow,
}
}
}
/// A passive, event-driven object, that needs to be notified whenever an epoll-able event occurs.
/// An event-polling control loop will use `as_raw_fd()` and `get_polled_evset()` to query
/// the listener for the file descriptor and the set of events it's interested in. When such an
/// event occurs, the control loop will route the event to the listener via `notify()`.
pub trait VsockEpollListener: AsRawFd {
/// Get the set of events for which the listener wants to be notified.
fn get_polled_evset(&self) -> EventSet;
/// Notify the listener that one ore more events have occurred.
fn notify(&mut self, evset: EventSet);
}
/// Any channel that handles vsock packet traffic: sending and receiving packets. Since we're
/// implementing the device model here, our responsibility is to always process the sending of
/// packets (i.e. the TX queue). So, any locally generated data, addressed to the driver (e.g.
/// a connection response or RST), will have to be queued, until we get to processing the RX queue.
///
/// Note: `recv_pkt()` and `send_pkt()` are named analogous to `Read::read()` and `Write::write()`,
/// respectively. I.e.
/// - `recv_pkt(&mut pkt)` will read data from the channel, and place it into `pkt`; and
/// - `send_pkt(&pkt)` will fetch data from `pkt`, and place it into the channel.
pub trait VsockChannel {
/// Read/receive an incoming packet from the channel.
fn recv_pkt(&mut self, pkt: &mut VsockPacketRx) -> Result<(), VsockError>;
/// Write/send a packet through the channel.
fn send_pkt(&mut self, pkt: &VsockPacketTx) -> Result<(), VsockError>;
/// Checks whether there is pending incoming data inside the channel, meaning that a subsequent
/// call to `recv_pkt()` won't fail.
fn has_pending_rx(&self) -> bool;
}
/// The vsock backend, which is basically an epoll-event-driven vsock channel.
/// Currently, the only implementation we have is `crate::devices::virtio::unix::muxer::VsockMuxer`,
/// which translates guest-side vsock connections to host-side Unix domain socket connections.
pub trait VsockBackend: VsockChannel + VsockEpollListener + Send {}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/vsock/event_handler.rs | src/vmm/src/devices/virtio/vsock/event_handler.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::fmt::Debug;
/// The vsock object implements the runtime logic of our vsock device:
/// 1. Respond to TX queue events by wrapping virtio buffers into `VsockPacket`s, then sending
/// those packets to the `VsockBackend`;
/// 2. Forward backend FD event notifications to the `VsockBackend`;
/// 3. Fetch incoming packets from the `VsockBackend` and place them into the virtio RX queue;
/// 4. Whenever we have processed some virtio buffers (either TX or RX), let the driver know by
/// raising our assigned IRQ.
///
/// In a nutshell, the logic looks like this:
/// - on TX queue event:
/// - fetch all packets from the TX queue and send them to the backend; then
/// - if the backend has queued up any incoming packets, fetch them into any available RX
/// buffers.
/// - on RX queue event:
/// - fetch any incoming packets, queued up by the backend, into newly available RX buffers.
/// - on backend event:
/// - forward the event to the backend; then
/// - again, attempt to fetch any incoming packets queued by the backend into virtio RX
/// buffers.
use event_manager::{EventOps, Events, MutEventSubscriber};
use log::{error, warn};
use vmm_sys_util::epoll::EventSet;
use super::VsockBackend;
use super::device::{EVQ_INDEX, RXQ_INDEX, TXQ_INDEX, Vsock};
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::queue::InvalidAvailIdx;
use crate::devices::virtio::vsock::defs::VSOCK_NUM_QUEUES;
use crate::devices::virtio::vsock::metrics::METRICS;
use crate::logger::IncMetric;
impl<B> Vsock<B>
where
B: Debug + VsockBackend + 'static,
{
const PROCESS_ACTIVATE: u32 = 0;
const PROCESS_RXQ: u32 = 1;
const PROCESS_TXQ: u32 = 2;
const PROCESS_EVQ: u32 = 3;
const PROCESS_NOTIFY_BACKEND: u32 = 4;
pub fn handle_rxq_event(&mut self, evset: EventSet) -> Vec<u16> {
let mut used_queues = Vec::new();
if evset != EventSet::IN {
warn!("vsock: rxq unexpected event {:?}", evset);
METRICS.rx_queue_event_fails.inc();
return used_queues;
}
if let Err(err) = self.queue_events[RXQ_INDEX].read() {
error!("Failed to get vsock rx queue event: {:?}", err);
METRICS.rx_queue_event_fails.inc();
} else if self.backend.has_pending_rx() {
if self.process_rx().unwrap() {
used_queues.push(RXQ_INDEX.try_into().unwrap());
}
METRICS.rx_queue_event_count.inc();
}
used_queues
}
pub fn handle_txq_event(&mut self, evset: EventSet) -> Vec<u16> {
let mut used_queues = Vec::new();
if evset != EventSet::IN {
warn!("vsock: txq unexpected event {:?}", evset);
METRICS.tx_queue_event_fails.inc();
return used_queues;
}
if let Err(err) = self.queue_events[TXQ_INDEX].read() {
error!("Failed to get vsock tx queue event: {:?}", err);
METRICS.tx_queue_event_fails.inc();
} else {
if self.process_tx().unwrap() {
used_queues.push(TXQ_INDEX.try_into().unwrap());
}
METRICS.tx_queue_event_count.inc();
// The backend may have queued up responses to the packets we sent during
// TX queue processing. If that happened, we need to fetch those responses
// and place them into RX buffers.
if self.backend.has_pending_rx() && self.process_rx().unwrap() {
used_queues.push(RXQ_INDEX.try_into().unwrap());
}
}
used_queues
}
pub fn handle_evq_event(&mut self, evset: EventSet) {
if evset != EventSet::IN {
warn!("vsock: evq unexpected event {:?}", evset);
METRICS.ev_queue_event_fails.inc();
return;
}
if let Err(err) = self.queue_events[EVQ_INDEX].read() {
error!("Failed to consume vsock evq event: {:?}", err);
METRICS.ev_queue_event_fails.inc();
}
}
/// Notify backend of new events.
pub fn notify_backend(&mut self, evset: EventSet) -> Result<Vec<u16>, InvalidAvailIdx> {
let mut used_queues = Vec::new();
self.backend.notify(evset);
// After the backend has been kicked, it might've freed up some resources, so we
// can attempt to send it more data to process.
// In particular, if `self.backend.send_pkt()` halted the TX queue processing (by
// returning an error) at some point in the past, now is the time to try walking the
// TX queue again.
if self.process_tx()? {
used_queues.push(TXQ_INDEX.try_into().unwrap());
}
if self.backend.has_pending_rx() && self.process_rx()? {
used_queues.push(RXQ_INDEX.try_into().unwrap())
}
Ok(used_queues)
}
fn register_runtime_events(&self, ops: &mut EventOps) {
if let Err(err) = ops.add(Events::with_data(
&self.queue_events[RXQ_INDEX],
Self::PROCESS_RXQ,
EventSet::IN,
)) {
error!("Failed to register rx queue event: {}", err);
}
if let Err(err) = ops.add(Events::with_data(
&self.queue_events[TXQ_INDEX],
Self::PROCESS_TXQ,
EventSet::IN,
)) {
error!("Failed to register tx queue event: {}", err);
}
if let Err(err) = ops.add(Events::with_data(
&self.queue_events[EVQ_INDEX],
Self::PROCESS_EVQ,
EventSet::IN,
)) {
error!("Failed to register ev queue event: {}", err);
}
if let Err(err) = ops.add(Events::with_data(
&self.backend,
Self::PROCESS_NOTIFY_BACKEND,
self.backend.get_polled_evset(),
)) {
error!("Failed to register vsock backend event: {}", err);
}
}
fn register_activate_event(&self, ops: &mut EventOps) {
if let Err(err) = ops.add(Events::with_data(
&self.activate_evt,
Self::PROCESS_ACTIVATE,
EventSet::IN,
)) {
error!("Failed to register activate event: {}", err);
}
}
fn handle_activate_event(&self, ops: &mut EventOps) {
if let Err(err) = self.activate_evt.read() {
error!("Failed to consume net activate event: {:?}", err);
}
self.register_runtime_events(ops);
if let Err(err) = ops.remove(Events::with_data(
&self.activate_evt,
Self::PROCESS_ACTIVATE,
EventSet::IN,
)) {
error!("Failed to un-register activate event: {}", err);
}
}
}
impl<B> MutEventSubscriber for Vsock<B>
where
B: Debug + VsockBackend + 'static,
{
fn process(&mut self, event: Events, ops: &mut EventOps) {
let source = event.data();
let evset = event.event_set();
if self.is_activated() {
let used_queues = match source {
Self::PROCESS_ACTIVATE => {
self.handle_activate_event(ops);
Vec::new()
}
Self::PROCESS_RXQ => self.handle_rxq_event(evset),
Self::PROCESS_TXQ => self.handle_txq_event(evset),
Self::PROCESS_EVQ => {
self.handle_evq_event(evset);
Vec::new()
}
Self::PROCESS_NOTIFY_BACKEND => self.notify_backend(evset).unwrap(),
_ => {
warn!("Unexpected vsock event received: {:?}", source);
Vec::new()
}
};
self.signal_used_queues(&used_queues)
.expect("vsock: Could not trigger device interrupt");
} else {
warn!(
"Vsock: The device is not yet activated. Spurious event received: {:?}",
source
);
}
}
fn init(&mut self, ops: &mut EventOps) {
// This function can be called during different points in the device lifetime:
// - shortly after device creation,
// - on device activation (is-activated already true at this point),
// - on device restore from snapshot.
if self.is_activated() {
self.register_runtime_events(ops);
} else {
self.register_activate_event(ops);
}
}
}
#[cfg(test)]
mod tests {
use std::sync::{Arc, Mutex};
use event_manager::{EventManager, SubscriberOps};
use super::super::*;
use super::*;
use crate::devices::virtio::vsock::test_utils::{EventHandlerContext, TestContext};
#[test]
fn test_txq_event() {
// Test case:
// - the driver has something to send (there's data in the TX queue); and
// - the backend has no pending RX data.
{
let test_ctx = TestContext::new();
let mut ctx = test_ctx.create_event_handler_context();
ctx.mock_activate(test_ctx.mem.clone(), test_ctx.interrupt.clone());
ctx.device.backend.set_pending_rx(false);
ctx.signal_txq_event();
// The available TX descriptor should have been used.
assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
// The available RX descriptor should be untouched.
assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
}
// Test case:
// - the driver has something to send (there's data in the TX queue); and
// - the backend also has some pending RX data.
{
let test_ctx = TestContext::new();
let mut ctx = test_ctx.create_event_handler_context();
ctx.mock_activate(test_ctx.mem.clone(), test_ctx.interrupt.clone());
ctx.device.backend.set_pending_rx(true);
ctx.signal_txq_event();
// Both available RX and TX descriptors should have been used.
assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
}
// Test case:
// - the driver has something to send (there's data in the TX queue); and
// - the backend errors out and cannot process the TX queue.
{
let test_ctx = TestContext::new();
let mut ctx = test_ctx.create_event_handler_context();
ctx.mock_activate(test_ctx.mem.clone(), test_ctx.interrupt.clone());
ctx.device.backend.set_pending_rx(false);
ctx.device.backend.set_tx_err(Some(VsockError::NoData));
ctx.signal_txq_event();
// Both RX and TX queues should be untouched.
assert_eq!(ctx.guest_txvq.used.idx.get(), 0);
assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
}
// Test case:
// - the driver supplied a malformed TX buffer.
{
let test_ctx = TestContext::new();
let mut ctx = test_ctx.create_event_handler_context();
ctx.mock_activate(test_ctx.mem.clone(), test_ctx.interrupt.clone());
// Invalidate the descriptor chain, by setting its length to 0.
ctx.guest_txvq.dtable[0].len.set(0);
ctx.guest_txvq.dtable[1].len.set(0);
ctx.signal_txq_event();
// The available descriptor should have been consumed, but no packet should have
// reached the backend.
assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
assert_eq!(ctx.device.backend.tx_ok_cnt, 0);
}
// Test case: spurious TXQ_EVENT.
{
let test_ctx = TestContext::new();
let mut ctx = test_ctx.create_event_handler_context();
ctx.mock_activate(test_ctx.mem.clone(), test_ctx.interrupt.clone());
let metric_before = METRICS.tx_queue_event_fails.count();
ctx.device.handle_txq_event(EventSet::IN);
assert_eq!(metric_before + 1, METRICS.tx_queue_event_fails.count());
}
}
#[test]
fn test_rxq_event() {
// Test case:
// - there is pending RX data in the backend; and
// - the driver makes RX buffers available; and
// - the backend successfully places its RX data into the queue.
{
let test_ctx = TestContext::new();
let mut ctx = test_ctx.create_event_handler_context();
ctx.mock_activate(test_ctx.mem.clone(), test_ctx.interrupt.clone());
ctx.device.backend.set_pending_rx(true);
ctx.device.backend.set_rx_err(Some(VsockError::NoData));
ctx.signal_rxq_event();
// The available RX buffer should've been left untouched.
assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
}
// Test case:
// - there is pending RX data in the backend; and
// - the driver makes RX buffers available; and
// - the backend errors out, when attempting to receive data.
{
let test_ctx = TestContext::new();
let mut ctx = test_ctx.create_event_handler_context();
ctx.mock_activate(test_ctx.mem.clone(), test_ctx.interrupt.clone());
ctx.device.backend.set_pending_rx(true);
ctx.signal_rxq_event();
// The available RX buffer should have been used.
assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
}
// Test case: the driver provided a malformed RX descriptor chain.
{
let test_ctx = TestContext::new();
let mut ctx = test_ctx.create_event_handler_context();
ctx.mock_activate(test_ctx.mem.clone(), test_ctx.interrupt.clone());
// Invalidate the descriptor chain, by setting its length to 0.
ctx.guest_rxvq.dtable[0].len.set(0);
ctx.guest_rxvq.dtable[1].len.set(0);
// The chain should've been processed, without employing the backend.
assert!(ctx.device.process_rx().unwrap());
assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
assert_eq!(ctx.device.backend.rx_ok_cnt, 0);
}
// Test case: spurious RXQ_EVENT.
{
let test_ctx = TestContext::new();
let mut ctx = test_ctx.create_event_handler_context();
ctx.mock_activate(test_ctx.mem.clone(), test_ctx.interrupt.clone());
ctx.device.backend.set_pending_rx(false);
let metric_before = METRICS.rx_queue_event_fails.count();
ctx.device.handle_rxq_event(EventSet::IN);
assert_eq!(metric_before + 1, METRICS.rx_queue_event_fails.count());
}
}
#[test]
fn test_evq_event() {
// Test case: spurious EVQ_EVENT.
{
let test_ctx = TestContext::new();
let mut ctx = test_ctx.create_event_handler_context();
ctx.device.backend.set_pending_rx(false);
let metric_before = METRICS.ev_queue_event_fails.count();
ctx.device.handle_evq_event(EventSet::IN);
assert_eq!(metric_before + 1, METRICS.ev_queue_event_fails.count());
}
}
#[test]
fn test_backend_event() {
// Test case:
// - a backend event is received; and
// - the backend has pending RX data.
{
let test_ctx = TestContext::new();
let mut ctx = test_ctx.create_event_handler_context();
ctx.mock_activate(test_ctx.mem.clone(), test_ctx.interrupt.clone());
ctx.device.backend.set_pending_rx(true);
ctx.device.notify_backend(EventSet::IN).unwrap();
// The backend should've received this event.
assert_eq!(ctx.device.backend.evset, Some(EventSet::IN));
// TX queue processing should've been triggered.
assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
// RX queue processing should've been triggered.
assert_eq!(ctx.guest_rxvq.used.idx.get(), 1);
}
// Test case:
// - a backend event is received; and
// - the backend doesn't have any pending RX data.
{
let test_ctx = TestContext::new();
let mut ctx = test_ctx.create_event_handler_context();
ctx.mock_activate(test_ctx.mem.clone(), test_ctx.interrupt.clone());
ctx.device.backend.set_pending_rx(false);
ctx.device.notify_backend(EventSet::IN).unwrap();
// The backend should've received this event.
assert_eq!(ctx.device.backend.evset, Some(EventSet::IN));
// TX queue processing should've been triggered.
assert_eq!(ctx.guest_txvq.used.idx.get(), 1);
// The RX queue should've been left untouched.
assert_eq!(ctx.guest_rxvq.used.idx.get(), 0);
}
}
// Creates an epoll handler context and attempts to assemble a VsockPkt from the descriptor
// chains available on the rx and tx virtqueues, but first it will set the addr and len
// of the descriptor specified by desc_idx to the provided values. We are only using this
// function for testing error cases, so the asserts always expect is_err() to be true. When
// desc_idx = 0 we are altering the header (first descriptor in the chain), and when
// desc_idx = 1 we are altering the packet buffer.
#[cfg(target_arch = "x86_64")]
fn vsock_bof_helper(test_ctx: &mut TestContext, desc_idx: usize, addr: u64, len: u32) {
use crate::vstate::memory::{Bytes, GuestAddress};
assert!(desc_idx <= 1);
{
let mut ctx = test_ctx.create_event_handler_context();
ctx.guest_rxvq.dtable[desc_idx].addr.set(addr);
ctx.guest_rxvq.dtable[desc_idx].len.set(len);
// If the descriptor chain is already declared invalid, there's no reason to assemble
// a packet.
if let Some(rx_desc) = ctx.device.queues[RXQ_INDEX].pop().unwrap() {
VsockPacketRx::new()
.unwrap()
.parse(&test_ctx.mem, rx_desc)
.unwrap_err();
}
}
{
let mut ctx = test_ctx.create_event_handler_context();
// When modifying the buffer descriptor, make sure the len field is altered in the
// vsock packet header descriptor as well.
if desc_idx == 1 {
// The vsock packet len field has offset 24 in the header.
let hdr_len_addr = GuestAddress(ctx.guest_txvq.dtable[0].addr.get() + 24);
test_ctx
.mem
.write_obj(len.to_le_bytes(), hdr_len_addr)
.unwrap();
}
ctx.guest_txvq.dtable[desc_idx].addr.set(addr);
ctx.guest_txvq.dtable[desc_idx].len.set(len);
if let Some(tx_desc) = ctx.device.queues[TXQ_INDEX].pop().unwrap() {
VsockPacketTx::default()
.parse(&test_ctx.mem, tx_desc)
.unwrap_err();
}
}
}
#[test]
#[cfg(target_arch = "x86_64")]
#[allow(clippy::cast_possible_truncation)] /* casting of constants we know fit into u32 */
fn test_vsock_bof() {
use crate::arch::x86_64::layout::FIRST_ADDR_PAST_32BITS;
use crate::arch::{MMIO32_MEM_SIZE, MMIO32_MEM_START};
use crate::devices::virtio::vsock::packet::VSOCK_PKT_HDR_SIZE;
use crate::test_utils::multi_region_mem;
use crate::utils::mib_to_bytes;
use crate::vstate::memory::GuestAddress;
const MIB: usize = mib_to_bytes(1);
let mut test_ctx = TestContext::new();
test_ctx.mem = multi_region_mem(&[
(GuestAddress(0), 8 * MIB),
(GuestAddress(MMIO32_MEM_START - MIB as u64), MIB),
(GuestAddress(FIRST_ADDR_PAST_32BITS), MIB),
]);
// The default configured descriptor chains are valid.
{
let mut ctx = test_ctx.create_event_handler_context();
let rx_desc = ctx.device.queues[RXQ_INDEX].pop().unwrap().unwrap();
VsockPacketRx::new()
.unwrap()
.parse(&test_ctx.mem, rx_desc)
.unwrap();
}
{
let mut ctx = test_ctx.create_event_handler_context();
let tx_desc = ctx.device.queues[TXQ_INDEX].pop().unwrap().unwrap();
VsockPacketTx::default()
.parse(&test_ctx.mem, tx_desc)
.unwrap();
}
// Let's check what happens when the header descriptor is right before the gap.
vsock_bof_helper(&mut test_ctx, 0, MMIO32_MEM_START - 1, VSOCK_PKT_HDR_SIZE);
// Let's check what happens when the buffer descriptor crosses into the gap, but does
// not go past its right edge.
vsock_bof_helper(
&mut test_ctx,
1,
MMIO32_MEM_START - 4,
MMIO32_MEM_SIZE as u32 + 4,
);
// Let's modify the buffer descriptor addr and len such that it crosses over the MMIO gap,
// and check we cannot assemble the VsockPkts.
vsock_bof_helper(
&mut test_ctx,
1,
MMIO32_MEM_START - 4,
MMIO32_MEM_SIZE as u32 + 100,
);
}
#[test]
fn test_event_handler() {
let mut event_manager = EventManager::new().unwrap();
let test_ctx = TestContext::new();
let EventHandlerContext {
device,
guest_rxvq,
guest_txvq,
..
} = test_ctx.create_event_handler_context();
let vsock = Arc::new(Mutex::new(device));
let _id = event_manager.add_subscriber(vsock.clone());
// Push a queue event
// - the driver has something to send (there's data in the TX queue); and
// - the backend also has some pending RX data.
{
let mut device = vsock.lock().unwrap();
device.backend.set_pending_rx(true);
device.queue_events[TXQ_INDEX].write(1).unwrap();
}
// EventManager should report no events since vsock has only registered
// its activation event so far (even though there is also a queue event pending).
let ev_count = event_manager.run_with_timeout(50).unwrap();
assert_eq!(ev_count, 0);
// Manually force a queue event and check it's ignored pre-activation.
{
let device = vsock.lock().unwrap();
// Artificially push event.
device.queue_events[TXQ_INDEX].write(1).unwrap();
let ev_count = event_manager.run_with_timeout(50).unwrap();
assert_eq!(ev_count, 0);
// Both available RX and TX descriptors should be untouched.
assert_eq!(guest_rxvq.used.idx.get(), 0);
assert_eq!(guest_txvq.used.idx.get(), 0);
}
// Now activate the device.
vsock
.lock()
.unwrap()
.activate(test_ctx.mem.clone(), test_ctx.interrupt.clone())
.unwrap();
// Process the activate event.
let ev_count = event_manager.run_with_timeout(50).unwrap();
assert_eq!(ev_count, 1);
// Handle the previously pushed queue event through EventManager.
{
let ev_count = event_manager
.run_with_timeout(100)
.expect("Metrics event timeout or error.");
assert_eq!(ev_count, 1);
// Both available RX and TX descriptors should have been used.
assert_eq!(guest_rxvq.used.idx.get(), 1);
assert_eq!(guest_txvq.used.idx.get(), 1);
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/vsock/metrics.rs | src/vmm/src/devices/virtio/vsock/metrics.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the metrics system for vsock devices.
//!
//! # Metrics format
//! The metrics are flushed in JSON when requested by vmm::logger::metrics::METRICS.write().
//!
//! ## JSON example with metrics:
//! ```json
//! "vsock": {
//! "activate_fails": "SharedIncMetric",
//! "cfg_fails": "SharedIncMetric",
//! "rx_queue_event_fails": "SharedIncMetric",
//! "tx_queue_event_fails": "SharedIncMetric",
//! "ev_queue_event_fails": "SharedIncMetric",
//! "muxer_event_fails": "SharedIncMetric",
//! ...
//! }
//! }
//! ```
//! Each `vsock` field in the example above is a serializable `VsockDeviceMetrics` structure
//! collecting metrics such as `activate_fails`, `cfg_fails`, etc. for the Vsock device.
//! Since vsock doesn't support multiple devices, there is no per device metrics and
//! `vsock` represents the aggregate metrics for all vsock connections.
//!
//! # Design
//! The main design goals of this system are:
//! * Have a consistent approach of keeping device related metrics in the individual devices
//! modules.
//! * To decouple vsock device metrics from logger module by moving VsockDeviceMetrics out of
//! FirecrackerDeviceMetrics.
//! * Rely on `serde` to provide the actual serialization for writing the metrics.
//!
//! The system implements 1 type of metrics:
//! * Shared Incremental Metrics (SharedIncMetrics) - dedicated for the metrics which need a counter
//! (i.e the number of times an API request failed). These metrics are reset upon flush.
use serde::ser::SerializeMap;
use serde::{Serialize, Serializer};
use crate::logger::SharedIncMetric;
/// Stores aggregate metrics of all Vsock connections/actions
pub(super) static METRICS: VsockDeviceMetrics = VsockDeviceMetrics::new();
/// Called by METRICS.flush(), this function facilitates serialization of vsock device metrics.
pub fn flush_metrics<S: Serializer>(serializer: S) -> Result<S::Ok, S::Error> {
let mut seq = serializer.serialize_map(Some(1))?;
seq.serialize_entry("vsock", &METRICS)?;
seq.end()
}
/// Vsock-related metrics.
#[derive(Debug, Serialize)]
pub(super) struct VsockDeviceMetrics {
/// Number of times when activate failed on a vsock device.
pub activate_fails: SharedIncMetric,
/// Number of times when interacting with the space config of a vsock device failed.
pub cfg_fails: SharedIncMetric,
/// Number of times when handling RX queue events on a vsock device failed.
pub rx_queue_event_fails: SharedIncMetric,
/// Number of times when handling TX queue events on a vsock device failed.
pub tx_queue_event_fails: SharedIncMetric,
/// Number of times when handling event queue events on a vsock device failed.
pub ev_queue_event_fails: SharedIncMetric,
/// Number of times when handling muxer events on a vsock device failed.
pub muxer_event_fails: SharedIncMetric,
/// Number of times when handling connection events on a vsock device failed.
pub conn_event_fails: SharedIncMetric,
/// Number of events associated with the receiving queue.
pub rx_queue_event_count: SharedIncMetric,
/// Number of events associated with the transmitting queue.
pub tx_queue_event_count: SharedIncMetric,
/// Number of bytes received.
pub rx_bytes_count: SharedIncMetric,
/// Number of transmitted bytes.
pub tx_bytes_count: SharedIncMetric,
/// Number of packets received.
pub rx_packets_count: SharedIncMetric,
/// Number of transmitted packets.
pub tx_packets_count: SharedIncMetric,
/// Number of added connections.
pub conns_added: SharedIncMetric,
/// Number of killed connections.
pub conns_killed: SharedIncMetric,
/// Number of removed connections.
pub conns_removed: SharedIncMetric,
/// How many times the killq has been resynced.
pub killq_resync: SharedIncMetric,
/// How many flush fails have been seen.
pub tx_flush_fails: SharedIncMetric,
/// How many write fails have been seen.
pub tx_write_fails: SharedIncMetric,
/// Number of times read() has failed.
pub rx_read_fails: SharedIncMetric,
}
impl VsockDeviceMetrics {
// We need this because vsock::metrics::METRICS does not accept
// VsockDeviceMetrics::default()
const fn new() -> Self {
Self {
activate_fails: SharedIncMetric::new(),
cfg_fails: SharedIncMetric::new(),
rx_queue_event_fails: SharedIncMetric::new(),
tx_queue_event_fails: SharedIncMetric::new(),
ev_queue_event_fails: SharedIncMetric::new(),
muxer_event_fails: SharedIncMetric::new(),
conn_event_fails: SharedIncMetric::new(),
rx_queue_event_count: SharedIncMetric::new(),
tx_queue_event_count: SharedIncMetric::new(),
rx_bytes_count: SharedIncMetric::new(),
tx_bytes_count: SharedIncMetric::new(),
rx_packets_count: SharedIncMetric::new(),
tx_packets_count: SharedIncMetric::new(),
conns_added: SharedIncMetric::new(),
conns_killed: SharedIncMetric::new(),
conns_removed: SharedIncMetric::new(),
killq_resync: SharedIncMetric::new(),
tx_flush_fails: SharedIncMetric::new(),
tx_write_fails: SharedIncMetric::new(),
rx_read_fails: SharedIncMetric::new(),
}
}
}
#[cfg(test)]
pub mod tests {
    use super::*;
    use crate::logger::IncMetric;

    #[test]
    fn test_vsock_dev_metrics() {
        // A freshly-built metrics object must serialize identically to the global
        // `METRICS` instance, once the latter has been flushed.
        let local_metrics = VsockDeviceMetrics::new();
        let serialized_local = serde_json::to_string(&local_metrics).unwrap();
        // The first serialization flushes the metrics and resets values to 0, so the
        // second serialization can be compared against the pristine local object.
        serde_json::to_string(&METRICS).unwrap();
        let serialized_global = serde_json::to_string(&METRICS).unwrap();
        assert_eq!(serialized_local, serialized_global);
        // Sanity-check that incrementing a counter is observable.
        local_metrics.conns_added.inc();
        assert_eq!(local_metrics.conns_added.count(), 1);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/vsock/unix/muxer_rxq.rs | src/vmm/src/devices/virtio/vsock/unix/muxer_rxq.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
/// `MuxerRxQ` implements a helper object that `VsockMuxer` can use for queuing RX (host ->
/// guest) packets (or rather instructions on how to build said packets).
///
/// Under ideal operation, every connection, that has pending RX data, will be present in the
/// muxer RX queue. However, since the RX queue is smaller than the connection pool, it may,
/// under some conditions, become full, meaning that it can no longer account for all the
/// connections that can yield RX data. When that happens, we say that it is no longer
/// "synchronized" (i.e. with the connection pool). A desynchronized RX queue still holds
/// valid data, and the muxer will continue to pop packets from it. However, when a
/// desynchronized queue is drained, additional data may still be available, so the muxer will
/// have to perform a more costly walk of the entire connection pool to find it. This walk is
/// performed here, as part of building an RX queue from the connection pool. When an
/// out-of-sync is drained, the muxer will discard it, and attempt to rebuild a synced one.
use std::collections::{HashMap, VecDeque};
use super::super::VsockChannel;
use super::muxer::{ConnMapKey, MuxerRx};
use super::{MuxerConnection, defs};
/// The muxer RX queue.
///
/// Holds instructions for building host -> guest packets. See the module-level
/// documentation for how (de)synchronization with the connection pool works.
#[derive(Debug)]
pub struct MuxerRxQ {
    /// The RX queue data.
    q: VecDeque<MuxerRx>,
    /// The RX queue sync status: `true` iff every connection with pending RX data
    /// is accounted for in `q`.
    synced: bool,
}
impl MuxerRxQ {
    /// Maximum number of items the queue can hold.
    const SIZE: usize = defs::MUXER_RXQ_SIZE as usize;

    /// Trivial RX queue constructor. The queue starts out empty and synchronized.
    pub fn new() -> Self {
        Self {
            q: VecDeque::with_capacity(Self::SIZE),
            synced: true,
        }
    }

    /// Attempt to build an RX queue, that is synchronized to the connection pool.
    ///
    /// Note: the resulting queue may still be desynchronized, if there are too many connections
    /// that have pending RX data. In that case, the muxer will first drain this queue, and
    /// then try again to build a synchronized one.
    pub fn from_conn_map(conn_map: &HashMap<ConnMapKey, MuxerConnection>) -> Self {
        let mut q = VecDeque::new();
        let mut synced = true;
        for (key, conn) in conn_map.iter() {
            if !conn.has_pending_rx() {
                continue;
            }
            if q.len() >= Self::SIZE {
                // Ran out of room before covering every pending-RX connection.
                synced = false;
                break;
            }
            q.push_back(MuxerRx::ConnRx(*key));
        }
        Self { q, synced }
    }

    /// Push a new RX item to the queue.
    ///
    /// A push will fail when:
    /// - trying to push a connection key onto an out-of-sync, or full queue; or
    /// - trying to push an RST onto a queue already full of RSTs.
    ///
    /// RSTs take precedence over connections, because connections can always be queried for
    /// pending RX data later. Aside from this queue, there is no other storage for RSTs, so
    /// failing to push one means that we have to drop the packet.
    ///
    /// Returns:
    /// - `true` if the new item has been successfully queued; or
    /// - `false` if there was no room left in the queue.
    pub fn push(&mut self, rx: MuxerRx) -> bool {
        // Pushing to a non-full, synchronized queue will always succeed.
        if self.is_synced() && !self.is_full() {
            self.q.push_back(rx);
            return true;
        }
        match rx {
            MuxerRx::RstPkt { .. } => {
                // If we just failed to push an RST packet, we'll look through the queue, trying to
                // find a connection key that we could evict. This way, the queue does lose sync,
                // but we don't drop any packets.
                for qi in self.q.iter_mut().rev() {
                    if matches!(qi, MuxerRx::ConnRx(_)) {
                        *qi = rx;
                        self.synced = false;
                        return true;
                    }
                }
            }
            MuxerRx::ConnRx(_) => {
                // A connection RX indication can be recovered later by rebuilding the queue
                // from the connection pool, so it is safe to just go out-of-sync here.
                self.synced = false;
            }
        };
        false
    }

    /// Peek into the front of the queue.
    pub fn peek(&self) -> Option<MuxerRx> {
        self.q.front().copied()
    }

    /// Pop an RX item from the front of the queue.
    pub fn pop(&mut self) -> Option<MuxerRx> {
        self.q.pop_front()
    }

    /// Check if the RX queue is synchronized with the connection pool.
    pub fn is_synced(&self) -> bool {
        self.synced
    }

    /// Get the total number of items in the queue.
    pub fn len(&self) -> usize {
        self.q.len()
    }

    /// Check if the queue is empty.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Check if the queue is full.
    pub fn is_full(&self) -> bool {
        self.len() == Self::SIZE
    }
}

// Clippy `new_without_default`: a public type with a no-argument `new()` should also
// implement `Default`, so it composes with `Default`-based construction.
impl Default for MuxerRxQ {
    /// Equivalent to [`MuxerRxQ::new`].
    fn default() -> Self {
        Self::new()
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/vsock/unix/mod.rs | src/vmm/src/devices/virtio/vsock/unix/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
/// This module implements the Unix Domain Sockets backend for vsock - a mediator between
/// guest-side AF_VSOCK sockets and host-side AF_UNIX sockets. The heavy lifting is performed by
/// `muxer::VsockMuxer`, a connection multiplexer that uses `super::csm::VsockConnection` for
/// handling vsock connection states.
/// Check out `muxer.rs` for a more detailed explanation of the inner workings of this backend.
mod muxer;
mod muxer_killq;
mod muxer_rxq;
pub use muxer::VsockMuxer as VsockUnixBackend;
use crate::devices::virtio::vsock::csm::VsockConnectionBackend;
/// Compile-time tunables for the Unix vsock backend.
mod defs {
    /// Maximum number of established connections that we can handle.
    pub const MAX_CONNECTIONS: usize = 1023;
    /// Size of the muxer RX packet queue.
    pub const MUXER_RXQ_SIZE: u32 = 256;
    /// Size of the muxer connection kill queue.
    pub const MUXER_KILLQ_SIZE: u32 = 128;
}
/// Vsock backend related errors.
//
// NOTE: with `displaydoc::Display`, each variant's `///` doc comment doubles as its
// user-visible `Display` message, so those strings are part of runtime behavior and
// must not be reworded casually.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum VsockUnixBackendError {
    /// Error registering a new epoll-listening FD: {0}
    EpollAdd(std::io::Error),
    /// Error creating an epoll FD: {0}
    EpollFdCreate(std::io::Error),
    /// The host made an invalid vsock port connection request.
    InvalidPortRequest,
    /// Error accepting a new connection from the host-side Unix socket: {0}
    UnixAccept(std::io::Error),
    /// Error binding to the host-side Unix socket: {0}
    UnixBind(std::io::Error),
    /// Error connecting to a host-side Unix socket: {0}
    UnixConnect(std::io::Error),
    /// Error reading from host-side Unix socket: {0}
    UnixRead(std::io::Error),
    /// Muxer connection limit reached.
    TooManyConnections,
}
// A muxer connection is the generic vsock connection state machine, instantiated over a
// host-side AF_UNIX stream.
type MuxerConnection = super::csm::VsockConnection<std::os::unix::net::UnixStream>;
// Marker impl: lets `UnixStream` act as the host-side endpoint of a vsock connection.
impl VsockConnectionBackend for std::os::unix::net::UnixStream {}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/vsock/unix/muxer.rs | src/vmm/src/devices/virtio/vsock/unix/muxer.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
/// `VsockMuxer` is the device-facing component of the Unix domain sockets vsock backend. I.e.
/// by implementing the `VsockBackend` trait, it abstracts away the gory details of translating
/// between AF_VSOCK and AF_UNIX, and presents a clean interface to the rest of the vsock
/// device model.
///
/// The vsock muxer has two main roles:
/// 1. Vsock connection multiplexer: It's the muxer's job to create, manage, and terminate
/// `VsockConnection` objects. The muxer also routes packets to their owning connections. It
/// does so via a connection `HashMap`, keyed by what is basically a (host_port, guest_port)
/// tuple. Vsock packet traffic needs to be inspected, in order to detect connection request
/// packets (leading to the creation of a new connection), and connection reset packets
/// (leading to the termination of an existing connection). All other packets, though, must
/// belong to an existing connection and, as such, the muxer simply forwards them.
/// 2. Event dispatcher There are three event categories that the vsock backend is interested
/// it:
/// 1. A new host-initiated connection is ready to be accepted from the listening host Unix
/// socket;
/// 2. Data is available for reading from a newly-accepted host-initiated connection (i.e.
/// the host is ready to issue a vsock connection request, informing us of the
/// destination port to which it wants to connect);
/// 3. Some event was triggered for a connected Unix socket, that belongs to a
/// `VsockConnection`.
///
/// The muxer gets notified about all of these events, because, as a `VsockEpollListener`
/// implementor, it gets to register a nested epoll FD into the main VMM epolling loop. All
/// other pollable FDs are then registered under this nested epoll FD.
/// To route all these events to their handlers, the muxer uses another `HashMap` object,
/// mapping `RawFd`s to `EpollListener`s.
use std::collections::{HashMap, HashSet};
use std::fmt::Debug;
use std::io::Read;
use std::os::unix::io::{AsRawFd, RawFd};
use std::os::unix::net::{UnixListener, UnixStream};
use log::{debug, error, info, warn};
use vmm_sys_util::epoll::{ControlOperation, Epoll, EpollEvent, EventSet};
use super::super::csm::ConnState;
use super::super::defs::uapi;
use super::super::{VsockBackend, VsockChannel, VsockEpollListener, VsockError};
use super::muxer_killq::MuxerKillQ;
use super::muxer_rxq::MuxerRxQ;
use super::{MuxerConnection, VsockUnixBackendError, defs};
use crate::devices::virtio::vsock::metrics::METRICS;
use crate::devices::virtio::vsock::packet::{VsockPacketRx, VsockPacketTx};
use crate::logger::IncMetric;
/// A unique identifier of a `MuxerConnection` object. Connections are stored in a hash map,
/// keyed by a `ConnMapKey` object.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ConnMapKey {
    /// Host-side (local) vsock port of the connection.
    local_port: u32,
    /// Guest-side (peer) vsock port of the connection.
    peer_port: u32,
}
/// A muxer RX queue item.
#[derive(Clone, Copy, Debug)]
pub enum MuxerRx {
    /// The packet must be fetched from the connection identified by `ConnMapKey`.
    ConnRx(ConnMapKey),
    /// The muxer must produce an RST packet.
    RstPkt {
        /// Source (host-side) port of the RST packet to build.
        local_port: u32,
        /// Destination (guest-side) port of the RST packet to build.
        peer_port: u32,
    },
}
/// An epoll listener, registered under the muxer's nested epoll FD.
#[derive(Debug)]
enum EpollListener {
    /// The listener is a `MuxerConnection`, identified by `key`, and interested in the events
    /// in `evset`. Since `MuxerConnection` implements `VsockEpollListener`, notifications will
    /// be forwarded to the listener via `VsockEpollListener::notify()`.
    Connection { key: ConnMapKey, evset: EventSet },
    /// A listener interested in new host-initiated connections.
    HostSock,
    /// A listener interested in reading host `connect <port>` commands from a freshly
    /// connected host socket. The stream is held here until the command has been parsed.
    LocalStream(UnixStream),
}
/// The vsock connection multiplexer.
#[derive(Debug)]
pub struct VsockMuxer {
    /// Guest CID.
    cid: u64,
    /// A hash map used to store the active connections.
    conn_map: HashMap<ConnMapKey, MuxerConnection>,
    /// A hash map used to store epoll event listeners / handlers.
    listener_map: HashMap<RawFd, EpollListener>,
    /// The RX queue. Items in this queue are consumed by `VsockMuxer::recv_pkt()`, and
    /// produced
    /// - by `VsockMuxer::send_pkt()` (e.g. RST in response to a connection request packet); and
    /// - in response to EPOLLIN events (e.g. data available to be read from an AF_UNIX socket).
    rxq: MuxerRxQ,
    /// A queue used for terminating connections that are taking too long to shut down.
    killq: MuxerKillQ,
    /// The Unix socket, through which host-initiated connections are accepted.
    host_sock: UnixListener,
    /// The file system path of the host-side Unix socket. This is used to figure out the path
    /// to Unix sockets listening on specific ports. I.e. `"<this path>_<port number>"`.
    pub(crate) host_sock_path: String,
    /// The nested epoll event set, used to register epoll listeners.
    epoll: Epoll,
    /// A hash set used to keep track of used host-side (local) ports, in order to assign local
    /// ports to host-initiated connections.
    local_port_set: HashSet<u32>,
    /// The last used host-side port; `allocate_local_port()` scans forward from here.
    local_port_last: u32,
}
impl VsockChannel for VsockMuxer {
    /// Deliver a vsock packet to the guest vsock driver.
    ///
    /// Returns:
    /// - `Ok(())`: `pkt` has been successfully filled in; or
    /// - `Err(VsockError::NoData)`: there was no available data with which to fill in the packet.
    fn recv_pkt(&mut self, pkt: &mut VsockPacketRx) -> Result<(), VsockError> {
        // We'll look for instructions on how to build the RX packet in the RX queue. If the
        // queue is empty, that doesn't necessarily mean we don't have any pending RX, since
        // the queue might be out-of-sync. If that's the case, we'll attempt to sync it first,
        // and then try to pop something out again.
        if self.rxq.is_empty() && !self.rxq.is_synced() {
            self.rxq = MuxerRxQ::from_conn_map(&self.conn_map);
        }
        while let Some(rx) = self.rxq.peek() {
            let res = match rx {
                // We need to build an RST packet, going from `local_port` to `peer_port`.
                MuxerRx::RstPkt {
                    local_port,
                    peer_port,
                } => {
                    pkt.hdr
                        .set_op(uapi::VSOCK_OP_RST)
                        .set_src_cid(uapi::VSOCK_HOST_CID)
                        .set_dst_cid(self.cid)
                        .set_src_port(local_port)
                        .set_dst_port(peer_port)
                        .set_len(0)
                        .set_type(uapi::VSOCK_TYPE_STREAM)
                        .set_flags(0)
                        .set_buf_alloc(0)
                        .set_fwd_cnt(0);
                    self.rxq.pop().unwrap();
                    return Ok(());
                }
                // We'll defer building the packet to this connection, since it has something
                // to say.
                MuxerRx::ConnRx(key) => {
                    let mut conn_res = Err(VsockError::NoData);
                    let mut do_pop = true;
                    self.apply_conn_mutation(key, |conn| {
                        conn_res = conn.recv_pkt(pkt);
                        // Keep the connection at the head of the queue for as long as it
                        // still has RX data to yield.
                        do_pop = !conn.has_pending_rx();
                    });
                    if do_pop {
                        self.rxq.pop().unwrap();
                    }
                    conn_res
                }
            };
            if res.is_ok() {
                // Inspect traffic, looking for RST packets, since that means we have to
                // terminate and remove this connection from the active connection pool.
                //
                if pkt.hdr.op() == uapi::VSOCK_OP_RST {
                    self.remove_connection(ConnMapKey {
                        local_port: pkt.hdr.src_port(),
                        peer_port: pkt.hdr.dst_port(),
                    });
                }
                debug!("vsock muxer: RX pkt: {:?}", pkt.hdr);
                return Ok(());
            }
        }
        // Queue drained without producing a packet.
        Err(VsockError::NoData)
    }

    /// Deliver a guest-generated packet to its destination in the vsock backend.
    ///
    /// This absorbs unexpected packets, handles RSTs (by dropping connections), and forwards
    /// all the rest to their owning `MuxerConnection`.
    ///
    /// Returns:
    /// always `Ok(())` - the packet has been consumed, and its virtio TX buffers can be
    /// returned to the guest vsock driver.
    fn send_pkt(&mut self, pkt: &VsockPacketTx) -> Result<(), VsockError> {
        // Note the src/dst swap: the guest's destination port is our local port.
        let conn_key = ConnMapKey {
            local_port: pkt.hdr.dst_port(),
            peer_port: pkt.hdr.src_port(),
        };
        debug!(
            "vsock: muxer.send[rxq.len={}]: {:?}",
            self.rxq.len(),
            pkt.hdr
        );
        // If this packet has an unsupported type (!=stream), we must send back an RST.
        //
        if pkt.hdr.type_() != uapi::VSOCK_TYPE_STREAM {
            self.enq_rst(pkt.hdr.dst_port(), pkt.hdr.src_port());
            return Ok(());
        }
        // We don't know how to handle packets addressed to other CIDs. We only handle the host
        // part of the guest - host communication here.
        if pkt.hdr.dst_cid() != uapi::VSOCK_HOST_CID {
            info!(
                "vsock: dropping guest packet for unknown CID: {:?}",
                pkt.hdr
            );
            return Ok(());
        }
        if !self.conn_map.contains_key(&conn_key) {
            // This packet can't be routed to any active connection (based on its src and dst
            // ports). The only orphan / unroutable packets we know how to handle are
            // connection requests.
            if pkt.hdr.op() == uapi::VSOCK_OP_REQUEST {
                // Oh, this is a connection request!
                self.handle_peer_request_pkt(pkt);
            } else {
                // Send back an RST, to let the drive know we weren't expecting this packet.
                self.enq_rst(pkt.hdr.dst_port(), pkt.hdr.src_port());
            }
            return Ok(());
        }
        // Right, we know where to send this packet, then (to `conn_key`).
        // However, if this is an RST, we have to forcefully terminate the connection, so
        // there's no point in forwarding it the packet.
        if pkt.hdr.op() == uapi::VSOCK_OP_RST {
            self.remove_connection(conn_key);
            return Ok(());
        }
        // Alright, everything looks in order - forward this packet to its owning connection.
        let mut res: Result<(), VsockError> = Ok(());
        self.apply_conn_mutation(conn_key, |conn| {
            res = conn.send_pkt(pkt);
        });
        res
    }

    /// Check if the muxer has any pending RX data, with which to fill a guest-provided RX
    /// buffer. An out-of-sync queue may hide pending data, hence the second check.
    fn has_pending_rx(&self) -> bool {
        !self.rxq.is_empty() || !self.rxq.is_synced()
    }
}
impl AsRawFd for VsockMuxer {
    /// Get the FD to be registered for polling upstream (in the main VMM epoll loop, in this
    /// case).
    ///
    /// This will be the muxer's nested epoll FD, so a single upstream registration covers
    /// every FD the muxer watches.
    fn as_raw_fd(&self) -> RawFd {
        self.epoll.as_raw_fd()
    }
}
impl VsockEpollListener for VsockMuxer {
    /// Get the epoll events to be polled upstream.
    ///
    /// Since the polled FD is a nested epoll FD, we're only interested in EPOLLIN events (i.e.
    /// some event occurred on one of the FDs registered under our epoll FD).
    fn get_polled_evset(&self) -> EventSet {
        EventSet::IN
    }

    /// Notify the muxer about a pending event having occurred under its nested epoll FD.
    fn notify(&mut self, _: EventSet) {
        // Drain the nested epoll in batches of up to 32 events.
        let mut epoll_events = vec![EpollEvent::new(EventSet::empty(), 0); 32];
        // NOTE(review): the 0 argument presumably makes this a non-blocking wait — events
        // are already pending, since the upstream epoll notified us. Confirm against the
        // vmm-sys-util `Epoll::wait` signature.
        match self.epoll.wait(0, epoll_events.as_mut_slice()) {
            Ok(ev_cnt) => {
                for ev in &epoll_events[0..ev_cnt] {
                    self.handle_event(
                        ev.fd(),
                        // It's ok to unwrap here, since the `epoll_events[i].events` is filled
                        // in by `epoll::wait()`, and therefore contains only valid epoll
                        // flags.
                        EventSet::from_bits(ev.events).unwrap(),
                    );
                }
            }
            Err(err) => {
                warn!("vsock: failed to consume muxer epoll event: {}", err);
                METRICS.muxer_event_fails.inc();
            }
        }
    }
}
// Marker impl: `VsockBackend` presumably bundles the channel + epoll-listener traits
// implemented above, so no additional methods are required here — TODO confirm against
// the trait definition.
impl VsockBackend for VsockMuxer {}
impl VsockMuxer {
/// Muxer constructor.
pub fn new(cid: u64, host_sock_path: String) -> Result<Self, VsockUnixBackendError> {
// Open/bind on the host Unix socket, so we can accept host-initiated
// connections.
let host_sock = UnixListener::bind(&host_sock_path)
.and_then(|sock| sock.set_nonblocking(true).map(|_| sock))
.map_err(VsockUnixBackendError::UnixBind)?;
let mut muxer = Self {
cid,
host_sock,
host_sock_path,
epoll: Epoll::new().map_err(VsockUnixBackendError::EpollFdCreate)?,
rxq: MuxerRxQ::new(),
conn_map: HashMap::with_capacity(defs::MAX_CONNECTIONS),
listener_map: HashMap::with_capacity(defs::MAX_CONNECTIONS + 1),
killq: MuxerKillQ::new(),
local_port_last: (1u32 << 30) - 1,
local_port_set: HashSet::with_capacity(defs::MAX_CONNECTIONS),
};
// Listen on the host initiated socket, for incoming connections.
muxer.add_listener(muxer.host_sock.as_raw_fd(), EpollListener::HostSock)?;
Ok(muxer)
}
/// Return the file system path of the host-side Unix socket.
pub fn host_sock_path(&self) -> &str {
&self.host_sock_path
}
    /// Handle/dispatch an epoll event to its listener.
    ///
    /// `fd` identifies the listener (it is the epoll user-data), and `event_set` is the
    /// set of events that fired for it.
    fn handle_event(&mut self, fd: RawFd, event_set: EventSet) {
        debug!(
            "vsock: muxer processing event: fd={}, evset={:?}",
            fd, event_set
        );
        match self.listener_map.get_mut(&fd) {
            // This event needs to be forwarded to a `MuxerConnection` that is listening for
            // it.
            Some(EpollListener::Connection { key, evset: _ }) => {
                let key_copy = *key;
                // The handling of this event will most probably mutate the state of the
                // receiving connection. We'll need to check for new pending RX, event set
                // mutation, and all that, so we're wrapping the event delivery inside those
                // checks.
                self.apply_conn_mutation(key_copy, |conn| {
                    conn.notify(event_set);
                });
            }
            // A new host-initiated connection is ready to be accepted.
            Some(EpollListener::HostSock) => {
                if self.conn_map.len() == defs::MAX_CONNECTIONS {
                    // If we're already maxed-out on connections, we'll just accept and
                    // immediately discard this potentially new one.
                    warn!("vsock: connection limit reached; refusing new host connection");
                    self.host_sock.accept().map(|_| 0).unwrap_or(0);
                    return;
                }
                self.host_sock
                    .accept()
                    .map_err(VsockUnixBackendError::UnixAccept)
                    .and_then(|(stream, _)| {
                        // Connections are polled, so they must not block.
                        stream
                            .set_nonblocking(true)
                            .map(|_| stream)
                            .map_err(VsockUnixBackendError::UnixAccept)
                    })
                    .and_then(|stream| {
                        // Before forwarding this connection to a listening AF_VSOCK socket on
                        // the guest side, we need to know the destination port. We'll read
                        // that port from a "connect" command received on this socket, so the
                        // next step is to ask to be notified the moment we can read from it.
                        self.add_listener(stream.as_raw_fd(), EpollListener::LocalStream(stream))
                    })
                    .unwrap_or_else(|err| {
                        warn!("vsock: unable to accept local connection: {:?}", err);
                    });
            }
            // Data is ready to be read from a host-initiated connection. That would be the
            // "connect" command that we're expecting.
            Some(EpollListener::LocalStream(_)) => {
                // Take ownership of the stream; whether the command parses or not, this
                // temporary listener is done.
                if let Some(EpollListener::LocalStream(mut stream)) = self.remove_listener(fd) {
                    Self::read_local_stream_port(&mut stream)
                        .map(|peer_port| (self.allocate_local_port(), peer_port))
                        .and_then(|(local_port, peer_port)| {
                            self.add_connection(
                                ConnMapKey {
                                    local_port,
                                    peer_port,
                                },
                                MuxerConnection::new_local_init(
                                    stream,
                                    uapi::VSOCK_HOST_CID,
                                    self.cid,
                                    local_port,
                                    peer_port,
                                ),
                            )
                        })
                        .unwrap_or_else(|err| {
                            info!("vsock: error adding local-init connection: {:?}", err);
                        })
                }
            }
            _ => {
                info!(
                    "vsock: unexpected event: fd={:?}, evset={:?}",
                    fd, event_set
                );
                METRICS.muxer_event_fails.inc();
            }
        }
    }
/// Parse a host "connect" command, and extract the destination vsock port.
fn read_local_stream_port(stream: &mut UnixStream) -> Result<u32, VsockUnixBackendError> {
let mut buf = [0u8; 32];
// This is the minimum number of bytes that we should be able to read, when parsing a
// valid connection request. I.e. `b"connect 0\n".len()`.
const MIN_READ_LEN: usize = 10;
// Bring in the minimum number of bytes that we should be able to read.
stream
.read_exact(&mut buf[..MIN_READ_LEN])
.map_err(VsockUnixBackendError::UnixRead)?;
// Now, finish reading the destination port number, by bringing in one byte at a time,
// until we reach an EOL terminator (or our buffer space runs out). Yeah, not
// particularly proud of this approach, but it will have to do for now.
let mut blen = MIN_READ_LEN;
while buf[blen - 1] != b'\n' && blen < buf.len() {
stream
.read_exact(&mut buf[blen..=blen])
.map_err(VsockUnixBackendError::UnixRead)?;
blen += 1;
}
let mut word_iter = std::str::from_utf8(&buf[..blen])
.map_err(|_| VsockUnixBackendError::InvalidPortRequest)?
.split_whitespace();
word_iter
.next()
.ok_or(VsockUnixBackendError::InvalidPortRequest)
.and_then(|word| {
if word.to_lowercase() == "connect" {
Ok(())
} else {
Err(VsockUnixBackendError::InvalidPortRequest)
}
})
.and_then(|_| {
word_iter
.next()
.ok_or(VsockUnixBackendError::InvalidPortRequest)
})
.and_then(|word| {
word.parse::<u32>()
.map_err(|_| VsockUnixBackendError::InvalidPortRequest)
})
.map_err(|_| VsockUnixBackendError::InvalidPortRequest)
}
    /// Add a new connection to the active connection pool.
    ///
    /// Fails with `TooManyConnections` when the pool is full, or propagates the error from
    /// registering the connection's epoll listener.
    fn add_connection(
        &mut self,
        key: ConnMapKey,
        conn: MuxerConnection,
    ) -> Result<(), VsockUnixBackendError> {
        // We might need to make room for this new connection, so let's sweep the kill queue
        // first. It's fine to do this here because:
        // - unless the kill queue is out of sync, this is a pretty inexpensive operation; and
        // - we are under no pressure to respect any accurate timing for connection termination.
        self.sweep_killq();
        if self.conn_map.len() >= defs::MAX_CONNECTIONS {
            info!(
                "vsock: muxer connection limit reached ({})",
                defs::MAX_CONNECTIONS
            );
            return Err(VsockUnixBackendError::TooManyConnections);
        }
        // Only insert into the connection map if the listener registration succeeded.
        self.add_listener(
            conn.as_raw_fd(),
            EpollListener::Connection {
                key,
                evset: conn.get_polled_evset(),
            },
        )
        .map(|_| {
            if conn.has_pending_rx() {
                // We can safely ignore any error in adding a connection RX indication. Worst
                // case scenario, the RX queue will get desynchronized, but we'll handle that
                // the next time we need to yield an RX packet.
                self.rxq.push(MuxerRx::ConnRx(key));
            }
            self.conn_map.insert(key, conn);
            METRICS.conns_added.inc();
        })
    }
/// Remove a connection from the active connection poll.
fn remove_connection(&mut self, key: ConnMapKey) {
if let Some(conn) = self.conn_map.remove(&key) {
self.remove_listener(conn.as_raw_fd());
METRICS.conns_removed.inc();
}
self.free_local_port(key.local_port);
}
    /// Schedule a connection for immediate termination.
    /// I.e. as soon as we can also let our peer know we're dropping the connection, by sending
    /// it an RST packet.
    fn kill_connection(&mut self, key: ConnMapKey) {
        let mut had_rx = false;
        // NOTE(review): the metric is bumped even if `key` is not present in `conn_map`;
        // callers appear to only pass keys of live connections, so this should not matter
        // in practice — confirm if call sites change.
        METRICS.conns_killed.inc();
        self.conn_map.entry(key).and_modify(|conn| {
            had_rx = conn.has_pending_rx();
            conn.kill();
        });
        // This connection will now have an RST packet to yield, so we need to add it to the RX
        // queue. However, there's no point in doing that if it was already in the queue.
        if !had_rx {
            // We can safely ignore any error in adding a connection RX indication. Worst case
            // scenario, the RX queue will get desynchronized, but we'll handle that the next
            // time we need to yield an RX packet.
            self.rxq.push(MuxerRx::ConnRx(key));
        }
    }
/// Register a new epoll listener under the muxer's nested epoll FD.
fn add_listener(
&mut self,
fd: RawFd,
listener: EpollListener,
) -> Result<(), VsockUnixBackendError> {
let evset = match listener {
EpollListener::Connection { evset, .. } => evset,
EpollListener::LocalStream(_) => EventSet::IN,
EpollListener::HostSock => EventSet::IN,
};
self.epoll
.ctl(
ControlOperation::Add,
fd,
EpollEvent::new(evset, u64::try_from(fd).unwrap()),
)
.map(|_| {
self.listener_map.insert(fd, listener);
})
.map_err(VsockUnixBackendError::EpollAdd)?;
Ok(())
}
/// Remove (and return) a previously registered epoll listener.
fn remove_listener(&mut self, fd: RawFd) -> Option<EpollListener> {
let maybe_listener = self.listener_map.remove(&fd);
if maybe_listener.is_some() {
self.epoll
.ctl(ControlOperation::Delete, fd, EpollEvent::default())
.unwrap_or_else(|err| {
warn!(
"vosck muxer: error removing epoll listener for fd {:?}: {:?}",
fd, err
);
});
}
maybe_listener
}
/// Allocate a host-side port to be assigned to a new host-initiated connection.
fn allocate_local_port(&mut self) -> u32 {
// TODO: this doesn't seem very space-efficient.
// Mybe rewrite this to limit port range and use a bitmap?
//
loop {
self.local_port_last = (self.local_port_last + 1) & !(1 << 31) | (1 << 30);
if self.local_port_set.insert(self.local_port_last) {
break;
}
}
self.local_port_last
}
    /// Mark a previously used host-side port as free, making it available for reallocation.
    fn free_local_port(&mut self, port: u32) {
        self.local_port_set.remove(&port);
    }
    /// Handle a new connection request coming from our peer (the guest vsock driver).
    ///
    /// This will attempt to connect to a host-side Unix socket, expected to be listening at
    /// the file system path corresponding to the destination port. If successful, a new
    /// connection object will be created and added to the connection pool. On failure, a new
    /// RST packet will be scheduled for delivery to the guest.
    fn handle_peer_request_pkt(&mut self, pkt: &VsockPacketTx) {
        // Per-port sockets live at "<host_sock_path>_<port>".
        let port_path = format!("{}_{}", self.host_sock_path, pkt.hdr.dst_port());
        UnixStream::connect(port_path)
            .and_then(|stream| stream.set_nonblocking(true).map(|_| stream))
            .map_err(VsockUnixBackendError::UnixConnect)
            .and_then(|stream| {
                self.add_connection(
                    ConnMapKey {
                        local_port: pkt.hdr.dst_port(),
                        peer_port: pkt.hdr.src_port(),
                    },
                    MuxerConnection::new_peer_init(
                        stream,
                        uapi::VSOCK_HOST_CID,
                        self.cid,
                        pkt.hdr.dst_port(),
                        pkt.hdr.src_port(),
                        pkt.hdr.buf_alloc(),
                    ),
                )
            })
            .unwrap_or_else(|_| self.enq_rst(pkt.hdr.dst_port(), pkt.hdr.src_port()));
    }
    /// Perform an action that might mutate a connection's state.
    ///
    /// This is used as shorthand for repetitive tasks that need to be performed after a
    /// connection object mutates. E.g.
    /// - update the connection's epoll listener;
    /// - schedule the connection to be queried for RX data;
    /// - kill the connection if an unrecoverable error occurs.
    fn apply_conn_mutation<F>(&mut self, key: ConnMapKey, mut_fn: F)
    where
        F: FnOnce(&mut MuxerConnection),
    {
        if let Some(conn) = self.conn_map.get_mut(&key) {
            // Snapshot the pre-mutation state, so post-mutation transitions can be detected.
            let had_rx = conn.has_pending_rx();
            let was_expiring = conn.will_expire();
            let prev_state = conn.state();
            mut_fn(conn);
            // If this is a host-initiated connection that has just become established, we'll have
            // to send an ack message to the host end.
            if prev_state == ConnState::LocalInit && conn.state() == ConnState::Established {
                let msg = format!("OK {}\n", key.local_port);
                match conn.send_bytes_raw(msg.as_bytes()) {
                    Ok(written) if written == msg.len() => (),
                    Ok(_) => {
                        // If we can't write a dozen bytes to a pristine connection something
                        // must be really wrong. Killing it.
                        conn.kill();
                        warn!("vsock: unable to fully write connection ack msg.");
                    }
                    Err(err) => {
                        conn.kill();
                        warn!("vsock: unable to ack host connection: {:?}", err);
                    }
                };
            }
            // If the connection wasn't previously scheduled for RX, add it to our RX queue.
            if !had_rx && conn.has_pending_rx() {
                self.rxq.push(MuxerRx::ConnRx(key));
            }
            // If the connection wasn't previously scheduled for termination, add it to the
            // kill queue.
            if !was_expiring && conn.will_expire() {
                // It's safe to unwrap here, since `conn.will_expire()` already guaranteed that
                // an `conn.expiry` is available.
                self.killq.push(key, conn.expiry().unwrap());
            }
            let fd = conn.as_raw_fd();
            let new_evset = conn.get_polled_evset();
            if new_evset.is_empty() {
                // If the connection no longer needs epoll notifications, remove its listener
                // from our list.
                self.remove_listener(fd);
                return;
            }
            if let Some(EpollListener::Connection { evset, .. }) = self.listener_map.get_mut(&fd) {
                if *evset != new_evset {
                    // If the set of events that the connection is interested in has changed,
                    // we need to update its epoll listener.
                    debug!(
                        "vsock: updating listener for (lp={}, pp={}): old={:?}, new={:?}",
                        key.local_port, key.peer_port, *evset, new_evset
                    );
                    *evset = new_evset;
                    self.epoll
                        .ctl(
                            ControlOperation::Modify,
                            fd,
                            EpollEvent::new(new_evset, u64::try_from(fd).unwrap()),
                        )
                        .unwrap_or_else(|err| {
                            // This really shouldn't happen, like, ever. However, "famous last
                            // words" and all that, so let's just kill it with fire, and walk away.
                            self.kill_connection(key);
                            error!(
                                "vsock: error updating epoll listener for (lp={}, pp={}): {:?}",
                                key.local_port, key.peer_port, err
                            );
                            METRICS.muxer_event_fails.inc();
                        });
                }
            } else {
                // The connection had previously asked to be removed from the listener map (by
                // returning an empty event set via `get_polled_evset()`), but now wants back in.
                self.add_listener(
                    fd,
                    EpollListener::Connection {
                        key,
                        evset: new_evset,
                    },
                )
                .unwrap_or_else(|err| {
                    self.kill_connection(key);
                    error!(
                        "vsock: error updating epoll listener for (lp={}, pp={}): {:?}",
                        key.local_port, key.peer_port, err
                    );
                    METRICS.muxer_event_fails.inc();
                });
            }
        }
    }
    /// Check if any connections have timed out, and if so, schedule them for immediate
    /// termination.
    fn sweep_killq(&mut self) {
        while let Some(key) = self.killq.pop() {
            // Connections don't get removed from the kill queue when their kill timer is
            // disarmed, since that would be a costly operation. This means we must check if
            // the connection has indeed expired, prior to killing it.
            let mut kill = false;
            self.conn_map
                .entry(key)
                .and_modify(|conn| kill = conn.has_expired());
            if kill {
                self.kill_connection(key);
            }
        }
        if self.killq.is_empty() && !self.killq.is_synced() {
            self.killq = MuxerKillQ::from_conn_map(&self.conn_map);
            METRICS.killq_resync.inc();
            // If we've just re-created the kill queue, we can sweep it again; maybe there's
            // more to kill. The recursion is bounded: each rebuild follows a full drain of
            // the previous queue.
            self.sweep_killq();
        }
    }
/// Enqueue an RST packet into `self.rxq`.
///
/// Enqueue errors aren't propagated up the call chain, since there is nothing we can do to
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/vsock/unix/muxer_killq.rs | src/vmm/src/devices/virtio/vsock/unix/muxer_killq.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
/// `MuxerKillQ` implements a helper object that `VsockMuxer` can use for scheduling forced
/// connection termination. I.e. after one peer issues a clean shutdown request
/// (VSOCK_OP_SHUTDOWN), the concerned connection is queued for termination (VSOCK_OP_RST) in
/// the near future (herein implemented via an expiring timer).
///
/// Whenever the muxer needs to schedule a connection for termination, it pushes it (or rather
/// an identifier - the connection key) to this queue. A subsequent pop() operation will
/// succeed if and only if the first connection in the queue is ready to be terminated (i.e.
/// its kill timer expired).
///
/// Without using this queue, the muxer would have to walk its entire connection pool
/// (hashmap), whenever it needs to check for expired kill timers. With this queue, both
/// scheduling and termination are performed in constant time. However, since we don't want to
/// waste space on a kill queue that's as big as the connection hashmap itself, it is possible
/// that this queue may become full at times. We call this kill queue "synchronized" if we are
/// certain that all connections that are awaiting termination are present in the queue. This
/// means a simple constant-time pop() operation is enough to check whether any connections
/// need to be terminated. When the kill queue becomes full, though, pushing fails, so
/// connections that should be terminated are left out. The queue is not synchronized anymore.
/// When that happens, the muxer will first drain the queue, and then replace it with a new
/// queue, created by walking the connection pool, looking for connections that will be
/// expiring in the future.
use std::collections::{HashMap, VecDeque};
use std::time::Instant;
use super::muxer::ConnMapKey;
use super::{MuxerConnection, defs};
/// A kill queue item, holding the connection key and the scheduled time for termination.
#[derive(Debug, Clone, Copy)]
struct MuxerKillQItem {
    // Identifies the connection (local port / peer port pair) scheduled for termination.
    key: ConnMapKey,
    // Absolute deadline after which the connection may be forcefully terminated.
    kill_time: Instant,
}
/// The connection kill queue: a FIFO structure, storing the connections that are scheduled for
/// termination.
///
/// Items are kept sorted by their kill deadline (`from_conn_map()` sorts explicitly; `push()`
/// appends monotonically increasing deadlines), so only the front needs checking on `pop()`.
#[derive(Debug)]
pub struct MuxerKillQ {
    /// The kill queue contents.
    q: VecDeque<MuxerKillQItem>,
    /// The kill queue sync status:
    /// - when true, all connections that are awaiting termination are guaranteed to be in this
    ///   queue;
    /// - when false, some connections may have been left out.
    synced: bool,
}
impl MuxerKillQ {
    /// Maximum number of items the kill queue can hold.
    const SIZE: usize = defs::MUXER_KILLQ_SIZE as usize;

    /// Trivial kill queue constructor.
    ///
    /// The queue starts out empty and synchronized: no connection is awaiting termination
    /// yet, so none can possibly be missing from the queue.
    pub fn new() -> Self {
        Self {
            q: VecDeque::with_capacity(Self::SIZE),
            synced: true,
        }
    }

    /// Create a kill queue by walking the connection pool, looking for connections that are
    /// set to expire at some point in the future.
    ///
    /// Note: if more than `Self::SIZE` connections are found, the queue will be created in an
    /// out-of-sync state, and will be discarded after it is emptied.
    pub fn from_conn_map(conn_map: &HashMap<ConnMapKey, MuxerConnection>) -> Self {
        let mut q_buf: Vec<MuxerKillQItem> = Vec::with_capacity(Self::SIZE);
        let mut synced = true;
        for (key, conn) in conn_map.iter() {
            if !conn.will_expire() {
                continue;
            }
            // `HashMap` iteration order is arbitrary, so which connections are left out on
            // overflow is arbitrary too. That's fine: an out-of-sync queue gets rebuilt once
            // it is drained.
            if q_buf.len() >= Self::SIZE {
                synced = false;
                break;
            }
            q_buf.push(MuxerKillQItem {
                key: *key,
                // Safe to unwrap: `will_expire()` returned true, so an expiry is set.
                kill_time: conn.expiry().unwrap(),
            });
        }
        // Sort by deadline, so that `pop()` only ever needs to inspect the queue front.
        q_buf.sort_unstable_by_key(|it| it.kill_time);
        Self {
            q: q_buf.into(),
            synced,
        }
    }

    /// Push a connection key to the queue, scheduling it for termination at `kill_time`.
    ///
    /// If the queue is full, or already out of sync, the entry is silently dropped and the
    /// queue is marked out-of-sync, which will trigger a rebuild (`from_conn_map()`) once it
    /// has been drained.
    pub fn push(&mut self, key: ConnMapKey, kill_time: Instant) {
        if !self.is_synced() || self.is_full() {
            self.synced = false;
            return;
        }
        self.q.push_back(MuxerKillQItem { key, kill_time });
    }

    /// Attempt to pop an expired connection from the kill queue.
    ///
    /// This will succeed and return a connection key, only if the connection at the front of
    /// the queue has expired. Otherwise, `None` is returned.
    pub fn pop(&mut self) -> Option<ConnMapKey> {
        if let Some(item) = self.q.front()
            && Instant::now() > item.kill_time
        {
            return self.q.pop_front().map(|entry| entry.key);
        }
        None
    }

    /// Check if the kill queue is synchronized with the connection pool.
    pub fn is_synced(&self) -> bool {
        self.synced
    }

    /// Check if the kill queue is empty.
    pub fn is_empty(&self) -> bool {
        // Idiomatic emptiness check (clippy `len_zero`), instead of `len() == 0`.
        self.q.is_empty()
    }

    /// Check if the kill queue is full.
    pub fn is_full(&self) -> bool {
        // `>=` is purely defensive: `push()` never lets the length exceed `Self::SIZE`.
        self.q.len() >= Self::SIZE
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/vsock/csm/connection.rs | src/vmm/src/devices/virtio/vsock/csm/connection.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
use std::fmt::Debug;
/// The main job of `VsockConnection` is to forward data traffic, back and forth, between a
/// guest-side AF_VSOCK socket and a host-side generic `Read + Write + AsRawFd` stream, while
/// also managing its internal state.
/// To that end, `VsockConnection` implements:
/// - `VsockChannel` for:
/// - moving data from the host stream to a guest-provided RX buffer, via `recv_pkt()`; and
/// - moving data from a guest-provided TX buffer to the host stream, via `send_pkt()`; and
/// - updating its internal state, by absorbing control packets (anything other than
/// VSOCK_OP_RW).
/// - `VsockEpollListener` for getting notified about the availability of data or free buffer
/// space at the host stream.
///
/// Note: there is a certain asymmetry to the RX and TX data flows:
/// - RX transfers do not need any data buffering, since data is read straight from the
/// host stream and into the guest-provided RX buffer;
/// - TX transfers may require some data to be buffered by `VsockConnection`, if the host
/// peer can't keep up with reading the data that we're writing. This is because, once
/// the guest driver provides some data in a virtio TX buffer, the vsock device must
/// consume it. If that data can't be forwarded straight to the host stream, we'll
/// have to store it in a buffer (and flush it at a later time). Vsock flow control
/// ensures that our TX buffer doesn't overflow.
// The code in this file is best read with a fresh memory of the vsock protocol inner-workings.
// To help with that, here is a
//
// Short primer on the vsock protocol
// ----------------------------------
//
// 1. Establishing a connection A vsock connection is considered established after a two-way
// handshake:
// - the initiating peer sends a connection request packet (`hdr.op` == VSOCK_OP_REQUEST);
// then
// - the listening peer sends back a connection response packet (`hdr.op` ==
// VSOCK_OP_RESPONSE).
//
// 2. Terminating a connection When a peer wants to shut down an established connection, it
// sends a VSOCK_OP_SHUTDOWN packet. Two header flags are used with VSOCK_OP_SHUTDOWN,
// indicating the sender's intention:
// - VSOCK_FLAGS_SHUTDOWN_RCV: the sender will receive no more data for this connection; and
// - VSOCK_FLAGS_SHUTDOWN_SEND: the sender will send no more data for this connection.
// After a shutdown packet, the receiving peer will have some protocol-undefined time to
// flush its buffers, and then forcefully terminate the connection by sending back an RST
// packet. If the shutdown-initiating peer doesn't receive this RST packet during a timeout
// period, it will send one itself, thus terminating the connection.
// Note: a peer can send more than one VSOCK_OP_SHUTDOWN packets. However, read/write
// indications cannot be undone. E.g. once a "no-more-sending" promise was made, it
// cannot be taken back. That is, `hdr.flags` will be ORed between subsequent
// VSOCK_OP_SHUTDOWN packets.
//
// 3. Flow control Before sending a data packet (VSOCK_OP_RW), the sender must make sure that
// the receiver has enough free buffer space to store that data. If this condition is not
// respected, the receiving peer's behaviour is undefined. In this implementation, we
// forcefully terminate the connection by sending back a VSOCK_OP_RST packet. Note: all
// buffer space information is computed and stored on a per-connection basis. Peers keep
// each other informed about the free buffer space they have by filling in two packet header
// members with each packet they send:
// - `hdr.buf_alloc`: the total buffer space the peer has allocated for receiving data; and
// - `hdr.fwd_cnt`: the total number of bytes the peer has successfully flushed out of its
// buffer.
// One can figure out how much space its peer has available in its buffer by inspecting the
// difference between how much it has sent to the peer and how much the peer has flushed out
// (i.e. "forwarded", in the vsock spec terminology):
// `peer_free = peer_buf_alloc - (total_bytes_sent_to_peer - peer_fwd_cnt)`.
// Note: the above requires that peers constantly keep each other informed on their buffer
// space situation. However, since there are no receipt acknowledgement packets
// defined for the vsock protocol, packet flow can often be unidirectional (just one
// peer sending data to another), so the sender's information about the receiver's
// buffer space can get quickly outdated. The vsock protocol defines two solutions to
// this problem:
// 1. The sender can explicitly ask for a buffer space (i.e. "credit") update from its
// peer, via a VSOCK_OP_CREDIT_REQUEST packet, to which it will get a
// VSOCK_OP_CREDIT_UPDATE response (or any response will do, really, since credit
// information must be included in any packet);
// 2. The receiver can be proactive, and send VSOCK_OP_CREDIT_UPDATE packet, whenever
// it thinks its peer's information is out of date.
// Our implementation uses the proactive approach.
use std::io::{ErrorKind, Write};
use std::num::Wrapping;
use std::os::unix::io::{AsRawFd, RawFd};
use std::time::{Duration, Instant};
use log::{debug, error, info, warn};
use vm_memory::GuestMemoryError;
use vm_memory::io::{ReadVolatile, WriteVolatile};
use vmm_sys_util::epoll::EventSet;
use super::super::defs::uapi;
use super::super::{VsockChannel, VsockEpollListener, VsockError};
use super::txbuf::TxBuf;
use super::{ConnState, PendingRx, PendingRxSet, VsockCsmError, defs};
use crate::devices::virtio::vsock::metrics::METRICS;
use crate::devices::virtio::vsock::packet::{VsockPacketHeader, VsockPacketRx, VsockPacketTx};
use crate::logger::IncMetric;
use crate::utils::wrap_usize_to_u32;
/// Trait that vsock connection backends need to implement.
///
/// Used as an alias for `ReadVolatile + Write + WriteVolatile + AsRawFd`
/// (sadly, trait aliases are not supported,
/// <https://github.com/rust-lang/rfcs/pull/1733#issuecomment-243840014>).
///
/// The trait adds no methods of its own; it exists purely to shorten trait bounds.
pub trait VsockConnectionBackend: ReadVolatile + Write + WriteVolatile + AsRawFd {}
/// A self-managing connection object, that handles communication between a guest-side AF_VSOCK
/// socket and a host-side `ReadVolatile + Write + WriteVolatile + AsRawFd` stream.
///
/// Generic over the backend stream type `S`, so both real Unix sockets and test doubles can
/// back a connection.
#[derive(Debug)]
pub struct VsockConnection<S: VsockConnectionBackend> {
    /// The current connection state.
    state: ConnState,
    /// The local CID. Most of the time this will be the constant `2` (the vsock host CID).
    local_cid: u64,
    /// The peer (guest) CID.
    peer_cid: u64,
    /// The local (host) port.
    local_port: u32,
    /// The peer (guest) port.
    peer_port: u32,
    /// The (connected) host-side stream.
    stream: S,
    /// The TX buffer for this connection; only used when the host stream can't keep up with
    /// guest-to-host traffic.
    tx_buf: TxBuf,
    /// Total number of bytes that have been successfully written to `self.stream`, either
    /// directly, or flushed from `self.tx_buf`.
    fwd_cnt: Wrapping<u32>,
    /// The amount of buffer space that the peer (guest) has allocated for this connection.
    peer_buf_alloc: u32,
    /// The total number of bytes that the peer has forwarded away.
    peer_fwd_cnt: Wrapping<u32>,
    /// The total number of bytes sent to the peer (guest vsock driver)
    rx_cnt: Wrapping<u32>,
    /// Our `self.fwd_cnt`, as last sent to the peer. This is used to provide proactive credit
    /// updates, and let the peer know it's OK to send more data.
    last_fwd_cnt_to_peer: Wrapping<u32>,
    /// The set of pending RX packet indications that `recv_pkt()` will use to fill in a
    /// packet for the peer (guest).
    pending_rx: PendingRxSet,
    /// Instant when this connection should be scheduled for immediate termination, due to some
    /// timeout condition having been fulfilled.
    expiry: Option<Instant>,
}
impl<S> VsockChannel for VsockConnection<S>
where
    S: VsockConnectionBackend + Debug,
{
    /// Fill in a vsock packet, to be delivered to our peer (the guest driver).
    ///
    /// As per the `VsockChannel` trait, this should only be called when there is data to be
    /// fetched from the channel (i.e. `has_pending_rx()` is true). Otherwise, it will error
    /// out with `VsockError::NoData`.
    /// Pending RX indications are set by other mutable actions performed on the channel. For
    /// instance, `send_pkt()` could set an Rst indication, if called with a VSOCK_OP_SHUTDOWN
    /// packet, or `notify()` could set a Rw indication (a data packet can be fetched from the
    /// channel), if data was ready to be read from the host stream.
    ///
    /// Returns:
    /// - `Ok(())`: the packet has been successfully filled in and is ready for delivery;
    /// - `Err(VsockError::NoData)`: there was no data available with which to fill in the packet;
    /// - `Err(VsockError::PktBufMissing)`: the packet would've been filled in with data, but it is
    ///   missing the data buffer.
    fn recv_pkt(&mut self, pkt: &mut VsockPacketRx) -> Result<(), VsockError> {
        // Perform some generic initialization that is the same for any packet operation (e.g.
        // source, destination, credit, etc).
        self.init_pkt_hdr(&mut pkt.hdr);
        METRICS.rx_packets_count.inc();
        // The pending RX indications below are checked in strict priority order:
        // Rst > Response > Request > Rw > CreditUpdate.
        //
        // If forceful termination is pending, there's no point in checking for anything else.
        // It's dead, Jim.
        if self.pending_rx.remove(PendingRx::Rst) {
            pkt.hdr.set_op(uapi::VSOCK_OP_RST);
            return Ok(());
        }
        // Next up: if we're due a connection confirmation, that's all we need to know to fill
        // in this packet.
        if self.pending_rx.remove(PendingRx::Response) {
            self.state = ConnState::Established;
            pkt.hdr.set_op(uapi::VSOCK_OP_RESPONSE);
            return Ok(());
        }
        // Same thing goes for locally-initiated connections that need to yield a connection
        // request.
        if self.pending_rx.remove(PendingRx::Request) {
            // Arm the request timeout: if the guest never answers, the muxer will kill us.
            self.expiry =
                Some(Instant::now() + Duration::from_millis(defs::CONN_REQUEST_TIMEOUT_MS));
            pkt.hdr.set_op(uapi::VSOCK_OP_REQUEST);
            return Ok(());
        }
        if self.pending_rx.remove(PendingRx::Rw) {
            // We're due to produce a data packet, by reading the data from the host-side
            // Unix socket.
            match self.state {
                // A data packet is only valid for established connections, and connections for
                // which our peer has initiated a graceful shutdown, but can still receive data.
                ConnState::Established | ConnState::PeerClosed(false, _) => (),
                _ => {
                    // Any other connection state is invalid at this point, and we need to kill it
                    // with fire.
                    pkt.hdr.set_op(uapi::VSOCK_OP_RST);
                    return Ok(());
                }
            }
            // Oh wait, before we start bringing in the big data, can our peer handle receiving so
            // much bytey goodness?
            if self.need_credit_update_from_peer() {
                self.last_fwd_cnt_to_peer = self.fwd_cnt;
                pkt.hdr.set_op(uapi::VSOCK_OP_CREDIT_REQUEST);
                return Ok(());
            }
            // The maximum amount of data we can read in is limited by both the RX buffer size and
            // the peer available buffer space.
            let max_len = std::cmp::min(pkt.buf_size(), self.peer_avail_credit());
            // Read data from the stream straight to the RX buffer, for maximum throughput.
            match pkt.read_at_offset_from(&mut self.stream, 0, max_len) {
                Ok(read_cnt) => {
                    if read_cnt == 0 {
                        // A 0-length read means the host stream was closed down. In that case,
                        // we'll ask our peer to shut down the connection. We can neither send nor
                        // receive any more data.
                        self.state = ConnState::LocalClosed;
                        self.expiry = Some(
                            Instant::now() + Duration::from_millis(defs::CONN_SHUTDOWN_TIMEOUT_MS),
                        );
                        pkt.hdr
                            .set_op(uapi::VSOCK_OP_SHUTDOWN)
                            .set_flag(uapi::VSOCK_FLAGS_SHUTDOWN_RCV)
                            .set_flag(uapi::VSOCK_FLAGS_SHUTDOWN_SEND);
                    } else {
                        // On a successful data read, we fill in the packet with the RW op, and
                        // length of the read data.
                        // Safe to unwrap because read_cnt is no more than max_len, which is bounded
                        // by self.peer_avail_credit(), a u32 internally.
                        pkt.hdr.set_op(uapi::VSOCK_OP_RW).set_len(read_cnt);
                        METRICS.rx_bytes_count.add(read_cnt as u64);
                    }
                    // Account for the bytes (if any) just promised to the guest, and record the
                    // credit info this packet carries to the peer.
                    self.rx_cnt += Wrapping(pkt.hdr.len());
                    self.last_fwd_cnt_to_peer = self.fwd_cnt;
                    return Ok(());
                }
                Err(VsockError::GuestMemoryMmap(GuestMemoryError::IOError(err)))
                    if err.kind() == ErrorKind::WouldBlock =>
                {
                    // This shouldn't actually happen (receiving EWOULDBLOCK after EPOLLIN), but
                    // apparently it does, so we need to handle it gracefully.
                    warn!(
                        "vsock: unexpected EWOULDBLOCK while reading from backing stream: lp={}, \
                         pp={}, err={:?}",
                        self.local_port, self.peer_port, err
                    );
                }
                Err(err) => {
                    // We are not expecting any other errors when reading from the underlying
                    // stream. If any show up, we'll immediately kill this connection.
                    METRICS.rx_read_fails.inc();
                    error!(
                        "vsock: error reading from backing stream: lp={}, pp={}, err={:?}",
                        self.local_port, self.peer_port, err
                    );
                    pkt.hdr.set_op(uapi::VSOCK_OP_RST);
                    self.last_fwd_cnt_to_peer = self.fwd_cnt;
                    return Ok(());
                }
            };
        }
        // A credit update is basically a no-op, so we should only waste a perfectly fine RX
        // buffer on it if we really have nothing else to say, hence we check for this RX
        // indication last.
        if self.pending_rx.remove(PendingRx::CreditUpdate) && !self.has_pending_rx() {
            pkt.hdr.set_op(uapi::VSOCK_OP_CREDIT_UPDATE);
            self.last_fwd_cnt_to_peer = self.fwd_cnt;
            return Ok(());
        }
        // We've already checked for all conditions that would have produced a packet, so
        // if we got to here, we don't know how to yield one.
        Err(VsockError::NoData)
    }

    /// Deliver a guest-generated packet to this connection.
    ///
    /// This forwards the data in RW packets to the host stream, and absorbs control packets,
    /// using them to manage the internal connection state.
    ///
    /// Returns:
    /// always `Ok(())`: the packet has been consumed;
    fn send_pkt(&mut self, pkt: &VsockPacketTx) -> Result<(), VsockError> {
        // Update the peer credit information. Credit info rides along on every vsock packet
        // header, so it is refreshed unconditionally, before dispatching on the op.
        self.peer_buf_alloc = pkt.hdr.buf_alloc();
        self.peer_fwd_cnt = Wrapping(pkt.hdr.fwd_cnt());
        METRICS.tx_packets_count.inc();
        match self.state {
            // Most frequent case: this is an established connection that needs to forward some
            // data to the host stream. Also works for a connection that has begun shutting
            // down, but the peer still has some data to send.
            ConnState::Established | ConnState::PeerClosed(_, false)
                if pkt.hdr.op() == uapi::VSOCK_OP_RW =>
            {
                if pkt.buf_size() == 0 {
                    info!(
                        "vsock: dropping empty data packet from guest (lp={}, pp={}",
                        self.local_port, self.peer_port
                    );
                    return Ok(());
                }
                // Unwrapping here is safe, since we just checked `pkt.buf()` above.
                if let Err(err) = self.send_bytes(pkt) {
                    // If we can't write to the host stream, that's an unrecoverable error, so
                    // we'll terminate this connection.
                    warn!(
                        "vsock: error writing to local stream (lp={}, pp={}): {:?}",
                        self.local_port, self.peer_port, err
                    );
                    self.kill();
                    return Ok(());
                }
                // We might've just consumed some data. If that's the case, we might need to
                // update the peer on our buffer space situation, so that it can keep sending
                // data packets our way.
                if self.peer_needs_credit_update() {
                    self.pending_rx.insert(PendingRx::CreditUpdate);
                }
            }
            // Next up: receiving a response / confirmation for a host-initiated connection.
            // We'll move to an Established state, and pass on the good news through the host
            // stream.
            ConnState::LocalInit if pkt.hdr.op() == uapi::VSOCK_OP_RESPONSE => {
                // Disarm the request timeout set by `recv_pkt()`: the guest answered in time.
                self.expiry = None;
                self.state = ConnState::Established;
            }
            // The peer wants to shut down an established connection. If they have nothing
            // more to send nor receive, and we don't have to wait to drain our TX buffer, we
            // can schedule an RST packet (to terminate the connection on the next recv call).
            // Otherwise, we'll arm the kill timer.
            ConnState::Established if pkt.hdr.op() == uapi::VSOCK_OP_SHUTDOWN => {
                let recv_off = pkt.hdr.flags() & uapi::VSOCK_FLAGS_SHUTDOWN_RCV != 0;
                let send_off = pkt.hdr.flags() & uapi::VSOCK_FLAGS_SHUTDOWN_SEND != 0;
                self.state = ConnState::PeerClosed(recv_off, send_off);
                if recv_off && send_off {
                    if self.tx_buf.is_empty() {
                        self.pending_rx.insert(PendingRx::Rst);
                    } else {
                        self.expiry = Some(
                            Instant::now() + Duration::from_millis(defs::CONN_SHUTDOWN_TIMEOUT_MS),
                        );
                    }
                }
            }
            // The peer wants to update a shutdown request, with more receive/send indications.
            // The same logic as above applies. Note that shutdown indications only ever
            // accumulate (OR), per the vsock protocol: they cannot be taken back.
            ConnState::PeerClosed(ref mut recv_off, ref mut send_off)
                if pkt.hdr.op() == uapi::VSOCK_OP_SHUTDOWN =>
            {
                *recv_off = *recv_off || (pkt.hdr.flags() & uapi::VSOCK_FLAGS_SHUTDOWN_RCV != 0);
                *send_off = *send_off || (pkt.hdr.flags() & uapi::VSOCK_FLAGS_SHUTDOWN_SEND != 0);
                if *recv_off && *send_off && self.tx_buf.is_empty() {
                    self.pending_rx.insert(PendingRx::Rst);
                }
            }
            // A credit update from our peer is valid only in a state which allows data
            // transfer towards the peer.
            ConnState::Established | ConnState::PeerInit | ConnState::PeerClosed(false, _)
                if pkt.hdr.op() == uapi::VSOCK_OP_CREDIT_UPDATE =>
            {
                // Nothing to do here; we've already updated peer credit.
            }
            // A credit request from our peer is valid only in a state which allows data
            // transfer from the peer. We'll respond with a credit update packet.
            ConnState::Established | ConnState::PeerInit | ConnState::PeerClosed(_, false)
                if pkt.hdr.op() == uapi::VSOCK_OP_CREDIT_REQUEST =>
            {
                self.pending_rx.insert(PendingRx::CreditUpdate);
            }
            _ => {
                debug!(
                    "vsock: dropping invalid TX pkt for connection: state={:?}, pkt.hdr={:?}",
                    self.state, pkt.hdr
                );
            }
        };
        Ok(())
    }

    /// Check if the connection has any pending packet addressed to the peer.
    fn has_pending_rx(&self) -> bool {
        !self.pending_rx.is_empty()
    }
}
impl<S> AsRawFd for VsockConnection<S>
where
    S: VsockConnectionBackend + Debug,
{
    /// Get the file descriptor that this connection wants polled.
    ///
    /// The connection is interested in being notified about EPOLLIN / EPOLLOUT events on the
    /// host stream.
    fn as_raw_fd(&self) -> RawFd {
        // Delegate straight to the backing host-side stream; the connection owns no other
        // pollable resource.
        self.stream.as_raw_fd()
    }
}
impl<S> VsockEpollListener for VsockConnection<S>
where
    S: VsockConnectionBackend + Debug,
{
    /// Get the event set that this connection is interested in.
    ///
    /// A connection will want to be notified when:
    /// - data is available to be read from the host stream, so that it can store an RW pending RX
    ///   indication; and
    /// - data can be written to the host stream, and the TX buffer needs to be flushed.
    fn get_polled_evset(&self) -> EventSet {
        let mut evset = EventSet::empty();
        if !self.tx_buf.is_empty() {
            // There's data waiting in the TX buffer, so we are interested in being notified
            // when writing to the host stream wouldn't block.
            evset.insert(EventSet::OUT);
        }
        // We're generally interested in being notified when data can be read from the host
        // stream, unless we're in a state which doesn't allow moving data from host to guest.
        match self.state {
            ConnState::Killed | ConnState::LocalClosed | ConnState::PeerClosed(true, _) => (),
            // No point in reading more data while the peer can't absorb what we already have.
            _ if self.need_credit_update_from_peer() => (),
            _ => evset.insert(EventSet::IN),
        }
        evset
    }

    /// Notify the connection about an event (or set of events) that it was interested in.
    fn notify(&mut self, evset: EventSet) {
        if evset.contains(EventSet::IN) {
            // Data can be read from the host stream. Setting a Rw pending indication, so that
            // the muxer will know to call `recv_pkt()` later.
            self.pending_rx.insert(PendingRx::Rw);
        }
        if evset.contains(EventSet::OUT) {
            // Data can be written to the host stream. Time to flush out the TX buffer.
            //
            if self.tx_buf.is_empty() {
                METRICS.conn_event_fails.inc();
                info!("vsock: connection received unexpected EPOLLOUT event");
                return;
            }
            let flushed = self
                .tx_buf
                .flush_to(&mut self.stream)
                .unwrap_or_else(|err| {
                    METRICS.tx_flush_fails.inc();
                    warn!(
                        "vsock: error flushing TX buf for (lp={}, pp={}): {:?}",
                        self.local_port, self.peer_port, err
                    );
                    match err {
                        VsockCsmError::TxBufFlush(inner)
                            if inner.kind() == ErrorKind::WouldBlock =>
                        {
                            // This should never happen (EWOULDBLOCK after EPOLLOUT), but
                            // it does, so let's absorb it.
                        }
                        _ => self.kill(),
                    };
                    // Treat any flush failure as "flushed nothing" for accounting purposes.
                    0
                });
            self.fwd_cnt += wrap_usize_to_u32(flushed);
            METRICS.tx_bytes_count.add(flushed as u64);
            // If this connection was shutting down, but is waiting to drain the TX buffer
            // before forceful termination, the wait might be over.
            if self.state == ConnState::PeerClosed(true, true) && self.tx_buf.is_empty() {
                self.pending_rx.insert(PendingRx::Rst);
            } else if self.peer_needs_credit_update() {
                // If we've freed up some more buffer space, we may need to let the peer know it
                // can safely send more data our way.
                self.pending_rx.insert(PendingRx::CreditUpdate);
            }
        }
    }
}
impl<S> VsockConnection<S>
where
    S: VsockConnectionBackend + Debug,
{
    /// Create a new guest-initiated connection object.
    ///
    /// `peer_buf_alloc` is taken from the guest's request packet header, so credit-based flow
    /// control is live from the first packet.
    pub fn new_peer_init(
        stream: S,
        local_cid: u64,
        peer_cid: u64,
        local_port: u32,
        peer_port: u32,
        peer_buf_alloc: u32,
    ) -> Self {
        Self {
            local_cid,
            peer_cid,
            local_port,
            peer_port,
            stream,
            state: ConnState::PeerInit,
            tx_buf: TxBuf::new(),
            fwd_cnt: Wrapping(0),
            peer_buf_alloc,
            peer_fwd_cnt: Wrapping(0),
            rx_cnt: Wrapping(0),
            last_fwd_cnt_to_peer: Wrapping(0),
            // First packet out must be the connection response (VSOCK_OP_RESPONSE).
            pending_rx: PendingRxSet::from(PendingRx::Response),
            expiry: None,
        }
    }

    /// Create a new host-initiated connection object.
    ///
    /// Peer credit starts at 0; it gets refreshed by the guest's response packet.
    pub fn new_local_init(
        stream: S,
        local_cid: u64,
        peer_cid: u64,
        local_port: u32,
        peer_port: u32,
    ) -> Self {
        Self {
            local_cid,
            peer_cid,
            local_port,
            peer_port,
            stream,
            state: ConnState::LocalInit,
            tx_buf: TxBuf::new(),
            fwd_cnt: Wrapping(0),
            peer_buf_alloc: 0,
            peer_fwd_cnt: Wrapping(0),
            rx_cnt: Wrapping(0),
            last_fwd_cnt_to_peer: Wrapping(0),
            // First packet out must be the connection request (VSOCK_OP_REQUEST).
            pending_rx: PendingRxSet::from(PendingRx::Request),
            expiry: None,
        }
    }

    /// Check if there is an expiry (kill) timer set for this connection, sometime in the
    /// future.
    pub fn will_expire(&self) -> bool {
        // Idiomatic `Option` combinator instead of a manual `match` (clippy-style cleanup).
        self.expiry.is_some_and(|t| t > Instant::now())
    }

    /// Check if this connection needs to be scheduled for forceful termination, due to its
    /// kill timer having expired.
    pub fn has_expired(&self) -> bool {
        self.expiry.is_some_and(|t| t <= Instant::now())
    }

    /// Get the kill timer value, if one is set.
    pub fn expiry(&self) -> Option<Instant> {
        self.expiry
    }

    /// Schedule the connection to be forcefully terminated ASAP (i.e. the next time the
    /// connection is asked to yield a packet, via `recv_pkt()`).
    pub fn kill(&mut self) {
        self.state = ConnState::Killed;
        self.pending_rx.insert(PendingRx::Rst);
    }

    /// Return the connections state.
    pub fn state(&self) -> ConnState {
        self.state
    }

    /// Send some raw, untracked, data straight to the underlying connected stream.
    /// Returns: number of bytes written, or the error describing the write failure.
    ///
    /// Warning: this will bypass the connection state machine and write directly to the
    /// underlying stream. No account of this write is kept, which includes bypassing
    /// vsock flow control.
    pub fn send_bytes_raw(&mut self, buf: &[u8]) -> Result<usize, VsockCsmError> {
        self.stream.write(buf).map_err(VsockCsmError::StreamWrite)
    }

    /// Send some raw data (a byte-slice) to the host stream.
    ///
    /// Raw data can either be sent straight to the host stream, or to our TX buffer, if the
    /// former fails.
    fn send_bytes(&mut self, pkt: &VsockPacketTx) -> Result<(), VsockError> {
        let len = pkt.hdr.len();
        // If there is data in the TX buffer, that means we're already registered for EPOLLOUT
        // events on the underlying stream. Therefore, there's no point in attempting a write
        // at this point. `self.notify()` will get called when EPOLLOUT arrives, and it will
        // attempt to drain the TX buffer then.
        if !self.tx_buf.is_empty() {
            return pkt
                .write_from_offset_to(&mut self.tx_buf, 0, len)
                .map(|_| ());
        }
        // The TX buffer is empty, so we can try to write straight to the host stream.
        let written = match pkt.write_from_offset_to(&mut self.stream, 0, len) {
            Ok(cnt) => cnt,
            Err(VsockError::GuestMemoryMmap(GuestMemoryError::IOError(err)))
                if err.kind() == ErrorKind::WouldBlock =>
            {
                // Absorb any would-block errors, since we can always try again later.
                0
            }
            Err(err) => {
                // We don't know how to handle any other write error, so we'll send it up
                // the call chain.
                METRICS.tx_write_fails.inc();
                return Err(err);
            }
        };
        // Move the "forwarded bytes" counter ahead by how much we were able to send out.
        // Safe to unwrap because the maximum value is pkt.len(), which is a u32.
        self.fwd_cnt += written;
        METRICS.tx_bytes_count.add(written as u64);
        // If we couldn't write the whole slice, we'll need to push the remaining data to our
        // buffer.
        if written < len {
            pkt.write_from_offset_to(&mut self.tx_buf, written, len - written)?;
        }
        Ok(())
    }

    /// Check if the credit information the peer has last received from us is outdated.
    fn peer_needs_credit_update(&self) -> bool {
        let peer_seen_free_buf =
            Wrapping(defs::CONN_TX_BUF_SIZE) - (self.fwd_cnt - self.last_fwd_cnt_to_peer);
        peer_seen_free_buf < Wrapping(defs::CONN_CREDIT_UPDATE_THRESHOLD)
    }

    /// Check if we need to ask the peer for a credit update before sending any more data its
    /// way.
    fn need_credit_update_from_peer(&self) -> bool {
        self.peer_avail_credit() == 0
    }

    /// Get the maximum number of bytes that we can send to our peer, without overflowing its
    /// buffer.
    fn peer_avail_credit(&self) -> u32 {
        (Wrapping(self.peer_buf_alloc) - (self.rx_cnt - self.peer_fwd_cnt)).0
    }

    /// Prepare a packet header for transmission to our peer.
    fn init_pkt_hdr(&self, hdr: &mut VsockPacketHeader) {
        hdr.set_src_cid(self.local_cid)
            .set_dst_cid(self.peer_cid)
            .set_src_port(self.local_port)
            .set_dst_port(self.peer_port)
            .set_type(uapi::VSOCK_TYPE_STREAM)
            .set_buf_alloc(defs::CONN_TX_BUF_SIZE)
            .set_fwd_cnt(self.fwd_cnt.0);
    }
}
#[cfg(test)]
mod tests {
use std::io::{Error as IoError, ErrorKind, Write};
use std::os::unix::io::RawFd;
use std::time::{Duration, Instant};
use vm_memory::{VolatileMemoryError, VolatileSlice};
use vmm_sys_util::eventfd::EventFd;
use super::super::super::defs::uapi;
use super::super::defs as csm_defs;
use super::*;
use crate::devices::virtio::vsock::device::{RXQ_INDEX, TXQ_INDEX};
use crate::devices::virtio::vsock::test_utils;
use crate::devices::virtio::vsock::test_utils::TestContext;
use crate::vstate::memory::BitmapSlice;
const LOCAL_CID: u64 = 2;
const PEER_CID: u64 = 3;
const LOCAL_PORT: u32 = 1002;
const PEER_PORT: u32 = 1003;
const PEER_BUF_ALLOC: u32 = 64 * 1024;
    // Simulated state for one direction (read or write) of a `TestStream`. The exact effect
    // of each variant is defined by the stream's Read/Write impls (not shown in this chunk);
    // presumably: Closed -> EOF/closed behavior, Error(kind) -> an I/O error of that kind,
    // Ready -> normal operation, WouldBlock -> EWOULDBLOCK. TODO confirm against the impls.
    #[derive(Debug)]
    enum StreamState {
        Closed,
        Error(ErrorKind),
        Ready,
        WouldBlock,
    }
    // An in-memory fake of a connection backend, used to drive `VsockConnection` in tests
    // without a real host socket.
    #[derive(Debug)]
    struct TestStream {
        // Event fd backing the stream's pollable file descriptor (created non-blocking).
        fd: EventFd,
        // Bytes the stream will serve to readers.
        read_buf: Vec<u8>,
        // Simulated state of the read side.
        read_state: StreamState,
        // Bytes captured from writers.
        write_buf: Vec<u8>,
        // Simulated state of the write side.
        write_state: StreamState,
    }
impl TestStream {
        // Build a fresh test stream: both directions Ready, both buffers empty, with a
        // non-blocking eventfd as the pollable descriptor.
        fn new() -> Self {
            Self {
                fd: EventFd::new(libc::EFD_NONBLOCK).unwrap(),
                read_state: StreamState::Ready,
                write_state: StreamState::Ready,
                read_buf: Vec::new(),
                write_buf: Vec::new(),
            }
        }
fn new_with_read_buf(buf: &[u8]) -> Self {
let mut stream = Self::new();
stream.read_buf = buf.to_vec();
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/vsock/csm/txbuf.rs | src/vmm/src/devices/virtio/vsock/csm/txbuf.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
use std::fmt::Debug;
use std::io::Write;
use std::num::Wrapping;
use vm_memory::{VolatileMemoryError, VolatileSlice, WriteVolatile};
use super::{VsockCsmError, defs};
use crate::utils::wrap_usize_to_u32;
use crate::vstate::memory::{BitmapSlice, Bytes};
/// A simple ring-buffer implementation, used by vsock connections to buffer TX (guest -> host)
/// data. Memory for this buffer is allocated lazily, since buffering will only be needed when
/// the host can't read fast enough.
///
/// `head` and `tail` are monotonically increasing wrapping counters; offsets into `data` are
/// obtained by reducing them modulo the buffer size.
#[derive(Debug)]
pub struct TxBuf {
    /// The actual u8 buffer - only allocated after the first push.
    data: Option<Box<[u8]>>,
    /// Ring-buffer head offset - where new data is pushed to.
    head: Wrapping<u32>,
    /// Ring-buffer tail offset - where data is flushed from.
    tail: Wrapping<u32>,
}
impl TxBuf {
    /// Total buffer size, in bytes.
    const SIZE: usize = defs::CONN_TX_BUF_SIZE as usize;

    /// Ring-buffer constructor.
    ///
    /// The backing storage is not allocated here; it is created lazily by the
    /// first `push()`.
    pub fn new() -> Self {
        Self {
            data: None,
            head: Wrapping(0),
            tail: Wrapping(0),
        }
    }

    /// Get the used length of this buffer - number of bytes that have been pushed in, but not
    /// yet flushed out.
    pub fn len(&self) -> usize {
        // `head` and `tail` are free-running wrapping counters, so this
        // subtraction stays correct even after either counter wraps u32.
        (self.head - self.tail).0 as usize
    }

    /// Push a byte slice onto the ring-buffer.
    ///
    /// Either the entire source slice will be pushed to the ring-buffer, or none of it, if
    /// there isn't enough room, in which case `Err(Error::TxBufFull)` is returned.
    pub fn push(&mut self, src: &VolatileSlice<impl BitmapSlice>) -> Result<(), VsockCsmError> {
        // Error out if there's no room to push the entire slice. The push is
        // all-or-nothing: on error, no partial data is written.
        if self.len() + src.len() > Self::SIZE {
            return Err(VsockCsmError::TxBufFull);
        }

        // Lazily allocate the backing storage on the first push.
        let data = self
            .data
            .get_or_insert_with(|| vec![0u8; Self::SIZE].into_boxed_slice());

        // Buffer head, as an offset into the data slice.
        let head_ofs = self.head.0 as usize % Self::SIZE;

        // Pushing a slice to this buffer can take either one or two slice copies: - one copy,
        // if the slice fits between `head_ofs` and `Self::SIZE`; or - two copies, if the
        // ring-buffer head wraps around.

        // First copy length: we can only go from the head offset up to the total buffer size.
        let len = std::cmp::min(Self::SIZE - head_ofs, src.len());
        // Ignoring the result is fine: the destination range is exactly `len`
        // bytes, which was just computed to fit within both slices.
        let _ = src.read(&mut data[head_ofs..(head_ofs + len)], 0);

        // If the slice didn't fit, the buffer head will wrap around, and pushing continues
        // from the start of the buffer (`&self.data[0]`).
        if len < src.len() {
            let _ = src.read(&mut data[..(src.len() - len)], len);
        }

        // Either way, we've just pushed exactly `src.len()` bytes, so that's the amount by
        // which the (wrapping) buffer head needs to move forward.
        self.head += wrap_usize_to_u32(src.len());

        Ok(())
    }

    /// Flush the contents of the ring-buffer to a writable stream.
    ///
    /// Return the number of bytes that have been transferred out of the ring-buffer and into
    /// the writable stream.
    pub fn flush_to<W: Write + Debug>(&mut self, sink: &mut W) -> Result<usize, VsockCsmError> {
        // Nothing to do, if this buffer holds no data.
        if self.is_empty() {
            return Ok(0);
        }

        // Buffer tail, as an offset into the buffer data slice.
        let tail_ofs = self.tail.0 as usize % Self::SIZE;

        // Flushing the buffer can take either one or two writes:
        // - one write, if the tail doesn't need to wrap around to reach the head; or
        // - two writes, if the tail would wrap around: tail to slice end, then slice end to head.

        // First write length: the lesser of tail to slice end, or tail to head.
        let len_to_write = std::cmp::min(Self::SIZE - tail_ofs, self.len());

        // It's safe to unwrap here, since we've already checked if the buffer was empty.
        let data = self.data.as_ref().unwrap();

        // Issue the first write. NOTE: any I/O error (including `WouldBlock`)
        // is propagated to the caller as `TxBufFlush`; in that case the tail
        // is left untouched, so the caller can simply retry later.
        let written = sink
            .write(&data[tail_ofs..(tail_ofs + len_to_write)])
            .map_err(VsockCsmError::TxBufFlush)?;

        // Move the buffer tail ahead by the amount (of bytes) we were able to flush out.
        self.tail += wrap_usize_to_u32(written);

        // If we weren't able to flush out as much as we tried, there's no point in attempting
        // our second write.
        if written < len_to_write {
            return Ok(written);
        }

        // Attempt our second write. This will return immediately if a second write isn't
        // needed, since checking for an empty buffer is the first thing we do in this
        // function.
        //
        // Interesting corner case: if we've already written some data in the first pass,
        // and then the second write fails, we will consider the flush action a success
        // and return the number of bytes written in the first pass.
        Ok(written + self.flush_to(sink).unwrap_or(0))
    }

    /// Check if the buffer holds any data that hasn't yet been flushed out.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
impl WriteVolatile for TxBuf {
fn write_volatile<B: BitmapSlice>(
&mut self,
buf: &VolatileSlice<B>,
) -> Result<usize, VolatileMemoryError> {
self.push(buf)
.map(|()| buf.len())
.map_err(|err| VolatileMemoryError::IOError(std::io::Error::other(err)))
}
}
#[cfg(test)]
mod tests {
    use std::io::{Error as IoError, ErrorKind, Write};

    use super::*;

    /// A `Write` sink with a configurable capacity and a one-shot injectable
    /// error, used to exercise partial and failing `TxBuf::flush_to()` calls.
    #[derive(Debug)]
    struct TestSink {
        // Bytes accepted so far.
        data: Vec<u8>,
        // If set, the next `write()` fails once with this error.
        err: Option<IoError>,
        // Maximum total number of bytes the sink will accept.
        capacity: usize,
    }

    impl TestSink {
        const DEFAULT_CAPACITY: usize = 2 * TxBuf::SIZE;

        fn new() -> Self {
            Self {
                data: Vec::with_capacity(Self::DEFAULT_CAPACITY),
                err: None,
                capacity: Self::DEFAULT_CAPACITY,
            }
        }
    }

    impl Write for TestSink {
        fn write(&mut self, src: &[u8]) -> Result<usize, IoError> {
            // An injected error fires exactly once, then is cleared.
            if let Some(err) = self.err.take() {
                return Err(err);
            }
            // Accept only as much as remaining capacity allows (models a
            // short write).
            let len_to_push = std::cmp::min(self.capacity - self.data.len(), src.len());
            self.data.extend_from_slice(&src[..len_to_push]);
            Ok(len_to_push)
        }

        fn flush(&mut self) -> Result<(), IoError> {
            Ok(())
        }
    }

    impl TestSink {
        /// Drop accepted data and any pending injected error.
        fn clear(&mut self) {
            self.data = Vec::with_capacity(self.capacity);
            self.err = None;
        }

        /// Arrange for the next `write()` to fail with `err`.
        fn set_err(&mut self, err: IoError) {
            self.err = Some(err);
        }

        /// Change the capacity, truncating already-accepted data if needed.
        fn set_capacity(&mut self, capacity: usize) {
            self.capacity = capacity;
            if self.data.len() > self.capacity {
                self.data.resize(self.capacity, 0);
            }
        }
    }

    #[test]
    fn test_push_nowrap() {
        let mut txbuf = TxBuf::new();
        let mut sink = TestSink::new();
        // The backing storage must not exist before the first push.
        assert!(txbuf.is_empty());
        assert!(txbuf.data.is_none());

        txbuf
            .push(&VolatileSlice::from([1, 2, 3, 4].as_mut_slice()))
            .unwrap();
        txbuf
            .push(&VolatileSlice::from([5, 6, 7, 8].as_mut_slice()))
            .unwrap();
        txbuf.flush_to(&mut sink).unwrap();
        assert_eq!(sink.data, [1, 2, 3, 4, 5, 6, 7, 8]);
        sink.clear();

        // Same scenario via the `WriteVolatile` adapter.
        txbuf
            .write_all_volatile(&VolatileSlice::from([10, 11, 12, 13].as_mut_slice()))
            .unwrap();
        txbuf
            .write_all_volatile(&VolatileSlice::from([14, 15, 16, 17].as_mut_slice()))
            .unwrap();
        txbuf.flush_to(&mut sink).unwrap();
        assert_eq!(sink.data, [10, 11, 12, 13, 14, 15, 16, 17]);
        sink.clear();
    }

    #[test]
    fn test_push_wrap() {
        let mut txbuf = TxBuf::new();
        let mut sink = TestSink::new();
        // Advance head/tail to SIZE - 2, so the following 4-byte pushes wrap
        // around the end of the ring.
        let mut tmp: Vec<u8> = vec![0; TxBuf::SIZE - 2];
        txbuf
            .push(&VolatileSlice::from(tmp.as_mut_slice()))
            .unwrap();
        txbuf.flush_to(&mut sink).unwrap();
        sink.clear();

        txbuf
            .push(&VolatileSlice::from([1, 2, 3, 4].as_mut_slice()))
            .unwrap();
        assert_eq!(txbuf.flush_to(&mut sink).unwrap(), 4);
        assert_eq!(sink.data, [1, 2, 3, 4]);
        sink.clear();

        // Wrapping push via the `WriteVolatile` adapter.
        txbuf
            .write_all_volatile(&VolatileSlice::from([5, 6, 7, 8].as_mut_slice()))
            .unwrap();
        assert_eq!(txbuf.flush_to(&mut sink).unwrap(), 4);
        assert_eq!(sink.data, [5, 6, 7, 8]);
    }

    #[test]
    fn test_push_error() {
        let mut txbuf = TxBuf::new();
        // Fill the buffer to one byte short of capacity.
        let mut tmp = vec![0u8; TxBuf::SIZE - 1];
        txbuf
            .push(&VolatileSlice::from(tmp.as_mut_slice()))
            .unwrap();
        // A 2-byte push can no longer fit and must be rejected atomically.
        match txbuf.push(&VolatileSlice::from([1, 2].as_mut_slice())) {
            Err(VsockCsmError::TxBufFull) => (),
            other => panic!("Unexpected result: {:?}", other),
        }
        // The same failure, surfaced through the `WriteVolatile` adapter.
        match txbuf.write_volatile(&VolatileSlice::from([1, 2].as_mut_slice())) {
            Err(err) => {
                assert_eq!(
                    err.to_string(),
                    "Attempted to push data to a full TX buffer"
                );
            }
            other => panic!("Unexpected result: {:?}", other),
        }
    }

    #[test]
    fn test_incomplete_flush() {
        let mut txbuf = TxBuf::new();
        let mut sink = TestSink::new();
        // A 2-byte sink forces a short write: only half the data drains.
        sink.set_capacity(2);
        txbuf
            .push(&VolatileSlice::from([1, 2, 3, 4].as_mut_slice()))
            .unwrap();
        assert_eq!(txbuf.flush_to(&mut sink).unwrap(), 2);
        assert_eq!(txbuf.len(), 2);
        assert_eq!(sink.data, [1, 2]);
        // Growing the sink lets the remaining bytes through on retry.
        sink.set_capacity(4);
        assert_eq!(txbuf.flush_to(&mut sink).unwrap(), 2);
        assert!(txbuf.is_empty());
        assert_eq!(sink.data, [1, 2, 3, 4]);
    }

    #[test]
    fn test_flush_error() {
        const EACCES: i32 = 13;

        let mut txbuf = TxBuf::new();
        let mut sink = TestSink::new();
        txbuf
            .push(&VolatileSlice::from([1, 2, 3, 4].as_mut_slice()))
            .unwrap();
        // Injected sink errors must come back wrapped in `TxBufFlush`.
        let io_err = IoError::from_raw_os_error(EACCES);
        sink.set_err(io_err);
        match txbuf.flush_to(&mut sink) {
            Err(VsockCsmError::TxBufFlush(ref err))
                if err.kind() == ErrorKind::PermissionDenied => {}
            other => panic!("Unexpected result: {:?}", other),
        }
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/devices/virtio/vsock/csm/mod.rs | src/vmm/src/devices/virtio/vsock/csm/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
/// This module implements our vsock connection state machine. The heavy lifting is done by
/// `connection::VsockConnection`, while this file only defines some constants and helper structs.
mod connection;
mod txbuf;
pub use connection::{VsockConnection, VsockConnectionBackend};
/// Tunable constants for the vsock connection state machine.
pub mod defs {
    /// Vsock connection TX buffer capacity.
    pub const CONN_TX_BUF_SIZE: u32 = 64 * 1024;
    /// When the guest thinks we have less than this amount of free buffer space,
    /// we will send them a credit update packet.
    pub const CONN_CREDIT_UPDATE_THRESHOLD: u32 = 4 * 1024;
    /// Connection request timeout, in millis.
    pub const CONN_REQUEST_TIMEOUT_MS: u64 = 2000;
    /// Connection graceful shutdown timeout, in millis.
    pub const CONN_SHUTDOWN_TIMEOUT_MS: u64 = 2000;
}
// NOTE: the `///` doc comments below double as the `Display` strings via the
// `displaydoc` derive. Editing them changes user-visible error messages and
// breaks the string assertions in the tests at the bottom of this file.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum VsockCsmError {
    /// Attempted to push data to a full TX buffer
    TxBufFull,
    /// An I/O error occurred, when attempting to flush the connection TX buffer: {0}
    TxBufFlush(std::io::Error),
    /// An I/O error occurred, when attempting to write data to the host-side stream: {0}
    StreamWrite(std::io::Error),
}
/// A vsock connection state.
///
/// Transitions between these states are driven by `connection::VsockConnection`,
/// which does the heavy lifting of the connection state machine.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ConnState {
    /// The connection has been initiated by the host end, but is yet to be confirmed by the guest.
    LocalInit,
    /// The connection has been initiated by the guest, but we are yet to confirm it, by sending
    /// a response packet (VSOCK_OP_RESPONSE).
    PeerInit,
    /// The connection handshake has been performed successfully, and data can now be exchanged.
    Established,
    /// The host (AF_UNIX) socket was closed.
    LocalClosed,
    /// A VSOCK_OP_SHUTDOWN packet was received from the guest. The tuple represents the guest R/W
    /// indication: (will_not_recv_anymore_data, will_not_send_anymore_data).
    PeerClosed(bool, bool),
    /// The connection is scheduled to be forcefully terminated as soon as possible.
    Killed,
}
/// An RX indication, used by `VsockConnection` to schedule future `recv_pkt()` responses.
/// For instance, after being notified that there is available data to be read from the host stream
/// (via `notify()`), the connection will store a `PendingRx::Rw` to be later inspected by
/// `recv_pkt()`.
#[derive(Debug, Clone, Copy, PartialEq)]
enum PendingRx {
    /// We need to yield a connection request packet (VSOCK_OP_REQUEST).
    Request = 0,
    /// We need to yield a connection response packet (VSOCK_OP_RESPONSE).
    Response = 1,
    /// We need to yield a forceful connection termination packet (VSOCK_OP_RST).
    Rst = 2,
    /// We need to yield a data packet (VSOCK_OP_RW), by reading from the AF_UNIX socket.
    Rw = 3,
    /// We need to yield a credit update packet (VSOCK_OP_CREDIT_UPDATE).
    CreditUpdate = 4,
}

impl PendingRx {
    /// Map this indication to its single-bit mask, as used by the set
    /// operations in `PendingRxSet`. Each variant owns exactly one bit,
    /// matching its discriminant above.
    fn into_mask(self) -> u16 {
        match self {
            PendingRx::Request => 1 << 0,
            PendingRx::Response => 1 << 1,
            PendingRx::Rst => 1 << 2,
            PendingRx::Rw => 1 << 3,
            PendingRx::CreditUpdate => 1 << 4,
        }
    }
}
/// A set of RX indications (`PendingRx` items), stored as a bit mask in a
/// single `u16` (one bit per `PendingRx` variant).
#[derive(Debug)]
struct PendingRxSet {
    data: u16,
}

impl PendingRxSet {
    /// Insert an item into the set (no-op if already present).
    fn insert(&mut self, it: PendingRx) {
        self.data |= it.into_mask();
    }

    /// Remove an item from the set, returning whether it was present
    /// beforehand.
    fn remove(&mut self, it: PendingRx) -> bool {
        let mask = it.into_mask();
        let was_present = self.data & mask != 0;
        self.data &= !mask;
        was_present
    }

    /// Check if an item is present in this set.
    fn contains(&self, it: PendingRx) -> bool {
        (self.data & it.into_mask()) != 0
    }

    /// Check if the set is empty.
    fn is_empty(&self) -> bool {
        self.data == 0
    }
}

/// Create a set containing only one item.
impl From<PendingRx> for PendingRxSet {
    fn from(it: PendingRx) -> Self {
        PendingRxSet {
            data: it.into_mask(),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// `displaydoc` turns each variant's doc comment into its `Display`
    /// output; pin those strings here so they can't drift silently.
    #[test]
    fn test_display_error() {
        assert_eq!(
            VsockCsmError::TxBufFull.to_string(),
            "Attempted to push data to a full TX buffer"
        );
        assert_eq!(
            VsockCsmError::TxBufFlush(std::io::Error::from(std::io::ErrorKind::Other)).to_string(),
            "An I/O error occurred, when attempting to flush the connection TX buffer: other error"
        );
        assert_eq!(
            VsockCsmError::StreamWrite(std::io::Error::from(std::io::ErrorKind::Other)).to_string(),
            "An I/O error occurred, when attempting to write data to the host-side stream: other error"
        );
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.