repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/acpi-tables/src/dsdt.rs | src/acpi-tables/src/dsdt.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::mem::size_of;
use vm_memory::{Address, Bytes, GuestAddress, GuestMemory};
use zerocopy::IntoBytes;
use crate::{AcpiError, Result, Sdt, SdtHeader, checksum};
/// Differentiated System Description Table (DSDT)
///
/// Table that includes hardware definition blocks.
/// More information about this table can be found in the ACPI specification:
/// https://uefi.org/specs/ACPI/6.5/05_ACPI_Software_Programming_Model.html#differentiated-system-description-table-dsdt
#[derive(Debug, Clone)]
pub struct Dsdt {
header: SdtHeader,
definition_block: Vec<u8>,
}
impl Dsdt {
pub fn new(
oem_id: [u8; 6],
oem_table_id: [u8; 8],
oem_revision: u32,
definition_block: Vec<u8>,
) -> Self {
let header = SdtHeader::new(
*b"DSDT",
(size_of::<SdtHeader>() + definition_block.len())
.try_into()
.unwrap(),
2,
oem_id,
oem_table_id,
oem_revision,
);
let mut dsdt = Dsdt {
header,
definition_block,
};
dsdt.header.checksum =
checksum(&[dsdt.header.as_bytes(), dsdt.definition_block.as_slice()]);
dsdt
}
}
impl Sdt for Dsdt {
fn len(&self) -> usize {
self.header.length.get() as usize
}
fn write_to_guest<AS: GuestMemory>(&mut self, mem: &AS, address: GuestAddress) -> Result<()> {
mem.write_slice(self.header.as_bytes(), address)?;
let address = address
.checked_add(size_of::<SdtHeader>() as u64)
.ok_or(AcpiError::InvalidGuestAddress)?;
mem.write_slice(self.definition_block.as_slice(), address)?;
Ok(())
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/snapshot-editor/src/info.rs | src/snapshot-editor/src/info.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::path::PathBuf;
use clap::Subcommand;
use vmm::persist::MicrovmState;
use vmm::snapshot::Snapshot;
use crate::utils::*;
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum InfoVmStateError {
/// {0}
Utils(#[from] UtilsError),
}
#[derive(Debug, Subcommand)]
pub enum InfoVmStateSubCommand {
/// Print snapshot version.
Version {
/// Path to the vmstate file.
#[arg(short, long)]
vmstate_path: PathBuf,
},
/// Print info about vcpu states.
VcpuStates {
/// Path to the vmstate file.
#[arg(short, long)]
vmstate_path: PathBuf,
},
/// Print readable MicroVM state.
VmState {
/// Path to the vmstate file.
#[arg(short, long)]
vmstate_path: PathBuf,
},
}
pub fn info_vmstate_command(command: InfoVmStateSubCommand) -> Result<(), InfoVmStateError> {
match command {
InfoVmStateSubCommand::Version { vmstate_path } => info(&vmstate_path, info_version)?,
InfoVmStateSubCommand::VcpuStates { vmstate_path } => {
info(&vmstate_path, info_vcpu_states)?
}
InfoVmStateSubCommand::VmState { vmstate_path } => info(&vmstate_path, info_vmstate)?,
}
Ok(())
}
fn info(
vmstate_path: &PathBuf,
f: impl Fn(&Snapshot<MicrovmState>) -> Result<(), InfoVmStateError>,
) -> Result<(), InfoVmStateError> {
let snapshot = open_vmstate(vmstate_path)?;
f(&snapshot)?;
Ok(())
}
fn info_version(snapshot: &Snapshot<MicrovmState>) -> Result<(), InfoVmStateError> {
println!("v{}", snapshot.version());
Ok(())
}
fn info_vcpu_states(snapshot: &Snapshot<MicrovmState>) -> Result<(), InfoVmStateError> {
for (i, state) in snapshot.data.vcpu_states.iter().enumerate() {
println!("vcpu {i}:");
println!("{state:#?}");
}
Ok(())
}
fn info_vmstate(snapshot: &Snapshot<MicrovmState>) -> Result<(), InfoVmStateError> {
println!("{:#?}", snapshot.data);
Ok(())
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/snapshot-editor/src/edit_vmstate.rs | src/snapshot-editor/src/edit_vmstate.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::path::PathBuf;
use clap::Subcommand;
use clap_num::maybe_hex;
use vmm::arch::aarch64::regs::Aarch64RegisterVec;
use vmm::persist::MicrovmState;
use crate::utils::{UtilsError, open_vmstate, save_vmstate};
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum EditVmStateError {
/// {0}
Utils(#[from] UtilsError),
}
#[derive(Debug, Subcommand)]
pub enum EditVmStateSubCommand {
/// Remove registers from vcpu states.
RemoveRegs {
/// Set of registers to remove.
/// Values should be registers ids as the are defined in KVM.
#[arg(value_parser=maybe_hex::<u64>, num_args = 1.., value_delimiter = ' ')]
regs: Vec<u64>,
/// Path to the vmstate file.
#[arg(short, long)]
vmstate_path: PathBuf,
/// Path of output file.
#[arg(short, long)]
output_path: PathBuf,
},
}
pub fn edit_vmstate_command(command: EditVmStateSubCommand) -> Result<(), EditVmStateError> {
match command {
EditVmStateSubCommand::RemoveRegs {
regs,
vmstate_path,
output_path,
} => edit(&vmstate_path, &output_path, |state| {
remove_regs(state, ®s)
})?,
}
Ok(())
}
fn edit(
vmstate_path: &PathBuf,
output_path: &PathBuf,
f: impl Fn(MicrovmState) -> Result<MicrovmState, EditVmStateError>,
) -> Result<(), EditVmStateError> {
let snapshot = open_vmstate(vmstate_path)?;
let microvm_state = f(snapshot.data)?;
save_vmstate(microvm_state, output_path)?;
Ok(())
}
fn remove_regs(
mut state: MicrovmState,
remove_regs: &[u64],
) -> Result<MicrovmState, EditVmStateError> {
for (i, vcpu_state) in state.vcpu_states.iter_mut().enumerate() {
println!("Modifying state for vCPU {i}");
let mut removed = vec![false; remove_regs.len()];
let mut new_regs = Aarch64RegisterVec::default();
for reg in vcpu_state.regs.iter().filter(|reg| {
if let Some(pos) = remove_regs.iter().position(|r| r == ®.id) {
removed[pos] = true;
false
} else {
true
}
}) {
new_regs.push(reg);
}
vcpu_state.regs = new_regs;
for (reg, removed) in remove_regs.iter().zip(removed.iter()) {
print!("Register {reg:#x}: ");
match removed {
true => println!("removed"),
false => println!("not present"),
}
}
}
Ok(state)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_remove_regs() {
const KVM_REG_SIZE_U8: u64 = 0;
const KVM_REG_SIZE_U16: u64 = 0x10000000000000;
const KVM_REG_SIZE_U32: u64 = 0x20000000000000;
use vmm::arch::aarch64::regs::Aarch64RegisterRef;
use vmm::arch::aarch64::vcpu::VcpuState;
let vcpu_state = VcpuState {
regs: {
let mut regs = Aarch64RegisterVec::default();
let reg_data: u8 = 69;
regs.push(Aarch64RegisterRef::new(
KVM_REG_SIZE_U8,
®_data.to_le_bytes(),
));
let reg_data: u16 = 69;
regs.push(Aarch64RegisterRef::new(
KVM_REG_SIZE_U16,
®_data.to_le_bytes(),
));
let reg_data: u32 = 69;
regs.push(Aarch64RegisterRef::new(
KVM_REG_SIZE_U32,
®_data.to_le_bytes(),
));
regs
},
..Default::default()
};
let state = MicrovmState {
vcpu_states: vec![vcpu_state],
..Default::default()
};
let new_state = remove_regs(state, &[KVM_REG_SIZE_U32]).unwrap();
let expected_vcpu_state = VcpuState {
regs: {
let mut regs = Aarch64RegisterVec::default();
let reg_data: u8 = 69;
regs.push(Aarch64RegisterRef::new(
KVM_REG_SIZE_U8,
®_data.to_le_bytes(),
));
let reg_data: u16 = 69;
regs.push(Aarch64RegisterRef::new(
KVM_REG_SIZE_U16,
®_data.to_le_bytes(),
));
regs
},
..Default::default()
};
assert_eq!(new_state.vcpu_states[0].regs, expected_vcpu_state.regs);
}
#[test]
fn test_remove_non_existed_regs() {
const KVM_REG_SIZE_U8: u64 = 0;
const KVM_REG_SIZE_U16: u64 = 0x10000000000000;
const KVM_REG_SIZE_U32: u64 = 0x20000000000000;
use vmm::arch::aarch64::regs::Aarch64RegisterRef;
use vmm::arch::aarch64::vcpu::VcpuState;
let vcpu_state = VcpuState {
regs: {
let mut regs = Aarch64RegisterVec::default();
let reg_data: u8 = 69;
regs.push(Aarch64RegisterRef::new(
KVM_REG_SIZE_U8,
®_data.to_le_bytes(),
));
let reg_data: u16 = 69;
regs.push(Aarch64RegisterRef::new(
KVM_REG_SIZE_U16,
®_data.to_le_bytes(),
));
regs
},
..Default::default()
};
let state_clone = MicrovmState {
vcpu_states: vec![vcpu_state.clone()],
..Default::default()
};
let state = MicrovmState {
vcpu_states: vec![vcpu_state],
..Default::default()
};
let new_state = remove_regs(state_clone, &[KVM_REG_SIZE_U32]).unwrap();
assert_eq!(new_state.vcpu_states[0].regs, state.vcpu_states[0].regs);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/snapshot-editor/src/utils.rs | src/snapshot-editor/src/utils.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fs::{File, OpenOptions};
use std::path::PathBuf;
use vmm::persist::MicrovmState;
use vmm::snapshot::Snapshot;
// Some errors are only used in aarch64 code
#[allow(unused)]
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum UtilsError {
/// Can not open snapshot file: {0}
VmStateFileOpen(std::io::Error),
/// Can not load snapshot: {0}
VmStateLoad(vmm::snapshot::SnapshotError),
/// Can not open output file: {0}
OutputFileOpen(std::io::Error),
/// Can not save snapshot: {0}
VmStateSave(vmm::snapshot::SnapshotError),
}
#[allow(unused)]
pub fn open_vmstate(snapshot_path: &PathBuf) -> Result<Snapshot<MicrovmState>, UtilsError> {
let mut snapshot_reader = File::open(snapshot_path).map_err(UtilsError::VmStateFileOpen)?;
Snapshot::load(&mut snapshot_reader).map_err(UtilsError::VmStateLoad)
}
// This method is used only in aarch64 code so far
#[allow(unused)]
pub fn save_vmstate(microvm_state: MicrovmState, output_path: &PathBuf) -> Result<(), UtilsError> {
let mut output_file = OpenOptions::new()
.create(true)
.write(true)
.truncate(true)
.open(output_path)
.map_err(UtilsError::OutputFileOpen)?;
let mut snapshot = Snapshot::new(microvm_state);
snapshot
.save(&mut output_file)
.map_err(UtilsError::VmStateSave)?;
Ok(())
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/snapshot-editor/src/edit_memory.rs | src/snapshot-editor/src/edit_memory.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fs::OpenOptions;
use std::io::{Seek, SeekFrom};
use std::os::fd::AsRawFd;
use std::path::PathBuf;
use clap::Subcommand;
use vmm::utils::u64_to_usize;
use vmm_sys_util::seek_hole::SeekHole;
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum EditMemoryError {
/// Could not open memory file: {0}
OpenMemoryFile(std::io::Error),
/// Could not open diff file: {0}
OpenDiffFile(std::io::Error),
/// Failed to seek data in diff file: {0}
SeekDataDiff(std::io::Error),
/// Failed to seek hole in diff file: {0}
SeekHoleDiff(std::io::Error),
/// Failed to get metadata for diff file: {0}
MetadataDiff(std::io::Error),
/// Failed to seek in memory file: {0}
SeekMemory(std::io::Error),
/// Failed to send the file: {0}
SendFile(std::io::Error),
}
#[derive(Debug, Subcommand)]
pub enum EditMemorySubCommand {
/// Apply a diff snapshot on top of a base one
Rebase {
/// Path to the memory file.
#[arg(short, long)]
memory_path: PathBuf,
/// Path to the diff file.
#[arg(short, long)]
diff_path: PathBuf,
},
}
pub fn edit_memory_command(command: EditMemorySubCommand) -> Result<(), EditMemoryError> {
match command {
EditMemorySubCommand::Rebase {
memory_path,
diff_path,
} => rebase(memory_path, diff_path)?,
}
Ok(())
}
fn rebase(memory_path: PathBuf, diff_path: PathBuf) -> Result<(), EditMemoryError> {
let mut base_file = OpenOptions::new()
.write(true)
.open(memory_path)
.map_err(EditMemoryError::OpenMemoryFile)?;
let mut diff_file = OpenOptions::new()
.read(true)
.open(diff_path)
.map_err(EditMemoryError::OpenDiffFile)?;
let mut cursor: u64 = 0;
while let Some(block_start) = diff_file
.seek_data(cursor)
.map_err(EditMemoryError::SeekDataDiff)?
{
cursor = block_start;
let block_end = match diff_file
.seek_hole(block_start)
.map_err(EditMemoryError::SeekHoleDiff)?
{
Some(hole_start) => hole_start,
None => diff_file
.metadata()
.map_err(EditMemoryError::MetadataDiff)?
.len(),
};
while cursor < block_end {
base_file
.seek(SeekFrom::Start(cursor))
.map_err(EditMemoryError::SeekMemory)?;
// SAFETY: Safe because the parameters are valid.
let num_transferred_bytes = unsafe {
libc::sendfile64(
base_file.as_raw_fd(),
diff_file.as_raw_fd(),
(&mut cursor as *mut u64).cast::<i64>(),
u64_to_usize(block_end.saturating_sub(cursor)),
)
};
if num_transferred_bytes < 0 {
return Err(EditMemoryError::SendFile(std::io::Error::last_os_error()));
}
}
}
Ok(())
}
#[cfg(test)]
mod tests {
use std::fs::File;
use std::io::{Seek, SeekFrom, Write};
use std::os::unix::fs::FileExt;
use vmm_sys_util::{rand, tempfile};
use super::*;
fn check_file_content(file: &File, expected_content: &[u8]) {
assert_eq!(
file.metadata().unwrap().len(),
expected_content.len() as u64
);
let mut buf = vec![0u8; expected_content.len()];
file.read_exact_at(buf.as_mut_slice(), 0).unwrap();
assert_eq!(&buf, expected_content);
}
#[test]
fn test_rebase_empty_files() {
let base = tempfile::TempFile::new().unwrap();
let diff = tempfile::TempFile::new().unwrap();
let base_file = base.as_file();
let base_path = base.as_path().to_path_buf();
let diff_path = diff.as_path().to_path_buf();
// Empty files
rebase(base_path, diff_path).unwrap();
assert_eq!(base_file.metadata().unwrap().len(), 0);
}
#[test]
fn test_rebase_empty_diff() {
let base = tempfile::TempFile::new().unwrap();
let diff = tempfile::TempFile::new().unwrap();
let mut base_file = base.as_file();
let diff_file = diff.as_file();
let base_path = base.as_path().to_path_buf();
let diff_path = diff.as_path().to_path_buf();
let initial_base_file_content = rand::rand_bytes(50000);
base_file.write_all(&initial_base_file_content).unwrap();
// Diff file that has only holes
diff_file
.set_len(initial_base_file_content.len() as u64)
.unwrap();
rebase(base_path, diff_path).unwrap();
check_file_content(base_file, &initial_base_file_content);
}
#[test]
fn test_rebase_full_diff() {
let base = tempfile::TempFile::new().unwrap();
let diff = tempfile::TempFile::new().unwrap();
let base_file = base.as_file();
let mut diff_file = diff.as_file();
let base_path = base.as_path().to_path_buf();
let diff_path = diff.as_path().to_path_buf();
// Diff file that has only data
let diff_data = rand::rand_bytes(50000);
diff_file.write_all(&diff_data).unwrap();
rebase(base_path, diff_path).unwrap();
check_file_content(base_file, &diff_data);
}
#[test]
fn test_rebase() {
// The filesystem punches holes only for blocks >= 4096.
// It doesn't make sense to test for smaller ones.
let block_sizes: &[usize] = &[4096, 8192];
for &block_size in block_sizes {
let mut expected_result = vec![];
let base = tempfile::TempFile::new().unwrap();
let diff = tempfile::TempFile::new().unwrap();
let mut base_file = base.as_file();
let mut diff_file = diff.as_file();
let base_path = base.as_path().to_path_buf();
let diff_path = diff.as_path().to_path_buf();
// 1. Populated block both in base and diff file
// block: [ ]
// diff: [ ]
// expected: [d]
let base_block = rand::rand_bytes(block_size);
base_file.write_all(&base_block).unwrap();
let diff_block = rand::rand_bytes(block_size);
diff_file.write_all(&diff_block).unwrap();
expected_result.extend(diff_block);
// 2. Populated block in base file, hole in diff file
// block: [ ] [ ]
// diff: [ ] ___
// expected: [d] [b]
let base_block = rand::rand_bytes(block_size);
base_file.write_all(&base_block).unwrap();
diff_file
.seek(SeekFrom::Current(i64::try_from(block_size).unwrap()))
.unwrap();
expected_result.extend(base_block);
// 3. Populated block in base file, zeroes block in diff file
// block: [ ] [ ] [ ]
// diff: [ ] ___ [0]
// expected: [d] [b] [d]
let base_block = rand::rand_bytes(block_size);
base_file.write_all(&base_block).unwrap();
let diff_block = vec![0u8; block_size];
diff_file.write_all(&diff_block).unwrap();
expected_result.extend(diff_block);
// Rebase and check the result
rebase(base_path.clone(), diff_path.clone()).unwrap();
check_file_content(base_file, &expected_result);
// 4. The diff file is bigger
// block: [ ] [ ] [ ]
// diff: [ ] ___ [0] [ ]
// expected: [d] [b] [d] [d]
let diff_block = rand::rand_bytes(block_size);
diff_file.write_all(&diff_block).unwrap();
expected_result.extend(diff_block);
// Rebase and check the result
rebase(base_path.clone(), diff_path.clone()).unwrap();
check_file_content(base_file, &expected_result);
// 5. The base file is bigger
// block: [ ] [ ] [ ] [ ] [ ]
// diff: [ ] ___ [0] [ ]
// expected: [d] [b] [d] [d] [b]
let base_block = rand::rand_bytes(block_size);
// Adding to the base file 2 times because
// it is 1 block smaller then diff right now.
base_file.write_all(&base_block).unwrap();
base_file.write_all(&base_block).unwrap();
expected_result.extend(base_block);
// Rebase and check the result
rebase(base_path, diff_path).unwrap();
check_file_content(base_file, &expected_result);
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/snapshot-editor/src/main.rs | src/snapshot-editor/src/main.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use clap::{Parser, Subcommand};
mod edit_memory;
#[cfg(target_arch = "aarch64")]
mod edit_vmstate;
mod info;
mod utils;
use edit_memory::{EditMemoryError, EditMemorySubCommand, edit_memory_command};
#[cfg(target_arch = "aarch64")]
use edit_vmstate::{EditVmStateError, EditVmStateSubCommand, edit_vmstate_command};
use info::{InfoVmStateError, InfoVmStateSubCommand, info_vmstate_command};
#[derive(Debug, thiserror::Error, displaydoc::Display)]
enum SnapEditorError {
/// Error during editing memory file: {0}
EditMemory(#[from] EditMemoryError),
#[cfg(target_arch = "aarch64")]
/// Error during editing vmstate file: {0}
EditVmState(#[from] EditVmStateError),
/// Error during getting info from a vmstate file: {0}
InfoVmState(#[from] InfoVmStateError),
}
#[derive(Debug, Parser)]
#[command(version = format!("v{}", env!("CARGO_PKG_VERSION")))]
struct Cli {
#[command(subcommand)]
command: Command,
}
#[derive(Debug, Subcommand)]
enum Command {
#[command(subcommand)]
EditMemory(EditMemorySubCommand),
#[cfg(target_arch = "aarch64")]
#[command(subcommand)]
EditVmstate(EditVmStateSubCommand),
#[command(subcommand)]
InfoVmstate(InfoVmStateSubCommand),
}
fn main_exec() -> Result<(), SnapEditorError> {
let cli = Cli::parse();
match cli.command {
Command::EditMemory(command) => edit_memory_command(command)?,
#[cfg(target_arch = "aarch64")]
Command::EditVmstate(command) => edit_vmstate_command(command)?,
Command::InfoVmstate(command) => info_vmstate_command(command)?,
}
Ok(())
}
fn main() -> Result<(), SnapEditorError> {
let result = main_exec();
if let Err(e) = result {
eprintln!("{}", e);
Err(e)
} else {
Ok(())
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/clippy-tracing/src/main.rs | src/clippy-tracing/src/main.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! A tool to add, remove and check for `tracing::instrument` in large projects where it is
//! infeasible to manually add it to thousands of functions.
use std::collections::HashMap;
use std::error::Error;
use std::fmt;
use std::fs::OpenOptions;
use std::io::{Read, Write};
use std::path::PathBuf;
use std::process::ExitCode;
use clap::{Parser, ValueEnum};
use syn::spanned::Spanned;
use syn::visit::Visit;
use walkdir::WalkDir;
/// The command line arguments for the application.
#[derive(Parser)]
struct CommandLineArgs {
/// The action to take.
#[arg(long)]
action: Action,
/// The path to look in.
#[arg(long)]
path: Option<PathBuf>,
/// When adding instrumentation use a custom suffix e.g.
/// `--suffix my::custom::suffix::`.
///
/// The tool may be unable to strip instrumentation with an invalid suffix.
#[arg(long)]
suffix: Option<String>,
/// Whether to add a `cfg_attr` condition e.g.
/// `#[cfg_attr(feature = "tracing", log_instrument::instrument)]` vs
/// `#[log_instrument::instrument]`.
#[arg(long)]
cfg_attr: Option<String>,
/// Sub-paths which contain any of the strings from this list will be ignored.
#[arg(long, value_delimiter = ',')]
exclude: Vec<String>,
}
/// The action to take.
#[derive(Clone, ValueEnum)]
enum Action {
/// Checks `tracing::instrument` is on all functions.
Check,
/// Adds `tracing::instrument` to all functions.
Fix,
/// Removes `tracing::instrument` from all functions.
Strip,
}
/// A list of text lines split so that newlines can be efficiently inserted between them.
struct SegmentedList {
/// The first new line.
first: String,
/// The inner vector used to contain the original lines `.0` and the new lines `.1`.
inner: Vec<(String, String)>,
}
impl SegmentedList {
/// Sets the text line before `line` to `text`.
fn set_before(&mut self, line: usize, text: String) -> bool {
let s = if let Some(i) = line.checked_sub(1) {
let Some(mut_ref) = self.inner.get_mut(i) else {
return false;
};
&mut mut_ref.1
} else {
&mut self.first
};
*s = text;
true
}
}
impl From<SegmentedList> for String {
fn from(list: SegmentedList) -> String {
let iter = list
.inner
.into_iter()
.map(|(x, y)| format!("{x}{}{y}", if y.is_empty() { "" } else { "\n" }));
format!(
"{}{}{}",
list.first,
if list.first.is_empty() { "" } else { "\n" },
itertools::intersperse(iter, String::from("\n")).collect::<String>()
)
}
}
/// Visitor for the `strip` action.
struct StripVisitor(HashMap<usize, String>);
impl From<StripVisitor> for String {
fn from(visitor: StripVisitor) -> String {
let mut vec = visitor.0.into_iter().collect::<Vec<_>>();
vec.sort_by_key(|(i, _)| *i);
itertools::intersperse(vec.into_iter().map(|(_, x)| x), String::from("\n"))
.collect::<String>()
}
}
macro_rules! create_strip_visitor_function {
($func_name:ident, $item:ident) => {
fn $func_name(&mut self, i: &syn::$item) {
if let Some(instrument) = find_instrumented(&i.attrs) {
let start = instrument.span().start().line - 1;
let end = instrument.span().end().line;
for line in start..end {
self.0.remove(&line);
}
}
self.visit_block(&i.block);
}
};
}
impl syn::visit::Visit<'_> for StripVisitor {
create_strip_visitor_function!(visit_impl_item_fn, ImplItemFn);
create_strip_visitor_function!(visit_item_fn, ItemFn);
}
/// Visitor for the `check` action.
struct CheckVisitor(Option<proc_macro2::Span>);
macro_rules! create_check_visitor_function {
($func_name:ident, $item:ident) => {
fn $func_name(&mut self, i: &syn::$item) {
let attr = check_attributes(&i.attrs);
if !attr.instrumented && !attr.test && i.sig.constness.is_none() {
self.0 = Some(i.span());
} else {
self.visit_block(&i.block);
}
}
};
}
impl syn::visit::Visit<'_> for CheckVisitor {
create_check_visitor_function!(visit_impl_item_fn, ImplItemFn);
create_check_visitor_function!(visit_item_fn, ItemFn);
}
/// Visitor for the `fix` action.
struct FixVisitor<'a> {
/// A custom path suffix.
suffix: &'a Option<String>,
/// A `cfg_attr` condition.
cfg_attr: &'a Option<String>,
/// Source
list: SegmentedList,
}
impl From<FixVisitor<'_>> for String {
fn from(visitor: FixVisitor) -> String {
String::from(visitor.list)
}
}
macro_rules! create_fix_visitor_function {
($func_name:ident, $item:ident) => {
fn $func_name(&mut self, i: &syn::$item) {
let attr = check_attributes(&i.attrs);
if !attr.instrumented && !attr.test && i.sig.constness.is_none() {
let line = i.span().start().line;
let attr_string = instrument(&i.sig, self.suffix, self.cfg_attr);
let indent = i.span().start().column;
let indent_attr = format!("{}{attr_string}", " ".repeat(indent));
self.list.set_before(line - 1, indent_attr);
}
self.visit_block(&i.block);
}
};
}
impl syn::visit::Visit<'_> for FixVisitor<'_> {
create_fix_visitor_function!(visit_impl_item_fn, ImplItemFn);
create_fix_visitor_function!(visit_item_fn, ItemFn);
}
fn instrument(sig: &syn::Signature, suffix: &Option<String>, cfg_attr: &Option<String>) -> String {
let instr = inner_instrument(sig, suffix);
if let Some(cfg_attr) = cfg_attr {
format!("#[cfg_attr({cfg_attr}, {instr})]")
} else {
format!("#[{instr}]")
}
}
/// Returns the instrument macro for a given function signature.
fn inner_instrument(_sig: &syn::Signature, suffix: &Option<String>) -> String {
format!(
"{}instrument",
suffix.as_ref().map_or("log_instrument::", String::as_str)
)
}
/// Type to return from `main` to support returning an error then handling it.
#[repr(u8)]
enum Exit {
/// Process completed successfully.
Ok = 0,
/// Process encountered an error.
Error = 1,
/// Process ran `check` action and found missing instrumentation.
Check = 2,
}
#[allow(clippy::as_conversions)]
impl std::process::Termination for Exit {
fn report(self) -> ExitCode {
ExitCode::from(self as u8)
}
}
fn main() -> Exit {
match exec() {
Err(err) => {
eprintln!("Error: {err}");
Exit::Error
}
Ok(None) => Exit::Ok,
Ok(Some((path, line, column))) => {
println!(
"Missing instrumentation at {}:{line}:{column}.",
path.display()
);
Exit::Check
}
}
}
/// Error for [`exec`].
#[derive(Debug)]
enum ExecError {
/// Failed to read entry in file path.
Entry(walkdir::Error),
/// Failed to parse file path to string.
String,
/// Failed to open file.
File(std::io::Error),
/// Failed to run apply function.
Apply(ApplyError),
}
impl fmt::Display for ExecError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Entry(entry) => write!(f, "Failed to read entry in file path: {entry}"),
Self::String => write!(f, "Failed to parse file path to string."),
Self::File(file) => write!(f, "Failed to open file: {file}"),
Self::Apply(apply) => write!(f, "Failed to run apply function: {apply}"),
}
}
}
impl Error for ExecError {}
/// Wraps functionality from `main` to support returning an error then handling it.
fn exec() -> Result<Option<(PathBuf, usize, usize)>, ExecError> {
let args = CommandLineArgs::parse();
let path = args.path.unwrap_or_else(|| PathBuf::from("."));
for entry_res in WalkDir::new(path).follow_links(true) {
let entry = entry_res.map_err(ExecError::Entry)?;
let entry_path = entry.into_path();
let path_str = entry_path.to_str().ok_or(ExecError::String)?;
// File paths must not contain any excluded strings.
let no_excluded_strings = !args.exclude.iter().any(|e| path_str.contains(e));
// The file must not be a `build.rs` file.
let not_build_file = !entry_path.ends_with("build.rs");
// The file must be a `.rs` file.
let is_rs_file = entry_path.extension().is_some_and(|ext| ext == "rs");
if no_excluded_strings && not_build_file && is_rs_file {
let file = OpenOptions::new()
.read(true)
.open(&entry_path)
.map_err(ExecError::File)?;
let res = apply(&args.action, &args.suffix, &args.cfg_attr, file, |_| {
OpenOptions::new()
.write(true)
.truncate(true)
.open(&entry_path)
})
.map_err(ExecError::Apply)?;
if let Some(span) = res {
return Ok(Some((entry_path, span.start().line, span.start().column)));
}
}
}
Ok(None)
}
/// Error for [`apply`].
#[derive(Debug)]
enum ApplyError {
/// Failed to read file.
Read(std::io::Error),
/// Failed to parse file to utf8.
Utf(core::str::Utf8Error),
/// Failed to parse file to syn ast.
Syn(syn::parse::Error),
/// Failed to get write target.
Target(std::io::Error),
/// Failed to write result to target.
Write(std::io::Error),
}
impl fmt::Display for ApplyError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Read(read) => write!(f, "Failed to read file: {read}"),
Self::Utf(utf) => write!(f, "Failed to parse file to utf8: {utf}"),
Self::Syn(syn) => write!(f, "Failed to parse file to syn ast: {syn}"),
Self::Target(target) => write!(f, "Failed to get write target: {target}"),
Self::Write(write) => write!(f, "Failed to write result to target: {write}"),
}
}
}
impl Error for ApplyError {}
/// Apply the given action to the given source and outputs the result to the target produced by the
/// given closure.
fn apply<R: Read, W: Write>(
action: &Action,
suffix: &Option<String>,
cfg_attr: &Option<String>,
mut source: R,
target: impl Fn(R) -> Result<W, std::io::Error>,
) -> Result<Option<proc_macro2::Span>, ApplyError> {
let mut buf = Vec::new();
source.read_to_end(&mut buf).map_err(ApplyError::Read)?;
let text = core::str::from_utf8(&buf).map_err(ApplyError::Utf)?;
let ast = syn::parse_file(text).map_err(ApplyError::Syn)?;
match action {
Action::Strip => {
let mut visitor = StripVisitor(
text.split('\n')
.enumerate()
.map(|(i, x)| (i, String::from(x)))
.collect(),
);
visitor.visit_file(&ast);
let out = String::from(visitor);
target(source)
.map_err(ApplyError::Target)?
.write_all(out.as_bytes())
.map_err(ApplyError::Write)?;
Ok(None)
}
Action::Check => {
let mut visitor = CheckVisitor(None);
visitor.visit_file(&ast);
Ok(visitor.0)
}
Action::Fix => {
let mut visitor = FixVisitor {
suffix,
cfg_attr,
list: SegmentedList {
first: String::new(),
inner: text
.split('\n')
.map(|x| (String::from(x), String::new()))
.collect(),
},
};
visitor.visit_file(&ast);
let out = String::from(visitor);
target(source)
.map_err(ApplyError::Target)?
.write_all(out.as_bytes())
.map_err(ApplyError::Write)?;
Ok(None)
}
}
}
/// Finds the `#[instrument]` attribute on a function.
fn find_instrumented(attrs: &[syn::Attribute]) -> Option<&syn::Attribute> {
attrs.iter().find(|a| is_instrumented(a).is_some())
}
/// Checks if a `syn::Attribute` is `#[instrument]`.
fn is_instrumented(attr: &syn::Attribute) -> Option<&syn::Attribute> {
match &attr.meta {
syn::Meta::List(syn::MetaList { path, tokens, .. }) => {
// `#[instrument]`
let instrumented = matches!(path.segments.last(), Some(syn::PathSegment { ident, .. }) if ident == "instrument");
// `#[cfg_attr(.. , instrument)]`
let attr_instrumented = matches!(path.segments.last(), Some(syn::PathSegment { ident, .. }) if ident == "cfg_attr") && tokens.clone().into_iter().any(|token| matches!(token, proc_macro2::TokenTree::Ident(ident) if ident == "instrument"));
(instrumented || attr_instrumented).then_some(attr)
}
syn::Meta::Path(syn::Path { segments, .. }) => {
let x = matches!(segments.last(), Some(syn::PathSegment { ident, .. }) if ident == "instrument");
x.then_some(attr)
}
syn::Meta::NameValue(_) => None,
}
}
/// The description of attributes on a function signature we care about.
struct Desc {
/// Does the function have the `#[tracing::instrument]` attribute macro?
instrumented: bool,
/// Does the function have the `#[test]` attribute macro?
test: bool,
}
// A function is considered instrumented if it has the `#[instrument]` attribute or the `#[test]`
// attribute.
/// Returns a tuple where the 1st element is whether `tracing::instrument` is found in the list of
/// attributes and the 2nd is whether `clippy_tracing_attributes::skip` is found in the list of
/// attributes.
fn check_attributes(attrs: &[syn::Attribute]) -> Desc {
let mut instrumented = false;
let mut test = false;
for attr in attrs {
// Match `#[instrument]` and `#[cfg_attr(.., instrument)]`.
if is_instrumented(attr).is_some() {
instrumented = true;
}
// Match `#[test]` or `#[kani::proof]`.
if match &attr.meta {
syn::Meta::List(syn::MetaList { path, .. }) => {
matches!(path.segments.last(), Some(syn::PathSegment { ident, .. }) if ident == "proof")
}
syn::Meta::Path(syn::Path { segments, .. }) => {
matches!(segments.last(), Some(syn::PathSegment { ident, .. }) if ident == "test" || ident == "proof")
}
syn::Meta::NameValue(_) => false,
} {
test = true;
}
}
Desc { instrumented, test }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/clippy-tracing/tests/integration_tests.rs | src/clippy-tracing/tests/integration_tests.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Allow test functions outside of test modules
#![allow(clippy::tests_outside_test_module)]
use std::fs::{OpenOptions, remove_file};
use std::io::{Read, Write};
use std::process::Command;
use uuid::Uuid;
const BINARY: &str = env!("CARGO_BIN_EXE_clippy-tracing");
fn setup(text: &str) -> String {
let id = Uuid::new_v4();
let path = format!("/tmp/{id}.rs");
let mut file = OpenOptions::new()
.create(true)
.truncate(true)
.read(false)
.write(true)
.open(&path)
.unwrap();
file.write_all(text.as_bytes()).unwrap();
path
}
fn check_file(text: &str, path: &str) {
let mut file = OpenOptions::new()
.create(false)
.read(true)
.write(false)
.open(path)
.unwrap();
let mut buffer = String::new();
file.read_to_string(&mut buffer).unwrap();
assert_eq!(text, buffer);
}
fn fix(given: &str, expected: &str, cfg_attr: Option<&'static str>) {
let path = setup(given);
let output = if let Some(cfg_attr) = cfg_attr {
Command::new(BINARY)
.args(["--action", "fix", "--path", &path, "--cfg-attr", cfg_attr])
.output()
.unwrap()
} else {
Command::new(BINARY)
.args(["--action", "fix", "--path", &path])
.output()
.unwrap()
};
assert_eq!(std::str::from_utf8(&output.stdout).unwrap(), "");
assert_eq!(std::str::from_utf8(&output.stderr).unwrap(), "");
assert_eq!(output.status.code(), Some(0));
check_file(expected, &path);
remove_file(path).unwrap();
}
fn strip(given: &str, expected: &str) {
let path = setup(given);
let output = Command::new(BINARY)
.args(["--action", "strip", "--path", &path])
.output()
.unwrap();
assert_eq!(output.status.code(), Some(0));
assert_eq!(output.stdout, []);
assert_eq!(output.stderr, []);
check_file(expected, &path);
remove_file(path).unwrap();
}
#[test]
fn exec_error() {
// Create file path for a file that doesn't exist.
let id = Uuid::new_v4();
let path = format!("/tmp/{id}.rs");
let output = Command::new(BINARY)
.args(["--action", "check", "--path", &path])
.output()
.unwrap();
assert_eq!(output.status.code(), Some(1));
assert_eq!(output.stdout, []);
let expected_stderr = format!(
"Error: Failed to read entry in file path: IO error for operation on {path}: No such file \
or directory (os error 2)\n"
);
assert_eq!(output.stderr, expected_stderr.as_bytes());
}
#[test]
fn fix_one() {
const GIVEN: &str = "fn main() { }\nfn add(lhs: i32, rhs: i32) {\n lhs + rhs\n}";
const EXPECTED: &str = "#[log_instrument::instrument]\nfn main() { \
}\n#[log_instrument::instrument]\nfn add(lhs: i32, rhs: i32) {\n \
lhs + rhs\n}";
fix(GIVEN, EXPECTED, None);
}
#[test]
fn fix_two() {
const GIVEN: &str = "impl Unit {\n fn one() {}\n}";
const EXPECTED: &str = "impl Unit {\n #[log_instrument::instrument]\n fn one() {}\n}";
fix(GIVEN, EXPECTED, None);
}
#[test]
fn fix_three() {
const GIVEN: &str = "impl Unit {\n fn one() {}\n}";
const EXPECTED: &str = "impl Unit {\n #[cfg_attr(feature = \"tracing\", \
log_instrument::instrument)]\n fn one() {}\n}";
fix(GIVEN, EXPECTED, Some("feature = \"tracing\""));
}
#[test]
fn check_one() {
const GIVEN: &str = "fn main() { }";
let path = setup(GIVEN);
let output = Command::new(BINARY)
.args(["--action", "check", "--path", &path])
.output()
.unwrap();
assert_eq!(output.status.code(), Some(2));
let expected_stdout = format!("Missing instrumentation at {path}:1:0.\n");
assert_eq!(output.stdout, expected_stdout.as_bytes());
assert_eq!(output.stderr, []);
remove_file(path).unwrap();
}
#[test]
fn check_two() {
const GIVEN: &str = "#[log_instrument::instrument]\nfn main() { }\n#[test]\nfn my_test() { }";
let path: String = setup(GIVEN);
let output = Command::new(BINARY)
.args(["--action", "check", "--path", &path])
.output()
.unwrap();
assert_eq!(output.status.code(), Some(0));
assert_eq!(output.stdout, []);
assert_eq!(output.stderr, []);
remove_file(path).unwrap();
}
#[test]
fn check_three() {
const GIVEN: &str = "impl Unit {\n #[cfg_attr(feature = \"tracing\", \
tracing::instrument(level = \"trace\", skip()))]\n fn one() {}\n}";
let path = setup(GIVEN);
let output = Command::new(BINARY)
.args([
"--action",
"check",
"--path",
&path,
"--cfg-attr",
"feature = \"tracing\"",
])
.output()
.unwrap();
assert_eq!(std::str::from_utf8(&output.stdout).unwrap(), "");
assert_eq!(std::str::from_utf8(&output.stderr).unwrap(), "");
assert_eq!(output.status.code(), Some(0));
remove_file(path).unwrap();
}
#[test]
fn strip_one() {
const GIVEN: &str = "#[log_instrument::instrument]\nfn main() { }";
const EXPECTED: &str = "fn main() { }";
strip(GIVEN, EXPECTED);
}
#[test]
fn strip_two() {
const GIVEN: &str = "#[log_instrument::instrument]\nfn main() { }";
const EXPECTED: &str = "fn main() { }";
strip(GIVEN, EXPECTED);
}
#[test]
fn strip_three() {
const EXPECTED: &str = "impl Unit {\n fn one() {}\n}";
const GIVEN: &str = "impl Unit {\n #[log_instrument::instrument]\n fn one() {}\n}";
strip(GIVEN, EXPECTED);
}
#[test]
fn exclude() {
const GIVEN: &str = "fn main() { }\nfn add(lhs: i32, rhs: i32) {\n lhs + rhs\n}";
const EXPECTED: &str = "#[log_instrument::instrument]\nfn main() { \
}\n#[log_instrument::instrument]\nfn add(lhs: i32, rhs: i32) {\n \
lhs + rhs\n}";
let dir_path = format!("/tmp/{}", Uuid::new_v4());
std::fs::create_dir(&dir_path).unwrap();
dbg!(&dir_path);
let file_path_one = format!("{dir_path}/{}.rs", Uuid::new_v4());
let file_path_two = format!("{dir_path}/{}.rs", Uuid::new_v4());
dbg!(&file_path_one);
dbg!(&file_path_two);
let mut file_one = OpenOptions::new()
.create(true)
.truncate(true)
.read(false)
.write(true)
.open(&file_path_one)
.unwrap();
file_one.write_all(GIVEN.as_bytes()).unwrap();
let mut file_two = OpenOptions::new()
.create(true)
.truncate(true)
.read(false)
.write(true)
.open(&file_path_two)
.unwrap();
file_two.write_all(GIVEN.as_bytes()).unwrap();
let output = Command::new(BINARY)
.args([
"--action",
"fix",
"--path",
&dir_path,
"--exclude",
&file_path_two,
])
.output()
.unwrap();
assert_eq!(std::str::from_utf8(&output.stdout).unwrap(), "");
assert_eq!(std::str::from_utf8(&output.stderr).unwrap(), "");
assert_eq!(output.status.code(), Some(0));
check_file(EXPECTED, &file_path_one);
check_file(GIVEN, &file_path_two);
remove_file(file_path_one).unwrap();
remove_file(file_path_two).unwrap();
std::fs::remove_dir(dir_path).unwrap();
}
#[test]
fn readme() {
const GIVEN: &str = r#"fn main() {
println!("Hello World!");
}
fn add(lhs: i32, rhs: i32) -> i32 {
lhs + rhs
}
#[cfg(tests)]
mod tests {
fn sub(lhs: i32, rhs: i32) -> i32 {
lhs - rhs
}
#[test]
fn test_one() {
assert_eq!(add(1,1), sub(2, 1));
}
}"#;
let path: String = setup(GIVEN);
// Check
let output = Command::new(BINARY)
.args(["--action", "check", "--path", &path])
.output()
.unwrap();
assert_eq!(output.status.code(), Some(2));
let missing = format!("Missing instrumentation at {path}:9:4.\n");
assert_eq!(output.stdout, missing.as_bytes());
assert_eq!(output.stderr, []);
const EXPECTED: &str = r#"#[log_instrument::instrument]
fn main() {
println!("Hello World!");
}
#[log_instrument::instrument]
fn add(lhs: i32, rhs: i32) -> i32 {
lhs + rhs
}
#[cfg(tests)]
mod tests {
#[log_instrument::instrument]
fn sub(lhs: i32, rhs: i32) -> i32 {
lhs - rhs
}
#[test]
fn test_one() {
assert_eq!(add(1,1), sub(2, 1));
}
}"#;
// Fix
let output = Command::new(BINARY)
.args(["--action", "fix", "--path", &path])
.output()
.unwrap();
assert_eq!(output.status.code(), Some(0));
assert_eq!(output.stdout, []);
assert_eq!(output.stderr, []);
check_file(EXPECTED, &path);
// Check
let output = Command::new(BINARY)
.args(["--action", "check", "--path", &path])
.output()
.unwrap();
assert_eq!(output.status.code(), Some(0));
assert_eq!(output.stdout, []);
assert_eq!(output.stderr, []);
// Strip
let output = Command::new(BINARY)
.args(["--action", "strip", "--path", &path])
.output()
.unwrap();
assert_eq!(output.status.code(), Some(0));
assert_eq!(output.stdout, []);
assert_eq!(output.stderr, []);
check_file(GIVEN, &path);
}
#[test]
fn readme_empty_suffix() {
const GIVEN: &str = r#"fn main() {
println!("Hello World!");
}
fn add(lhs: i32, rhs: i32) -> i32 {
lhs + rhs
}
#[cfg(tests)]
mod tests {
fn sub(lhs: i32, rhs: i32) -> i32 {
lhs - rhs
}
#[test]
fn test_one() {
assert_eq!(add(1,1), sub(2, 1));
}
}"#;
let path: String = setup(GIVEN);
// Check
let output = Command::new(BINARY)
.args(["--action", "check", "--path", &path])
.output()
.unwrap();
assert_eq!(output.status.code(), Some(2));
let missing = format!("Missing instrumentation at {path}:9:4.\n");
assert_eq!(output.stdout, missing.as_bytes());
assert_eq!(output.stderr, []);
const EXPECTED: &str = r#"#[instrument]
fn main() {
println!("Hello World!");
}
#[instrument]
fn add(lhs: i32, rhs: i32) -> i32 {
lhs + rhs
}
#[cfg(tests)]
mod tests {
#[instrument]
fn sub(lhs: i32, rhs: i32) -> i32 {
lhs - rhs
}
#[test]
fn test_one() {
assert_eq!(add(1,1), sub(2, 1));
}
}"#;
// Fix
let output = Command::new(BINARY)
.args(["--action", "fix", "--suffix", "", "--path", &path])
.output()
.unwrap();
assert_eq!(output.status.code(), Some(0));
assert_eq!(output.stdout, []);
assert_eq!(output.stderr, []);
check_file(EXPECTED, &path);
// Check
let output = Command::new(BINARY)
.args(["--action", "check", "--path", &path])
.output()
.unwrap();
assert_eq!(output.status.code(), Some(0));
assert_eq!(output.stdout, []);
assert_eq!(output.stderr, []);
// Strip
let output = Command::new(BINARY)
.args(["--action", "strip", "--path", &path])
.output()
.unwrap();
assert_eq!(output.status.code(), Some(0));
assert_eq!(output.stdout, []);
assert_eq!(output.stderr, []);
check_file(GIVEN, &path);
}
#[test]
fn readme_custom_suffix() {
const GIVEN: &str = r#"fn main() {
println!("Hello World!");
}
fn add(lhs: i32, rhs: i32) -> i32 {
lhs + rhs
}
#[cfg(tests)]
mod tests {
fn sub(lhs: i32, rhs: i32) -> i32 {
lhs - rhs
}
#[test]
fn test_one() {
assert_eq!(add(1,1), sub(2, 1));
}
}"#;
let path: String = setup(GIVEN);
// Check
let output = Command::new(BINARY)
.args(["--action", "check", "--path", &path])
.output()
.unwrap();
assert_eq!(output.status.code(), Some(2));
let missing = format!("Missing instrumentation at {path}:9:4.\n");
assert_eq!(output.stdout, missing.as_bytes());
assert_eq!(output.stderr, []);
const EXPECTED: &str = r#"#[my::custom::suffix::instrument]
fn main() {
println!("Hello World!");
}
#[my::custom::suffix::instrument]
fn add(lhs: i32, rhs: i32) -> i32 {
lhs + rhs
}
#[cfg(tests)]
mod tests {
#[my::custom::suffix::instrument]
fn sub(lhs: i32, rhs: i32) -> i32 {
lhs - rhs
}
#[test]
fn test_one() {
assert_eq!(add(1,1), sub(2, 1));
}
}"#;
// Fix
let output = Command::new(BINARY)
.args([
"--action",
"fix",
"--suffix",
"my::custom::suffix::",
"--path",
&path,
])
.output()
.unwrap();
assert_eq!(output.status.code(), Some(0));
assert_eq!(output.stdout, []);
assert_eq!(output.stderr, []);
check_file(EXPECTED, &path);
// Check
let output = Command::new(BINARY)
.args(["--action", "check", "--path", &path])
.output()
.unwrap();
assert_eq!(output.status.code(), Some(0));
assert_eq!(output.stdout, []);
assert_eq!(output.stderr, []);
// Strip
let output = Command::new(BINARY)
.args(["--action", "strip", "--path", &path])
.output()
.unwrap();
assert_eq!(output.status.code(), Some(0));
assert_eq!(output.stdout, []);
assert_eq!(output.stderr, []);
check_file(GIVEN, &path);
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/jailer/src/chroot.rs | src/jailer/src/chroot.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::env;
use std::ffi::CStr;
use std::path::Path;
use std::ptr::null;
use vmm_sys_util::syscall::SyscallReturnCode;
use super::{JailerError, to_cstring};
const OLD_ROOT_DIR: &CStr = c"old_root";
const ROOT_DIR: &CStr = c"/";
const CURRENT_DIR: &CStr = c".";
// This uses switching to a new mount namespace + pivot_root(), together with the regular chroot,
// to provide a hardened jail (at least compared to only relying on chroot).
pub fn chroot(path: &Path) -> Result<(), JailerError> {
// We unshare into a new mount namespace.
// SAFETY: The call is safe because we're invoking a C library
// function with valid parameters.
SyscallReturnCode(unsafe { libc::unshare(libc::CLONE_NEWNS) })
.into_empty_result()
.map_err(JailerError::UnshareNewNs)?;
// Recursively change the propagation type of all the mounts in this namespace to SLAVE, so
// we can call pivot_root.
// SAFETY: Safe because we provide valid parameters.
SyscallReturnCode(unsafe {
libc::mount(
null(),
ROOT_DIR.as_ptr(),
null(),
libc::MS_SLAVE | libc::MS_REC,
null(),
)
})
.into_empty_result()
.map_err(JailerError::MountPropagationSlave)?;
// We need a CString for the following mount call.
let chroot_dir = to_cstring(path)?;
// Bind mount the jail root directory over itself, so we can go around a restriction
// imposed by pivot_root, which states that the new root and the old root should not
// be on the same filesystem.
// SAFETY: Safe because we provide valid parameters.
SyscallReturnCode(unsafe {
libc::mount(
chroot_dir.as_ptr(),
chroot_dir.as_ptr(),
null(),
libc::MS_BIND | libc::MS_REC,
null(),
)
})
.into_empty_result()
.map_err(JailerError::MountBind)?;
// Change current dir to the chroot dir, so we only need to handle relative paths from now on.
env::set_current_dir(path).map_err(JailerError::SetCurrentDir)?;
// Create the old_root folder we're going to use for pivot_root, using a relative path.
// SAFETY: The call is safe because we provide valid arguments.
SyscallReturnCode(unsafe { libc::mkdir(OLD_ROOT_DIR.as_ptr(), libc::S_IRUSR | libc::S_IWUSR) })
.into_empty_result()
.map_err(JailerError::MkdirOldRoot)?;
// We are now ready to call pivot_root. We have to use sys_call because there is no libc
// wrapper for pivot_root.
// SAFETY: Safe because we provide valid parameters.
SyscallReturnCode(unsafe {
libc::syscall(
libc::SYS_pivot_root,
CURRENT_DIR.as_ptr(),
OLD_ROOT_DIR.as_ptr(),
)
})
.into_empty_result()
.map_err(JailerError::PivotRoot)?;
// pivot_root doesn't guarantee that we will be in "/" at this point, so switch to "/"
// explicitly.
// SAFETY: Safe because we provide valid parameters.
SyscallReturnCode(unsafe { libc::chdir(ROOT_DIR.as_ptr()) })
.into_empty_result()
.map_err(JailerError::ChdirNewRoot)?;
// Umount the old_root, thus isolating the process from everything outside the jail root folder.
// SAFETY: Safe because we provide valid parameters.
SyscallReturnCode(unsafe { libc::umount2(OLD_ROOT_DIR.as_ptr(), libc::MNT_DETACH) })
.into_empty_result()
.map_err(JailerError::UmountOldRoot)?;
// Remove the no longer necessary old_root directory.
// SAFETY: Safe because we provide valid parameters.
SyscallReturnCode(unsafe { libc::rmdir(OLD_ROOT_DIR.as_ptr()) })
.into_empty_result()
.map_err(JailerError::RmOldRootDir)
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/jailer/src/env.rs | src/jailer/src/env.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::ffi::{CStr, CString, OsString};
use std::fs::{self, File, OpenOptions, Permissions, canonicalize, read_to_string};
use std::io;
use std::io::Write;
use std::os::unix::fs::PermissionsExt;
use std::os::unix::io::AsRawFd;
use std::os::unix::process::CommandExt;
use std::path::{Component, Path, PathBuf};
use std::process::{Command, Stdio, exit, id};
use utils::arg_parser::UtilsArgParserError::MissingValue;
use utils::time::{ClockType, get_time_us};
use utils::{arg_parser, validators};
use vmm_sys_util::syscall::SyscallReturnCode;
use crate::JailerError;
use crate::cgroup::{CgroupConfiguration, CgroupConfigurationBuilder};
use crate::chroot::chroot;
use crate::resource_limits::{FSIZE_ARG, NO_FILE_ARG, ResourceLimits};
pub const PROC_MOUNTS: &str = "/proc/mounts";
const STDIN_FILENO: libc::c_int = 0;
const STDOUT_FILENO: libc::c_int = 1;
const STDERR_FILENO: libc::c_int = 2;
// Kernel-based virtual machine (hardware virtualization extensions)
// minor/major numbers are taken from
// https://www.kernel.org/doc/html/latest/admin-guide/devices.html
const DEV_KVM: &CStr = c"/dev/kvm";
const DEV_KVM_MAJOR: u32 = 10;
const DEV_KVM_MINOR: u32 = 232;
// TUN/TAP device minor/major numbers are taken from
// www.kernel.org/doc/Documentation/networking/tuntap.txt
const DEV_NET_TUN: &CStr = c"/dev/net/tun";
const DEV_NET_TUN_MAJOR: u32 = 10;
const DEV_NET_TUN_MINOR: u32 = 200;
// Random number generator device minor/major numbers are taken from
// https://www.kernel.org/doc/Documentation/admin-guide/devices.txt
const DEV_URANDOM: &CStr = c"/dev/urandom";
const DEV_URANDOM_MAJOR: u32 = 1;
const DEV_URANDOM_MINOR: u32 = 9;
// Userfault file descriptor device path. This is a misc character device
// with a MISC_DYNAMIC_MINOR minor device:
// https://elixir.bootlin.com/linux/v6.1.51/source/fs/userfaultfd.c#L2176.
//
// This means that its minor device number will be allocated at run time,
// so we will have to find it at initialization time parsing /proc/misc.
// What we do know is the major number for misc devices:
// https://elixir.bootlin.com/linux/v6.1.51/source/Documentation/admin-guide/devices.txt
const DEV_UFFD_PATH: &CStr = c"/dev/userfaultfd";
const DEV_UFFD_MAJOR: u32 = 10;
// Relevant folders inside the jail that we create or/and for which we change ownership.
// We need /dev in order to be able to create /dev/kvm and /dev/net/tun device.
// We need /run for the default location of the api socket.
// Since libc::chown is not recursive, we cannot specify only /dev/net as we want
// to walk through the entire folder hierarchy.
const FOLDER_HIERARCHY: [&str; 4] = ["/", "/dev", "/dev/net", "/run"];
const FOLDER_PERMISSIONS: u32 = 0o700;
// When running with `--new-pid-ns` flag, the PID of the process running the exec_file differs
// from jailer's and it is stored inside a dedicated file, prefixed with the below extension.
const PID_FILE_EXTENSION: &str = ".pid";
// Helper function, since we'll use libc::dup2 a bunch of times for daemonization.
fn dup2(old_fd: libc::c_int, new_fd: libc::c_int) -> Result<(), JailerError> {
// SAFETY: This is safe because we are using a library function with valid parameters.
SyscallReturnCode(unsafe { libc::dup2(old_fd, new_fd) })
.into_empty_result()
.map_err(JailerError::Dup2)
}
// This is a wrapper for the clone system call. When we want to create a new process in a new
// pid namespace, we will call clone with a NULL stack pointer. We can do this because we will
// not use the CLONE_VM flag, this will result with the original stack replicated, in a similar
// manner to the fork syscall. The libc wrapper prevents use of a NULL stack pointer, so we will
// call the syscall directly.
fn clone(child_stack: *mut libc::c_void, flags: libc::c_int) -> Result<libc::c_int, JailerError> {
SyscallReturnCode(
// SAFETY: This is safe because we are using a library function with valid parameters.
libc::c_int::try_from(unsafe {
// Note: the order of arguments in the raw syscall differs between platforms.
// On x86-64, for example, the parameters passed are `flags`, `stack`, `parent_tid`,
// `child_tid`, and `tls`. But on On x86-32, and several other common architectures
// (including score, ARM, ARM 64) the order of the last two arguments is reversed,
// and instead we must pass `flags`, `stack`, `parent_tid`, `tls`, and `child_tid`.
// This difference in architecture currently doesn't matter because the last 2
// arguments are all 0 but if this were to change we should add an attribute such as
// #[cfg(target_arch = "x86_64")] or #[cfg(target_arch = "aarch64")] for each different
// call.
libc::syscall(libc::SYS_clone, flags, child_stack, 0, 0, 0)
})
// Unwrap is needed because PIDs are 32-bit.
.unwrap(),
)
.into_result()
.map_err(JailerError::Clone)
}
#[derive(Debug, thiserror::Error)]
enum UserfaultfdParseError {
#[error("Could not read /proc/misc: {0}")]
ReadProcMisc(#[from] std::io::Error),
#[error("Could not parse minor number: {0}")]
ParseDevMinor(#[from] std::num::ParseIntError),
#[error("userfaultfd device not loaded")]
NotFound,
}
#[derive(Debug)]
pub struct Env {
id: String,
chroot_dir: PathBuf,
exec_file_path: PathBuf,
uid: u32,
gid: u32,
netns: Option<String>,
daemonize: bool,
new_pid_ns: bool,
start_time_us: u64,
start_time_cpu_us: u64,
jailer_cpu_time_us: u64,
extra_args: Vec<String>,
cgroup_conf: Option<CgroupConfiguration>,
resource_limits: ResourceLimits,
uffd_dev_minor: Option<u32>,
}
impl Env {
pub fn new(
arguments: &arg_parser::Arguments,
start_time_us: u64,
start_time_cpu_us: u64,
proc_mounts: &str,
) -> Result<Self, JailerError> {
// Unwraps should not fail because the arguments are mandatory arguments or with default
// values.
let id = arguments
.single_value("id")
.ok_or_else(|| JailerError::ArgumentParsing(MissingValue("id".to_string())))?;
validators::validate_instance_id(id).map_err(JailerError::InvalidInstanceId)?;
let exec_file = arguments
.single_value("exec-file")
.ok_or_else(|| JailerError::ArgumentParsing(MissingValue("exec-file".to_string())))?;
let (exec_file_path, exec_file_name) = Env::validate_exec_file(exec_file)?;
let chroot_base = arguments.single_value("chroot-base-dir").ok_or_else(|| {
JailerError::ArgumentParsing(MissingValue("chroot-base-dir".to_string()))
})?;
let mut chroot_dir = canonicalize(chroot_base)
.map_err(|err| JailerError::Canonicalize(PathBuf::from(&chroot_base), err))?;
if !chroot_dir.is_dir() {
return Err(JailerError::NotADirectory(chroot_dir));
}
chroot_dir.push(&exec_file_name);
chroot_dir.push(id);
chroot_dir.push("root");
let uid_str = arguments
.single_value("uid")
.ok_or_else(|| JailerError::ArgumentParsing(MissingValue("uid".to_string())))?;
let uid = uid_str
.parse::<u32>()
.map_err(|_| JailerError::Uid(uid_str.to_owned()))?;
let gid_str = arguments
.single_value("gid")
.ok_or_else(|| JailerError::ArgumentParsing(MissingValue("gid".to_string())))?;
let gid = gid_str
.parse::<u32>()
.map_err(|_| JailerError::Gid(gid_str.to_owned()))?;
let netns = arguments.single_value("netns").cloned();
let daemonize = arguments.flag_present("daemonize");
let new_pid_ns = arguments.flag_present("new-pid-ns");
// Optional arguments.
let mut cgroup_conf = None;
let parent_cgroup = match arguments.single_value("parent-cgroup") {
Some(parent_cg) => Path::new(parent_cg),
None => Path::new(&exec_file_name),
};
if parent_cgroup
.components()
.any(|c| c == Component::CurDir || c == Component::ParentDir || c == Component::RootDir)
{
return Err(JailerError::CgroupInvalidParentPath());
}
let cgroup_ver = arguments.single_value("cgroup-version").ok_or_else(|| {
JailerError::ArgumentParsing(MissingValue("cgroup-version".to_string()))
})?;
let cgroup_ver = cgroup_ver
.parse::<u8>()
.map_err(|_| JailerError::CgroupInvalidVersion(cgroup_ver.to_string()))?;
let cgroups_args: &[String] = arguments.multiple_values("cgroup").unwrap_or_default();
// If the --parent-cgroup exists, and we have no other cgroups,
// then the intent is to move the process to that cgroup.
// Only applies to cgroupsv2 since it's a unified hierarchy
if cgroups_args.is_empty() && cgroup_ver == 2 {
let builder = CgroupConfigurationBuilder::new(cgroup_ver, proc_mounts)?;
let cg_parent = builder.get_v2_hierarchy_path()?.join(parent_cgroup);
let cg_parent_procs = cg_parent.join("cgroup.procs");
if cg_parent.exists() {
fs::write(cg_parent_procs, std::process::id().to_string())
.map_err(|_| JailerError::CgroupMove(cg_parent, io::Error::last_os_error()))?;
}
}
// cgroup format: <cgroup_controller>.<cgroup_property>=<value>,...
if let Some(cgroups_args) = arguments.multiple_values("cgroup") {
let mut builder = CgroupConfigurationBuilder::new(cgroup_ver, proc_mounts)?;
for cg in cgroups_args {
let aux: Vec<&str> = cg.split('=').collect();
if aux.len() != 2 || aux[1].is_empty() {
return Err(JailerError::CgroupFormat(cg.to_string()));
}
let file = Path::new(aux[0]);
if file.components().any(|c| {
c == Component::CurDir || c == Component::ParentDir || c == Component::RootDir
}) {
return Err(JailerError::CgroupInvalidFile(cg.to_string()));
}
builder.add_cgroup_property(
aux[0].to_string(), // cgroup file
aux[1].to_string(), // cgroup value
id,
parent_cgroup,
)?;
}
cgroup_conf = Some(builder.build());
}
let mut resource_limits = ResourceLimits::default();
if let Some(args) = arguments.multiple_values("resource-limit") {
Env::parse_resource_limits(&mut resource_limits, args)?;
}
let uffd_dev_minor = Self::get_userfaultfd_minor_dev_number().ok();
Ok(Env {
id: id.to_owned(),
chroot_dir,
exec_file_path,
uid,
gid,
netns,
daemonize,
new_pid_ns,
start_time_us,
start_time_cpu_us,
jailer_cpu_time_us: 0,
extra_args: arguments.extra_args(),
cgroup_conf,
resource_limits,
uffd_dev_minor,
})
}
pub fn chroot_dir(&self) -> &Path {
self.chroot_dir.as_path()
}
pub fn gid(&self) -> u32 {
self.gid
}
pub fn uid(&self) -> u32 {
self.uid
}
fn validate_exec_file(exec_file: &str) -> Result<(PathBuf, String), JailerError> {
let exec_file_path = canonicalize(exec_file)
.map_err(|err| JailerError::Canonicalize(PathBuf::from(exec_file), err))?;
if !exec_file_path.is_file() {
return Err(JailerError::NotAFile(exec_file_path));
}
let exec_file_name = exec_file_path
.file_name()
.ok_or_else(|| JailerError::ExtractFileName(exec_file_path.clone()))?
.to_str()
// Safe to unwrap as the original `exec_file` is `String`.
.unwrap()
.to_string();
Ok((exec_file_path, exec_file_name))
}
fn parse_resource_limits(
resource_limits: &mut ResourceLimits,
args: &[String],
) -> Result<(), JailerError> {
for arg in args {
let (name, value) = arg
.split_once('=')
.ok_or_else(|| JailerError::ResLimitFormat(arg.to_string()))?;
let limit_value = value
.parse::<u64>()
.map_err(|err| JailerError::ResLimitValue(value.to_string(), err.to_string()))?;
match name {
FSIZE_ARG => resource_limits.set_file_size(limit_value),
NO_FILE_ARG => resource_limits.set_no_file(limit_value),
_ => return Err(JailerError::ResLimitArgument(name.to_string())),
}
}
Ok(())
}
fn exec_into_new_pid_ns(&mut self, chroot_exec_file: PathBuf) -> Result<(), JailerError> {
// https://man7.org/linux/man-pages/man7/pid_namespaces.7.html
// > a process in an ancestor namespace can send signals to the "init" process of a child
// > PID namespace only if the "init" process has established a handler for that signal.
//
// Firecracker (i.e. the "init" process of the new PID namespace) sets up handlers for some
// signals including SIGHUP and jailer exits soon after spawning firecracker into a new PID
// namespace. If the jailer process is a session leader and its exit happens after
// firecracker configures the signal handlers, SIGHUP will be sent to firecracker and be
// caught by the handler unexpectedly.
//
// In order to avoid the above issue, if jailer is a session leader, creates a new session
// and makes the child process (i.e. firecracker) become the leader of the new session to
// not get SIGHUP on the exit of jailer.
// Check whether jailer is a session leader or not before clone().
// Note that, if `--daemonize` is passed, jailer is always not a session leader. This is
// because we use the double fork method, making itself not a session leader.
let is_session_leader = match self.daemonize {
true => false,
false => {
// SAFETY: Safe because it doesn't take any input parameters.
let sid = SyscallReturnCode(unsafe { libc::getsid(0) })
.into_result()
.map_err(JailerError::GetSid)?;
// SAFETY: Safe because it doesn't take any input parameters.
let ppid = SyscallReturnCode(unsafe { libc::getpid() })
.into_result()
.map_err(JailerError::GetPid)?;
sid == ppid
}
};
// Duplicate the current process. The child process will belong to the previously created
// PID namespace. The current process will not be moved into the newly created namespace,
// but its first child will assume the role of init(1) in the new namespace.
let pid = clone(std::ptr::null_mut(), libc::CLONE_NEWPID)?;
match pid {
0 => {
if is_session_leader {
// SAFETY: Safe bacause it doesn't take any input parameters.
SyscallReturnCode(unsafe { libc::setsid() })
.into_empty_result()
.map_err(JailerError::SetSid)?;
}
Err(JailerError::Exec(self.exec_command(chroot_exec_file)))
}
child_pid => {
// Save the PID of the process running the exec file provided
// inside <chroot_exec_file>.pid file.
self.save_exec_file_pid(child_pid, chroot_exec_file)?;
// SAFETY: This is safe because 0 is valid input to exit.
unsafe { libc::exit(0) }
}
}
}
fn save_exec_file_pid(
&mut self,
pid: i32,
chroot_exec_file: PathBuf,
) -> Result<(), JailerError> {
let chroot_exec_file_str = chroot_exec_file
.to_str()
.ok_or_else(|| JailerError::ExtractFileName(chroot_exec_file.clone()))?;
let pid_file_path =
PathBuf::from(format!("{}{}", chroot_exec_file_str, PID_FILE_EXTENSION));
let mut pid_file = OpenOptions::new()
.write(true)
.create_new(true)
.open(pid_file_path.clone())
.map_err(|err| JailerError::FileOpen(pid_file_path.clone(), err))?;
// Write PID to file.
write!(pid_file, "{}", pid).map_err(|err| JailerError::Write(pid_file_path, err))
}
fn get_userfaultfd_minor_dev_number() -> Result<u32, UserfaultfdParseError> {
let buf = read_to_string("/proc/misc")?;
for line in buf.lines() {
let dev: Vec<&str> = line.split(' ').collect();
if dev.len() < 2 {
continue;
}
if dev[1] == "userfaultfd" {
return Ok(dev[0].parse::<u32>()?);
}
}
Err(UserfaultfdParseError::NotFound)
}
fn mknod_and_own_dev(
&self,
dev_path: &CStr,
dev_major: u32,
dev_minor: u32,
) -> Result<(), JailerError> {
// As per sysstat.h:
// S_IFCHR -> character special device
// S_IRUSR -> read permission, owner
// S_IWUSR -> write permission, owner
// See www.kernel.org/doc/Documentation/networking/tuntap.txt, 'Configuration' chapter for
// more clarity.
// SAFETY: This is safe because dev_path is CStr, and hence null-terminated.
SyscallReturnCode(unsafe {
libc::mknod(
dev_path.as_ptr(),
libc::S_IFCHR | libc::S_IRUSR | libc::S_IWUSR,
libc::makedev(dev_major, dev_minor),
)
})
.into_empty_result()
.map_err(|err| JailerError::MknodDev(err, dev_path.to_str().unwrap().to_owned()))?;
// SAFETY: This is safe because dev_path is CStr, and hence null-terminated.
SyscallReturnCode(unsafe { libc::chown(dev_path.as_ptr(), self.uid(), self.gid()) })
.into_empty_result()
// Safe to unwrap as we provided valid file names.
.map_err(|err| {
JailerError::ChangeFileOwner(PathBuf::from(dev_path.to_str().unwrap()), err)
})
}
/// Creates `folder` inside the jail (parents included), restricts its
/// permission bits, and chowns it to the jailed uid/gid.
fn setup_jailed_folder(&self, folder: impl AsRef<Path>) -> Result<(), JailerError> {
    let path = folder.as_ref();
    // Create the directory and any missing parents.
    fs::create_dir_all(path).map_err(|err| JailerError::CreateDir(path.to_owned(), err))?;
    // Apply the jailer's folder permission policy.
    let perms = Permissions::from_mode(FOLDER_PERMISSIONS);
    fs::set_permissions(path, perms).map_err(|err| JailerError::Chmod(path.to_owned(), err))?;
    // Hand ownership over to the jailed uid/gid.
    let c_path = CString::new(path.to_str().unwrap()).unwrap();
    // SAFETY: `c_path` is a CString and hence null-terminated.
    let rc = unsafe { libc::chown(c_path.as_ptr(), self.uid(), self.gid()) };
    SyscallReturnCode(rc)
        .into_empty_result()
        .map_err(|err| JailerError::ChangeFileOwner(path.to_owned(), err))
}
/// Copies the executable into the chroot root and returns its file name.
fn copy_exec_to_chroot(&mut self) -> Result<OsString, JailerError> {
    let exec_file_name = self
        .exec_file_path
        .file_name()
        .ok_or_else(|| JailerError::ExtractFileName(self.exec_file_path.clone()))?
        .to_owned();
    let dest = self.chroot_dir.join(&exec_file_name);
    // A copy is preferred over a hard link for 2 reasons:
    // 1. hard-linking is not possible if the file is on another device;
    // 2. while hard-linking would save disk space and memory by sharing parts
    //    of the Firecracker binary (like the executable .text section), this
    //    latter part is not desirable in Firecracker's threat model. Copying
    //    prevents 2 Firecracker processes from sharing memory.
    fs::copy(&self.exec_file_path, &dest)
        .map_err(|err| JailerError::Copy(self.exec_file_path.clone(), dest.clone(), err))?;
    Ok(exec_file_name)
}
/// Moves the current process into the network namespace backed by `path`.
fn join_netns(path: &str) -> Result<(), JailerError> {
    // The fd backing the file is closed automatically when `netns` is
    // dropped at the end of this scope.
    let netns = File::open(path).map_err(|err| JailerError::FileOpen(PathBuf::from(path), err))?;
    // SAFETY: Safe because we are passing a valid fd and namespace flag.
    let rc = unsafe { libc::setns(netns.as_raw_fd(), libc::CLONE_NEWNET) };
    SyscallReturnCode(rc)
        .into_empty_result()
        .map_err(JailerError::SetNetNs)
}
/// Builds and `exec()`s the Firecracker command line inside the jail.
/// `exec` replaces the process image, so this only returns on failure,
/// yielding the `io::Error` that prevented the exec.
fn exec_command(&self, chroot_exec_file: PathBuf) -> io::Error {
    // Argument order is significant for the resulting command line and is
    // kept exactly as before.
    let mut cmd = Command::new(chroot_exec_file);
    cmd.args(["--id", &self.id])
        .args(["--start-time-us", &self.start_time_us.to_string()])
        .args([
            "--start-time-cpu-us",
            &get_time_us(ClockType::ProcessCpu).to_string(),
        ])
        .args(["--parent-cpu-time-us", &self.jailer_cpu_time_us.to_string()])
        .stdin(Stdio::inherit())
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .uid(self.uid())
        .gid(self.gid())
        .args(&self.extra_args);
    cmd.exec()
}
// Mirrors the host's cpu0 cache topology files from /sys into the chroot so
// the jailed process can still read them after chroot().
#[cfg(target_arch = "aarch64")]
fn copy_cache_info(&self) -> Result<(), JailerError> {
    use crate::{readln_special, to_cstring, writeln_special};
    const HOST_CACHE_INFO: &str = "/sys/devices/system/cpu/cpu0/cache";
    // Based on https://elixir.free-electrons.com/linux/v4.9.62/source/arch/arm64/kernel/cacheinfo.c#L29.
    const MAX_CACHE_LEVEL: u8 = 7;
    // These are the files that we need to copy in the chroot so that we can create the
    // cache topology.
    const FOLDER_HIERARCHY: [&str; 6] = [
        "size",
        "level",
        "type",
        "shared_cpu_map",
        "coherency_line_size",
        "number_of_sets",
    ];
    // We create the cache folder inside the chroot and then change its permissions.
    let jailer_cache_dir =
        Path::new(self.chroot_dir()).join("sys/devices/system/cpu/cpu0/cache/");
    fs::create_dir_all(&jailer_cache_dir)
        .map_err(|err| JailerError::CreateDir(jailer_cache_dir.to_owned(), err))?;
    for index in 0..(MAX_CACHE_LEVEL + 1) {
        let index_folder = format!("index{}", index);
        let host_path = PathBuf::from(HOST_CACHE_INFO).join(&index_folder);
        if fs::metadata(&host_path).is_err() {
            // It means the folder does not exist, i.e. we exhausted the number of cache
            // levels existent on the host.
            break;
        }
        // We now create the destination folder in the jailer.
        let jailer_path = jailer_cache_dir.join(&index_folder);
        fs::create_dir_all(&jailer_path)
            .map_err(|err| JailerError::CreateDir(jailer_path.to_owned(), err))?;
        // We now read the contents of the current directory and copy the files we are
        // interested in to the destination path.
        for entry in FOLDER_HIERARCHY.iter() {
            let host_cache_file = host_path.join(entry);
            let jailer_cache_file = jailer_path.join(entry);
            // Files that cannot be read on the host are silently skipped: only
            // what exists is mirrored.
            if let Ok(line) = readln_special(&host_cache_file) {
                writeln_special(&jailer_cache_file, line)?;
                // We now change the ownership of the copied file to the jailed uid/gid.
                let dest_path_cstr = to_cstring(&jailer_cache_file)?;
                // SAFETY: Safe because dest_path_cstr is null-terminated.
                SyscallReturnCode(unsafe {
                    libc::chown(dest_path_cstr.as_ptr(), self.uid(), self.gid())
                })
                .into_empty_result()
                .map_err(|err| {
                    JailerError::ChangeFileOwner(jailer_cache_file.to_owned(), err)
                })?;
            }
        }
    }
    Ok(())
}
// Mirrors the host's MIDR_EL1 identification register file from /sys into
// the chroot so the jailed process can read it after chroot().
#[cfg(target_arch = "aarch64")]
fn copy_midr_el1_info(&self) -> Result<(), JailerError> {
    use crate::{readln_special, to_cstring, writeln_special};
    const HOST_MIDR_EL1_INFO: &str = "/sys/devices/system/cpu/cpu0/regs/identification";
    // Recreate the host directory layout inside the chroot.
    let jailed_dir =
        Path::new(self.chroot_dir()).join("sys/devices/system/cpu/cpu0/regs/identification/");
    fs::create_dir_all(&jailed_dir)
        .map_err(|err| JailerError::CreateDir(jailed_dir.to_owned(), err))?;
    let host_file = PathBuf::from(format!("{}/midr_el1", HOST_MIDR_EL1_INFO));
    let jailed_file = jailed_dir.join("midr_el1");
    // Read the host's MIDR_EL1 value and write it into the jail.
    let contents = readln_special(&host_file)?;
    writeln_special(&jailed_file, contents)?;
    // Hand the copied file over to the jailed uid/gid.
    let dest_path_cstr = to_cstring(&jailed_file)?;
    // SAFETY: Safe because `dest_path_cstr` is null-terminated.
    let rc = unsafe { libc::chown(dest_path_cstr.as_ptr(), self.uid(), self.gid()) };
    SyscallReturnCode(rc)
        .into_empty_result()
        .map_err(|err| JailerError::ChangeFileOwner(jailed_file.to_owned(), err))?;
    Ok(())
}
/// Jails the current process and launches the target binary.
///
/// Sequence: copy the executable into the chroot, optionally join a network
/// namespace, install rlimits and cgroups, chroot, create the required /dev
/// nodes, optionally daemonize via double fork, then exec. On success this
/// never returns, because `exec` replaces the process image.
pub fn run(mut self) -> Result<(), JailerError> {
    let exec_file_name = self.copy_exec_to_chroot()?;
    // After chroot() the binary lives at the chroot's root.
    let chroot_exec_file = PathBuf::from("/").join(exec_file_name);
    // Join the specified network namespace, if applicable.
    if let Some(ref path) = self.netns {
        Env::join_netns(path)?;
    }
    // Set limits on resources.
    self.resource_limits.install()?;
    // We have to setup cgroups at this point, because we can't do it anymore after chrooting.
    if let Some(ref conf) = self.cgroup_conf {
        conf.setup()?;
    }
    // If daemonization was requested, open /dev/null before chrooting.
    let dev_null = if self.daemonize {
        Some(File::open("/dev/null").map_err(JailerError::OpenDevNull)?)
    } else {
        None
    };
    // Host cache/identification info must be mirrored before chroot(), while
    // the host's /sys is still reachable.
    #[cfg(target_arch = "aarch64")]
    self.copy_cache_info()?;
    #[cfg(target_arch = "aarch64")]
    self.copy_midr_el1_info()?;
    // Jail self.
    chroot(self.chroot_dir())?;
    // This will not only create necessary directories, but will also change ownership
    // for all of them.
    FOLDER_HIERARCHY
        .iter()
        .try_for_each(|f| self.setup_jailed_folder(f))?;
    // Here we are creating the /dev/kvm and /dev/net/tun devices inside the jailer.
    // Following commands can be translated into bash like this:
    // $: mkdir -p $chroot_dir/dev/net
    // $: dev_net_tun_path={$chroot_dir}/"tun"
    // $: mknod $dev_net_tun_path c 10 200
    // www.kernel.org/doc/Documentation/networking/tuntap.txt specifies 10 and 200 as the major
    // and minor for the /dev/net/tun device.
    self.mknod_and_own_dev(DEV_NET_TUN, DEV_NET_TUN_MAJOR, DEV_NET_TUN_MINOR)?;
    // Do the same for /dev/kvm with (major, minor) = (10, 232).
    self.mknod_and_own_dev(DEV_KVM, DEV_KVM_MAJOR, DEV_KVM_MINOR)?;
    // And for /dev/urandom with (major, minor) = (1, 9).
    // If the device is not accessible on the host, output a warning to inform user that MMDS
    // version 2 will not be available to use. Deliberately best-effort: failure
    // here only degrades functionality, it does not abort the jailer.
    let _ = self
        .mknod_and_own_dev(DEV_URANDOM, DEV_URANDOM_MAJOR, DEV_URANDOM_MINOR)
        .map_err(|err| {
            println!(
                "Warning! Could not create /dev/urandom device inside jailer: {}.",
                err
            );
            println!("MMDS version 2 will not be available to use.");
        });
    // If we have a minor version for /dev/userfaultfd the device is present on the host.
    // Expose the device in the jailed environment.
    if let Some(minor) = self.uffd_dev_minor {
        self.mknod_and_own_dev(DEV_UFFD_PATH, DEV_UFFD_MAJOR, minor)?;
    }
    // CPU time consumed by the jailer so far; forwarded to Firecracker.
    self.jailer_cpu_time_us = get_time_us(ClockType::ProcessCpu) - self.start_time_cpu_us;
    // Daemonize before exec, if so required (when the dev_null variable != None).
    if let Some(dev_null) = dev_null {
        // We follow the double fork method to daemonize the jailer referring to
        // https://0xjet.github.io/3OHA/2022/04/11/post.html
        // setsid() will fail if the calling process is a process group leader.
        // By calling fork(), we guarantee that the newly created process inherits
        // the PGID from its parent and, therefore, is not a process group leader.
        // SAFETY: Safe because it's a library function.
        let child_pid = unsafe { libc::fork() };
        if child_pid < 0 {
            return Err(JailerError::Daemonize(io::Error::last_os_error()));
        }
        if child_pid != 0 {
            // parent exiting
            exit(0);
        }
        // Call setsid() in child
        // SAFETY: Safe because it's a library function.
        SyscallReturnCode(unsafe { libc::setsid() })
            .into_empty_result()
            .map_err(JailerError::SetSid)?;
        // Meter CPU usage after first fork()
        self.jailer_cpu_time_us += get_time_us(ClockType::ProcessCpu);
        // Daemons should not have controlling terminals.
        // If a daemon has a controlling terminal, it can receive signals
        // from it that might cause it to halt or exit unexpectedly.
        // The second fork() ensures that the grandchild is not a session
        // leader and thus cannot reacquire a controlling terminal.
        // SAFETY: Safe because it's a library function.
        let grandchild_pid = unsafe { libc::fork() };
        if grandchild_pid < 0 {
            return Err(JailerError::Daemonize(io::Error::last_os_error()));
        }
        if grandchild_pid != 0 {
            // child exiting
            exit(0);
        }
        // grandchild is the daemon
        // Replace the stdio file descriptors with the /dev/null fd.
        dup2(dev_null.as_raw_fd(), STDIN_FILENO)?;
        dup2(dev_null.as_raw_fd(), STDOUT_FILENO)?;
        dup2(dev_null.as_raw_fd(), STDERR_FILENO)?;
        // Meter CPU usage after second fork()
        self.jailer_cpu_time_us += get_time_us(ClockType::ProcessCpu);
    }
    // If specified, exec the provided binary into a new PID namespace.
    if self.new_pid_ns {
        self.exec_into_new_pid_ns(chroot_exec_file)
    } else {
        // Record the pid of the to-be Firecracker process, then exec.
        // `exec_command` only returns on failure, hence the Err wrapping.
        self.save_exec_file_pid(id().try_into().unwrap(), chroot_exec_file.clone())?;
        Err(JailerError::Exec(self.exec_command(chroot_exec_file)))
    }
}
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use std::fs::create_dir_all;
use std::os::linux::fs::MetadataExt;
use vmm_sys_util::rand;
use vmm_sys_util::tempdir::TempDir;
use vmm_sys_util::tempfile::TempFile;
use super::*;
use crate::build_arg_parser;
use crate::cgroup::test_util::MockCgroupFs;
// Generates a unique scratch path under /tmp for a fake Firecracker binary.
fn get_pseudo_exec_file_path() -> String {
    let suffix = rand::rand_alphanumerics(4).into_string().unwrap();
    format!("/tmp/{}/pseudo_firecracker_exec_file", suffix)
}
// Bundle of borrowed command-line argument values used to exercise the
// jailer's argument parsing in tests.
#[derive(Debug, Clone)]
struct ArgVals<'a> {
    pub id: &'a str,
    pub exec_file: &'a str,
    pub uid: &'a str,
    pub gid: &'a str,
    pub chroot_base: &'a str,
    pub netns: Option<&'a str>,
    pub daemonize: bool,
    pub new_pid_ns: bool,
    pub cgroups: Vec<&'a str>,
    pub resource_limits: Vec<&'a str>,
    pub parent_cgroup: Option<&'a str>,
}
impl<'a> ArgVals<'a> {
pub fn new(pseudo_exec_file_path: &'a str) -> ArgVals<'a> {
let pseudo_exec_file_dir = Path::new(&pseudo_exec_file_path).parent().unwrap();
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/jailer/src/resource_limits.rs | src/jailer/src/resource_limits.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fmt;
use std::fmt::{Display, Formatter};
use vmm_sys_util::syscall::SyscallReturnCode;
use super::JailerError;
// Default limit for the maximum number of file descriptors open at a time.
const NO_FILE: u64 = 2048;
// File size resource argument name.
pub(crate) const FSIZE_ARG: &str = "fsize";
// Number of files resource argument name.
pub(crate) const NO_FILE_ARG: &str = "no-file";

// Process resources the jailer can cap via setrlimit(2).
#[derive(Debug, Clone, Copy)]
pub enum Resource {
    // Size of created files (maps to libc::RLIMIT_FSIZE).
    RlimitFsize,
    // Number of open file descriptors (maps to libc::RLIMIT_NOFILE).
    RlimitNoFile,
}
impl From<Resource> for u32 {
    // Maps a `Resource` onto the numeric rlimit id as a u32.
    //
    // The definition of the libc::RLIMIT_* constants depends on target_env:
    // * "musl" -> c_int (an i32)
    // * "gnu"  -> __rlimit_resource_t, a c_uint (a u32)
    // hence the casts (and the lint allowances) below.
    #[allow(clippy::unnecessary_cast)]
    #[allow(clippy::cast_possible_wrap)]
    fn from(resource: Resource) -> u32 {
        match resource {
            Resource::RlimitFsize => libc::RLIMIT_FSIZE as u32,
            Resource::RlimitNoFile => libc::RLIMIT_NOFILE as u32,
        }
    }
}
impl From<Resource> for i32 {
    // Maps a `Resource` onto the numeric rlimit id as an i32.
    //
    // The definition of the libc::RLIMIT_* constants depends on target_env:
    // * "musl" -> c_int (an i32)
    // * "gnu"  -> __rlimit_resource_t, a c_uint (a u32)
    // hence the casts (and the lint allowances) below.
    #[allow(clippy::unnecessary_cast)]
    #[allow(clippy::cast_possible_wrap)]
    fn from(resource: Resource) -> i32 {
        match resource {
            Resource::RlimitFsize => libc::RLIMIT_FSIZE as i32,
            Resource::RlimitNoFile => libc::RLIMIT_NOFILE as i32,
        }
    }
}
impl Display for Resource {
    // Human-readable resource description, used in error messages.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let description = match self {
            Resource::RlimitFsize => "size of file",
            Resource::RlimitNoFile => "number of file descriptors",
        };
        write!(f, "{}", description)
    }
}
// Resource limits to install for the jailed process.
#[derive(Clone, Copy, Debug)]
pub struct ResourceLimits {
    // Maximum size of files the process may create; None leaves the
    // system default untouched.
    file_size: Option<u64>,
    // Maximum number of open file descriptors.
    no_file: u64,
}

impl Default for ResourceLimits {
    // By default: no file-size cap, file descriptors capped at NO_FILE.
    fn default() -> Self {
        ResourceLimits {
            file_size: None,
            no_file: NO_FILE,
        }
    }
}
impl ResourceLimits {
    /// Applies the configured limits to the current process via setrlimit(2).
    pub fn install(self) -> Result<(), JailerError> {
        // The file size limit is optional; install it only when configured.
        if let Some(file_size) = self.file_size {
            ResourceLimits::set_limit(Resource::RlimitFsize, file_size)?;
        }
        // The file-descriptor limit always has a value (default or user-set).
        ResourceLimits::set_limit(Resource::RlimitNoFile, self.no_file)
    }

    /// Sets both the soft and hard limit of `resource` to `target`.
    fn set_limit(resource: Resource, target: libc::rlim_t) -> Result<(), JailerError> {
        let limits = libc::rlimit {
            rlim_cur: target,
            rlim_max: target,
        };
        // SAFETY: Safe because `resource` converts into a known-valid rlimit
        // id and `&limits` is a live, fully-initialized struct.
        SyscallReturnCode(unsafe { libc::setrlimit(resource.into(), &limits) })
            .into_empty_result()
            .map_err(|_| JailerError::Setrlimit(resource.to_string()))
    }

    /// Configures a maximum size for files created by the process.
    pub fn set_file_size(&mut self, file_size: u64) {
        self.file_size = Some(file_size);
    }

    /// Configures the maximum number of open file descriptors.
    pub fn set_no_file(&mut self, no_file: u64) {
        self.no_file = no_file;
    }
}
#[cfg(test)]
mod tests {
    #![allow(clippy::undocumented_unsafe_blocks)]
    use super::*;

    // The From conversions must agree with the raw libc constants.
    #[test]
    #[allow(clippy::unnecessary_cast)]
    fn test_from_resource() {
        assert_eq!(u32::from(Resource::RlimitFsize), libc::RLIMIT_FSIZE as u32);
        assert_eq!(
            u32::from(Resource::RlimitNoFile),
            libc::RLIMIT_NOFILE as u32
        );
    }

    // Display strings feed into error messages (see JailerError::Setrlimit).
    #[test]
    fn test_display_resource() {
        assert_eq!(
            Resource::RlimitFsize.to_string(),
            "size of file".to_string()
        );
        assert_eq!(
            Resource::RlimitNoFile.to_string(),
            "number of file descriptors".to_string()
        );
    }

    // Defaults and setter behavior.
    #[test]
    fn test_default_resource_limits() {
        let mut rlimits = ResourceLimits::default();
        assert!(rlimits.file_size.is_none());
        assert_eq!(rlimits.no_file, NO_FILE);
        rlimits.set_file_size(1);
        assert_eq!(rlimits.file_size.unwrap(), 1);
        rlimits.set_no_file(1);
        assert_eq!(rlimits.no_file, 1);
    }

    // NOTE(review): this test (and test_install below) mutates the real
    // rlimits of the test process, and relies on the initial limits differing
    // from the values set here.
    #[test]
    fn test_set_resource_limits() {
        let resource = Resource::RlimitNoFile;
        let new_limit = NO_FILE - 1;
        // Get current file descriptor limit.
        let mut rlim: libc::rlimit = libc::rlimit {
            rlim_cur: 0,
            rlim_max: 0,
        };
        unsafe { libc::getrlimit(resource.into(), &mut rlim) };
        assert_ne!(rlim.rlim_cur, new_limit);
        assert_ne!(rlim.rlim_max, new_limit);
        // Set the new limit.
        ResourceLimits::set_limit(resource, new_limit).unwrap();
        // Verify the new limit took effect for both soft and hard values.
        let mut rlim: libc::rlimit = libc::rlimit {
            rlim_cur: 0,
            rlim_max: 0,
        };
        unsafe { libc::getrlimit(resource.into(), &mut rlim) };
        assert_eq!(rlim.rlim_cur, new_limit);
        assert_eq!(rlim.rlim_max, new_limit);
    }

    #[test]
    fn test_install() {
        // Setup the resource limits
        let mut rlimits = ResourceLimits::default();
        let new_file_size_limit = 2097151;
        let new_no_file_limit = 1000;
        rlimits.set_file_size(new_file_size_limit);
        rlimits.set_no_file(new_no_file_limit);
        // Install the new limits to file size and
        // the number of file descriptors
        rlimits.install().unwrap();
        // Verify the new limit for file size
        let file_size_resource = Resource::RlimitFsize;
        let mut file_size_limit: libc::rlimit = libc::rlimit {
            rlim_cur: 0,
            rlim_max: 0,
        };
        unsafe { libc::getrlimit(file_size_resource.into(), &mut file_size_limit) };
        assert_eq!(file_size_limit.rlim_cur, new_file_size_limit);
        assert_eq!(file_size_limit.rlim_max, new_file_size_limit);
        // Verify the new limit for the number of file descriptors
        let file_descriptor_resource = Resource::RlimitNoFile;
        let mut file_descriptor_limit: libc::rlimit = libc::rlimit {
            rlim_cur: 0,
            rlim_max: 0,
        };
        unsafe { libc::getrlimit(file_descriptor_resource.into(), &mut file_descriptor_limit) };
        assert_eq!(file_descriptor_limit.rlim_cur, new_no_file_limit);
        assert_eq!(file_descriptor_limit.rlim_max, new_no_file_limit);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/jailer/src/cgroup.rs | src/jailer/src/cgroup.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::collections::{HashMap, HashSet};
use std::fmt::Debug;
use std::fs::{self, File};
use std::io::{BufRead, BufReader};
use std::path::{Path, PathBuf};
use std::process;
use regex::Regex;
use crate::{JailerError, readln_special, writeln_special};
// Holds information on a cgroup mount point discovered on the system.
#[derive(Debug)]
struct CgroupMountPoint {
    // Mount directory, e.g. "/sys/fs/cgroup/cpu,cpuacct".
    dir: String,
    // Mount options; for cgroupv1 these include the mounted controllers.
    options: String,
}

// Holds a cache of discovered mount points and cgroup hierarchies.
#[derive(Debug)]
struct CgroupHierarchies {
    // Hierarchy root path keyed by controller name ("unified" for cgroupsv2).
    hierarchies: HashMap<String, PathBuf>,
    // cgroupv1 mount points, cached so hierarchies can be resolved lazily.
    mount_points: Vec<CgroupMountPoint>,
}
impl CgroupHierarchies {
    // Constructs a new cache of hierarchies and mount points.
    //
    // It parses `proc_mounts_path` (normally /proc/mounts) and, depending on
    // the requested cgroup version `ver`:
    // * v2: records the single unified hierarchy mount point;
    // * v1: caches every cgroup mount point with its options, so the
    //   per-controller hierarchy can be resolved on demand later.
    fn new(ver: u8, proc_mounts_path: &str) -> Result<Self, JailerError> {
        let mut h = CgroupHierarchies {
            hierarchies: HashMap::new(),
            mount_points: Vec::new(),
        };
        // search PROC_MOUNTS for cgroup mount points
        let f = File::open(proc_mounts_path)
            .map_err(|err| JailerError::FileOpen(PathBuf::from(proc_mounts_path), err))?;
        // Regex courtesy of Filippo.
        // This will match on each line from /proc/mounts for both v1 and v2 mount points.
        //
        // /proc/mounts contains lines that look like this:
        // cgroup2 /sys/fs/cgroup/unified cgroup2 rw,nosuid,nodev,noexec,relatime,nsdelegate 0 0
        // cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
        //
        // This Regex will extract:
        // * "/sys/fs/cgroup/unified" in the "dir" capture group.
        // * "2" in the "ver" capture group as the cgroup version taken from "cgroup2"; for v1,
        //   the "ver" capture group will be empty (len = 0).
        // * "[...],relatime,cpu,cpuacct" in the "options" capture group; this is used for
        //   cgroupv1 to determine what controllers are mounted at the location.
        let re = Regex::new(
            r"^([a-z2]*)[[:space:]](?P<dir>.*)[[:space:]]cgroup(?P<ver>2?)[[:space:]](?P<options>.*)[[:space:]]0[[:space:]]0$",
        ).map_err(JailerError::RegEx)?;
        for l in BufReader::new(f).lines() {
            let l = l.map_err(|err| JailerError::ReadLine(PathBuf::from(proc_mounts_path), err))?;
            if let Some(capture) = re.captures(&l) {
                if ver == 2 && capture["ver"].len() == 1 {
                    // Found the cgroupv2 unified mountpoint; with cgroupsv2 there is only one
                    // hierarchy so we insert it in the hashmap to use it later when creating
                    // cgroups
                    h.hierarchies
                        .insert("unified".to_string(), PathBuf::from(&capture["dir"]));
                    break;
                } else if ver == 1 && capture["ver"].is_empty() {
                    // Found a cgroupv1 mountpoint; with cgroupsv1 we can have multiple hierarchies.
                    // Since we don't know which one will be used, we cache the mountpoints now,
                    // and will create the hierarchies on demand when a cgroup is built.
                    h.mount_points.push(CgroupMountPoint {
                        dir: String::from(&capture["dir"]),
                        options: String::from(&capture["options"]),
                    });
                }
            }
        }
        // No usable mount point means the requested cgroup version is not
        // available on this system.
        if h.hierarchies.is_empty() && h.mount_points.is_empty() {
            Err(JailerError::CgroupHierarchyMissing(
                "No hierarchy found for this cgroup version.".to_string(),
            ))
        } else {
            Ok(h)
        }
    }

    // Returns the path to the root of the hierarchy for the controller specified.
    // Cgroups for a controller are arranged in a hierarchy; multiple controllers
    // may share the same hierarchy.
    fn get_v1_hierarchy_path(&mut self, controller: &str) -> Result<&PathBuf, JailerError> {
        // First try and see if the path is already discovered.
        match self.hierarchies.entry(controller.to_string()) {
            Occupied(entry) => Ok(entry.into_mut()),
            Vacant(entry) => {
                // Since the path for this controller type was not already discovered
                // we need to search through the mount points to find it
                let mut path = None;
                for m in self.mount_points.iter() {
                    // A v1 mount's options list the controllers it hosts.
                    if m.options.split(',').any(|x| x == controller) {
                        path = Some(PathBuf::from(&m.dir));
                        break;
                    }
                }
                // It's possible that the controller is not mounted or a bad controller
                // name was specified. Return an error in this case
                match path {
                    Some(p) => Ok(entry.insert(p)),
                    None => Err(JailerError::CgroupControllerUnavailable(
                        controller.to_string(),
                    )),
                }
            }
        }
    }

    // Returns the path to the root of the unified (cgroupsv2) hierarchy.
    pub fn get_v2_hierarchy_path(&self) -> Result<&PathBuf, JailerError> {
        match self.hierarchies.get("unified") {
            Some(entry) => Ok(entry),
            None => Err(JailerError::CgroupHierarchyMissing(
                "cgroupsv2 hierarchy missing".to_string(),
            )),
        }
    }
}
// Allows creation of cgroups on the system for both versions.
#[derive(Debug)]
pub struct CgroupConfigurationBuilder {
    // Cached mount-point/hierarchy info used to resolve controller paths.
    hierarchies: CgroupHierarchies,
    // The configuration being accumulated (v1 or v2 variant).
    cgroup_conf: CgroupConfiguration,
}
impl CgroupConfigurationBuilder {
    // Creates the builder object.
    // It will initialize the CgroupHierarchy cache.
    pub fn new(ver: u8, proc_mounts_path: &str) -> Result<Self, JailerError> {
        Ok(CgroupConfigurationBuilder {
            hierarchies: CgroupHierarchies::new(ver, proc_mounts_path)?,
            // The configuration variant is picked by the requested cgroup
            // version; any other version is rejected up front.
            cgroup_conf: match ver {
                1 => Ok(CgroupConfiguration::V1(HashMap::new())),
                2 => Ok(CgroupConfiguration::V2(HashMap::new())),
                _ => Err(JailerError::CgroupInvalidVersion(ver.to_string())),
            }?,
        })
    }

    // Adds a cgroup property to the configuration.
    //
    // `file` must look like "<controller>.<property>"; the property/value
    // pair is attached to the cgroup <hierarchy>/<parent_cg>/<id> of the
    // matching controller, creating that cgroup entry on first use.
    pub fn add_cgroup_property(
        &mut self,
        file: String,
        value: String,
        id: &str,
        parent_cg: &Path,
    ) -> Result<(), JailerError> {
        match self.cgroup_conf {
            CgroupConfiguration::V1(ref mut cgroup_conf_v1) => {
                let controller = get_controller_from_filename(&file)?;
                let path = self.hierarchies.get_v1_hierarchy_path(controller)?;
                // One CgroupV1 per controller, created lazily.
                let cgroup = cgroup_conf_v1
                    .entry(String::from(controller))
                    .or_insert(CgroupV1::new(id, parent_cg, path)?);
                cgroup.add_property(file, value)?;
                Ok(())
            }
            CgroupConfiguration::V2(ref mut cgroup_conf_v2) => {
                let path = self.hierarchies.get_v2_hierarchy_path()?;
                // v2 has a single unified cgroup, created lazily.
                let cgroup = cgroup_conf_v2
                    .entry(String::from("unified"))
                    .or_insert(CgroupV2::new(id, parent_cg, path)?);
                cgroup.add_property(file, value)?;
                Ok(())
            }
        }
    }

    // Consumes the builder and returns the accumulated configuration.
    pub fn build(self) -> CgroupConfiguration {
        self.cgroup_conf
    }

    // Returns the path to the unified controller.
    pub fn get_v2_hierarchy_path(&self) -> Result<&PathBuf, JailerError> {
        self.hierarchies.get_v2_hierarchy_path()
    }
}
#[derive(Debug)]
struct CgroupProperty {
    file: String,  // file representing the cgroup (e.g cpuset.mems).
    value: String, // value that will be written into the file.
}

#[derive(Debug)]
struct CgroupBase {
    // Property/value pairs to be written into this cgroup's files.
    properties: Vec<CgroupProperty>,
    location: PathBuf, // microVM cgroup location for the specific controller.
}

// A cgroup within a cgroupv1 controller hierarchy.
#[derive(Debug)]
pub struct CgroupV1 {
    base: CgroupBase,
    cg_parent_depth: u16, // depth of the nested cgroup hierarchy
}

// A cgroup within the cgroupv2 unified hierarchy.
#[derive(Debug)]
pub struct CgroupV2 {
    base: CgroupBase,
    // Controllers listed in the hierarchy root's cgroup.controllers file.
    available_controllers: HashSet<String>,
}
pub trait Cgroup: Debug {
    // Adds a property (file-value pair) to the cgroup.
    fn add_property(&mut self, file: String, value: String) -> Result<(), JailerError>;

    // Writes all of the cgroup's property values into the corresponding
    // cgroup property files.
    fn write_values(&self) -> Result<(), JailerError>;

    // Assigns the current process to the cgroup.
    fn attach_pid(&self) -> Result<(), JailerError>;
}
// A complete cgroup configuration, keyed by controller name
// ("unified" for cgroupsv2).
#[derive(Debug)]
pub enum CgroupConfiguration {
    V1(HashMap<String, CgroupV1>),
    V2(HashMap<String, CgroupV2>),
}

impl CgroupConfiguration {
    // Materializes every configured cgroup and attaches the current process.
    pub fn setup(&self) -> Result<(), JailerError> {
        match self {
            Self::V1(conf) => setup_cgroup_conf(conf),
            Self::V2(conf) => setup_cgroup_conf(conf),
        }
    }
}
// If we call inherit_from_parent_aux(.../A/B/C, file, condition), the following will happen:
// 1) If .../A/B/C/file does not exist, or if .../A/B/file does not exist, return an error.
// 2) If .../A/B/file is not empty, write the first line of .../A/B/file into .../A/B/C/file
// and return.
// 3) If ../A/B/file exists but it is empty, call inherit_from_parent_aux(.../A/B, file, false).
// 4) If .../A/B/file is no longer empty, write the first line of .../A/B/file into
// .../A/B/C/file, and return.
// 5) Otherwise, return an error.
// How is this helpful? When creating cgroup folders for the jailer Firecracker instance, the jailer
// will create a hierarchy that looks like <cgroup_base>/<parent_cgroup>/<id>. Depending on each
// particular cgroup controller, <cgroup_base> contains a number of configuration files. These are
// not actually present on a disk; they are special files exposed by the controller, and they
// usually contain a single line with some configuration value(s). When the "parent_cgroup" and <id>
// subfolders are created, configuration files with the same name appear automatically in the new
// folders, but their contents are not always automatically populated. Moreover,
// if <cgroup_base>/<parent_cgroup>/some_file is empty, then we cannot have a non-empty file with
// at <cgroup_base>/<parent_cgroup>/<id>/some_file. The inherit_from_parent function (which is based
// on the following helper function) helps with propagating the values.
// There is also a potential race condition mentioned below. Here is what it refers to: let's say we
// start multiple jailer processes, and one of them calls
// inherit_from_parent_aux(/A/<parent_cgroup>/id1, file, true), and hits case number 3) from the
// list above, thus recursively calling inherit_from_parent_aux(/A/<parent_cgroup>, file, false).
// It's entirely possible there was another process in the exact same situation, and that process
// gets to write something to /A/<parent_cgroup>/file first. In this case, the recursive call made
// by the first process to inherit_from_parent_aux(/A/<parent_cgroup>, file, false) may fail when
// writing to /A/<parent_cgroup>/file, but we can still continue, because step 4) only cares about
// the file no longer being empty, regardless of who actually got to populate its contents.
// Propagates the value of `file_name` from `path`'s parent into `path`,
// recursing up to `retry_depth` ancestor levels when the parent's copy is
// itself still empty. See the block comment above for the full algorithm and
// the benign race it tolerates.
fn inherit_from_parent_aux(
    path: &Path,
    file_name: &str,
    retry_depth: u16,
) -> Result<(), JailerError> {
    // The function with_file_name() replaces the last component of a path with the given name.
    let parent_file = path.with_file_name(file_name);
    let mut line = readln_special(&parent_file)?;
    if line.is_empty() {
        if retry_depth > 0 {
            // We have to borrow "parent" from "parent_file" as opposed to "path", because then
            // we wouldn't be able to mutably borrow path at the end of this function (at least not
            // according to how the Rust borrow checker operates right now :-s)
            let parent = parent_file
                .parent()
                .ok_or_else(|| JailerError::MissingParent(parent_file.clone()))?;
            // Trying to avoid the race condition described above. We don't care about the result,
            // because we check once more if line.is_empty() after the end of this block.
            let _ = inherit_from_parent_aux(parent, file_name, retry_depth - 1);
            line = readln_special(&parent_file)?;
        }
        // Still empty after exhausting the retry budget: give up.
        if line.is_empty() {
            return Err(JailerError::CgroupInheritFromParent(
                path.to_path_buf(),
                file_name.to_string(),
            ));
        }
    }
    writeln_special(&path.join(file_name), &line)?;
    Ok(())
}
// Public-facing wrapper over the recursive helper; `depth` bounds how many
// ancestor levels may be consulted when propagating `file_name`'s value.
fn inherit_from_parent(path: &Path, file_name: &str, depth: u16) -> Result<(), JailerError> {
    inherit_from_parent_aux(path, file_name, depth)
}
// Extract the controller name from the cgroup file. The cgroup file must follow
// this format: <cgroup_controller>.<cgroup_property>.
// Returns CgroupInvalidFile when `file` contains no '.' separator.
fn get_controller_from_filename(file: &str) -> Result<&str, JailerError> {
    // `split_once` enforces the <controller>.<property> shape (at least one
    // '.') and returns the part before the first '.', without collecting
    // every segment into a temporary Vec like `split(...).collect()` would.
    let (controller, _property) = file
        .split_once('.')
        .ok_or_else(|| JailerError::CgroupInvalidFile(file.to_string()))?;
    Ok(controller)
}
impl CgroupV1 {
    // Creates a new cgroupsv1 cgroup located at
    // <controller_path>/<parent_cg>/<id>.
    pub fn new(id: &str, parent_cg: &Path, controller_path: &Path) -> Result<Self, JailerError> {
        // Full cgroup location within this controller's hierarchy.
        let location = controller_path.join(parent_cg).join(id);
        // Depth of the parent cgroup (number of path components); used later
        // to bound how far up values may be inherited.
        let depth = parent_cg.components().fold(0u16, |acc, _| acc + 1);
        Ok(CgroupV1 {
            base: CgroupBase {
                properties: Vec::new(),
                location,
            },
            cg_parent_depth: depth,
        })
    }
}
impl Cgroup for CgroupV1 {
    fn add_property(&mut self, file: String, value: String) -> Result<(), JailerError> {
        // v1 properties are not validated here; errors surface when written.
        self.base.properties.push(CgroupProperty { file, value });
        Ok(())
    }

    fn write_values(&self) -> Result<(), JailerError> {
        // Create the cgroup directory for the controller.
        fs::create_dir_all(&self.base.location)
            .map_err(|err| JailerError::CreateDir(self.base.location.clone(), err))?;
        for property in self.base.properties.iter() {
            // Write the corresponding cgroup value. inherit_from_parent is used to
            // correctly propagate the value if not defined.
            inherit_from_parent(&self.base.location, &property.file, self.cg_parent_depth)?;
            writeln_special(&self.base.location.join(&property.file), &property.value)?;
        }
        Ok(())
    }

    fn attach_pid(&self) -> Result<(), JailerError> {
        // cgroupv1 membership is controlled through the "tasks" file.
        let pid = process::id();
        let location = &self.base.location.join("tasks");
        writeln_special(location, pid)?;
        Ok(())
    }
}
impl CgroupV2 {
    // Enables the specified controller along the cgroup nested path.
    // To be able to use a leaf controller within a nested cgroup hierarchy,
    // the controller needs to be enabled by writing to the cgroup.subtree_control
    // of its parent. This rule applies recursively: the recursion below runs
    // upward first, so ancestors are enabled before their descendants.
    fn write_all_subtree_control<P>(path: P, controller: &str) -> Result<(), JailerError>
    where
        P: AsRef<Path> + Debug,
    {
        let cg_subtree_ctrl = path.as_ref().join("cgroup.subtree_control");
        // Past the hierarchy root there is no subtree_control file; stop.
        if !cg_subtree_ctrl.exists() {
            return Ok(());
        }
        let parent = match path.as_ref().parent() {
            Some(p) => p,
            None => {
                // No parent path: enable the controller here and stop.
                writeln_special(&cg_subtree_ctrl, format!("+{}", &controller))?;
                return Ok(());
            }
        };
        Self::write_all_subtree_control(parent, controller)?;
        writeln_special(&cg_subtree_ctrl, format!("+{}", &controller))
    }

    // Returns controllers that can be enabled from the cgroup path specified
    // by the mount_point parameter.
    fn detect_available_controllers<P>(mount_point: P) -> HashSet<String>
    where
        P: AsRef<Path> + Debug,
    {
        let mut controllers = HashSet::new();
        let controller_list_file = mount_point.as_ref().join("cgroup.controllers");
        // An unreadable cgroup.controllers file yields an empty set rather
        // than an error (no controller will be considered available).
        let f = match File::open(controller_list_file) {
            Ok(f) => f,
            Err(_) => return controllers,
        };
        // The file holds space-separated controller names.
        for l in BufReader::new(f).lines().map_while(Result::ok) {
            for controller in l.split(' ') {
                controllers.insert(controller.to_string());
            }
        }
        controllers
    }

    // Creates a new cgroupsv2 cgroup located at <unified_path>/<parent_cg>/<id>.
    pub fn new(id: &str, parent_cg: &Path, unified_path: &Path) -> Result<Self, JailerError> {
        let mut path = unified_path.to_path_buf();
        path.push(parent_cg);
        path.push(id);
        Ok(CgroupV2 {
            base: CgroupBase {
                properties: Vec::new(),
                location: path,
            },
            // Available controllers are read from the hierarchy root.
            available_controllers: Self::detect_available_controllers(unified_path),
        })
    }
}
impl Cgroup for CgroupV2 {
fn add_property(&mut self, file: String, value: String) -> Result<(), JailerError> {
let controller = get_controller_from_filename(&file)?;
if self.available_controllers.contains(controller) {
self.base.properties.push(CgroupProperty { file, value });
Ok(())
} else {
Err(JailerError::CgroupControllerUnavailable(
controller.to_string(),
))
}
}
fn write_values(&self) -> Result<(), JailerError> {
let mut enabled_controllers: HashSet<&str> = HashSet::new();
// Create the cgroup directory for the controller.
fs::create_dir_all(&self.base.location)
.map_err(|err| JailerError::CreateDir(self.base.location.clone(), err))?;
// Ok to unwrap since the path was just created.
let parent = self.base.location.parent().unwrap();
for property in self.base.properties.iter() {
let controller = get_controller_from_filename(&property.file)?;
// enable controllers only once
if !enabled_controllers.contains(controller) {
// Enable the controller in all parent directories
CgroupV2::write_all_subtree_control(parent, controller)?;
enabled_controllers.insert(controller);
}
writeln_special(&self.base.location.join(&property.file), &property.value)?;
}
Ok(())
}
fn attach_pid(&self) -> Result<(), JailerError> {
let pid = process::id();
let location = &self.base.location.join("cgroup.procs");
writeln_special(location, pid)?;
Ok(())
}
}
pub fn setup_cgroup_conf(conf: &HashMap<String, impl Cgroup>) -> Result<(), JailerError> {
// cgroups are iterated two times as some cgroups may require others (e.g cpuset requires
// cpuset.mems and cpuset.cpus) to be set before attaching any pid.
for cgroup in conf.values() {
cgroup.write_values()?;
}
for cgroup in conf.values() {
cgroup.attach_pid()?;
}
Ok(())
}
#[cfg(test)]
pub mod test_util {
use std::fmt::Debug;
use std::fs::{self, File, OpenOptions};
use std::io::Write;
use std::path::{Path, PathBuf};
use vmm_sys_util::tempdir::TempDir;
#[derive(Debug)]
pub struct MockCgroupFs {
mounts_file: File,
// kept to clean up on Drop
_mock_jailer_dir: TempDir,
pub proc_mounts_path: PathBuf,
pub sys_cgroups_path: PathBuf,
}
// Helper object that simulates the layout of the cgroup file system
// This can be used for testing regardless of the availability of a particular
// version of cgroups on the system
impl MockCgroupFs {
pub fn create_file_with_contents<P: AsRef<Path> + Debug>(
filename: P,
contents: &str,
) -> Result<(), std::io::Error> {
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.truncate(true)
.open(&filename)?;
writeln!(file, "{}", contents)?;
Ok(())
}
pub fn new() -> Result<MockCgroupFs, std::io::Error> {
let mock_jailer_dir = TempDir::new().unwrap();
let mock_proc_mounts = mock_jailer_dir.as_path().join("proc/mounts");
let mock_sys_cgroups = mock_jailer_dir.as_path().join("sys_cgroup");
// create a mock /proc/mounts file in a temporary directory
fs::create_dir_all(mock_proc_mounts.parent().unwrap())?;
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.truncate(true)
.open(mock_proc_mounts.clone())?;
Ok(MockCgroupFs {
mounts_file: file,
_mock_jailer_dir: mock_jailer_dir,
proc_mounts_path: mock_proc_mounts,
sys_cgroups_path: mock_sys_cgroups,
})
}
// Populate the mocked proc/mounts file with cgroupv2 entries
// Also create a directory structure that simulates cgroupsv2 layout
pub fn add_v2_mounts(&mut self) -> Result<(), std::io::Error> {
writeln!(
self.mounts_file,
"cgroupv2 {}/unified cgroup2 rw,nosuid,nodev,noexec,relatime,nsdelegate 0 0",
self.sys_cgroups_path.to_str().unwrap(),
)?;
let cg_unified_path = self.sys_cgroups_path.join("unified");
fs::create_dir_all(&cg_unified_path)?;
Self::create_file_with_contents(
cg_unified_path.join("cgroup.controllers"),
"cpuset cpu io memory pids",
)?;
Self::create_file_with_contents(cg_unified_path.join("cgroup.subtree_control"), "")?;
Ok(())
}
// Populate the mocked proc/mounts file with cgroupv1 entries
pub fn add_v1_mounts(&mut self) -> Result<(), std::io::Error> {
let controllers = vec![
"memory",
"net_cls,net_prio",
"pids",
"cpuset",
"cpu,cpuacct",
];
for c in &controllers {
writeln!(
self.mounts_file,
"cgroup {}/{} cgroup rw,nosuid,nodev,noexec,relatime,{} 0 0",
self.sys_cgroups_path.to_str().unwrap(),
c,
c,
)?;
}
Ok(())
}
}
}
#[cfg(test)]
mod tests {
use std::fmt::Debug;
use std::io::{BufReader, Write};
use std::path::PathBuf;
use vmm_sys_util::tempdir::TempDir;
use vmm_sys_util::tempfile::TempFile;
use super::*;
use crate::cgroup::test_util::MockCgroupFs;
// Utility function to read the first line in a file
fn read_first_line<P>(filename: P) -> Result<String, std::io::Error>
where
P: AsRef<Path> + Debug,
{
let file = File::open(filename)?;
let mut reader = BufReader::new(file);
let mut buf = String::new();
reader.read_line(&mut buf)?;
Ok(buf)
}
#[test]
fn test_cgroup_conf_builder_invalid_version() {
let mock_cgroups = MockCgroupFs::new().unwrap();
let builder =
CgroupConfigurationBuilder::new(0, mock_cgroups.proc_mounts_path.to_str().unwrap());
builder.unwrap_err();
}
#[test]
fn test_cgroup_conf_builder_no_mounts() {
let mock_cgroups = MockCgroupFs::new().unwrap();
let builder =
CgroupConfigurationBuilder::new(1, mock_cgroups.proc_mounts_path.to_str().unwrap());
builder.unwrap_err();
}
#[test]
fn test_cgroup_conf_builder_v1() {
let mut mock_cgroups = MockCgroupFs::new().unwrap();
mock_cgroups.add_v1_mounts().unwrap();
let builder =
CgroupConfigurationBuilder::new(1, mock_cgroups.proc_mounts_path.to_str().unwrap());
builder.unwrap();
}
#[test]
fn test_cgroup_conf_builder_v2() {
let mut mock_cgroups = MockCgroupFs::new().unwrap();
mock_cgroups.add_v2_mounts().unwrap();
let builder =
CgroupConfigurationBuilder::new(2, mock_cgroups.proc_mounts_path.to_str().unwrap());
builder.unwrap();
}
#[test]
fn test_cgroup_conf_builder_v2_with_v1_mounts() {
let mut mock_cgroups = MockCgroupFs::new().unwrap();
mock_cgroups.add_v1_mounts().unwrap();
let builder =
CgroupConfigurationBuilder::new(2, mock_cgroups.proc_mounts_path.to_str().unwrap());
builder.unwrap_err();
}
#[test]
fn test_cgroup_conf_builder_v2_no_mounts() {
let mock_cgroups = MockCgroupFs::new().unwrap();
let builder =
CgroupConfigurationBuilder::new(2, mock_cgroups.proc_mounts_path.to_str().unwrap());
builder.unwrap_err();
}
#[test]
fn test_cgroup_conf_builder_v1_with_v2_mounts() {
let mut mock_cgroups = MockCgroupFs::new().unwrap();
mock_cgroups.add_v2_mounts().unwrap();
let builder =
CgroupConfigurationBuilder::new(1, mock_cgroups.proc_mounts_path.to_str().unwrap());
builder.unwrap_err();
}
#[test]
fn test_cgroup_conf_build() {
let mut mock_cgroups = MockCgroupFs::new().unwrap();
mock_cgroups.add_v1_mounts().unwrap();
mock_cgroups.add_v2_mounts().unwrap();
for v in &[1, 2] {
let mut builder = CgroupConfigurationBuilder::new(
*v,
mock_cgroups.proc_mounts_path.to_str().unwrap(),
)
.unwrap();
builder
.add_cgroup_property(
"cpuset.mems".to_string(),
"1".to_string(),
"101",
Path::new("fc_test_cg"),
)
.unwrap();
builder.build();
}
}
#[test]
fn test_cgroup_conf_build_invalid() {
let mut mock_cgroups = MockCgroupFs::new().unwrap();
mock_cgroups.add_v1_mounts().unwrap();
mock_cgroups.add_v2_mounts().unwrap();
for v in &[1, 2] {
let mut builder = CgroupConfigurationBuilder::new(
*v,
mock_cgroups.proc_mounts_path.to_str().unwrap(),
)
.unwrap();
builder
.add_cgroup_property(
"invalid.cg".to_string(),
"1".to_string(),
"101",
Path::new("fc_test_cg"),
)
.unwrap_err();
}
}
#[test]
fn test_cgroup_conf_v1_write_value() {
let mut mock_cgroups = MockCgroupFs::new().unwrap();
mock_cgroups.add_v1_mounts().unwrap();
let mut builder =
CgroupConfigurationBuilder::new(1, mock_cgroups.proc_mounts_path.to_str().unwrap())
.unwrap();
builder
.add_cgroup_property(
"cpuset.mems".to_string(),
"1".to_string(),
"101",
Path::new("fc_test_cgv1"),
)
.unwrap();
let cg_conf = builder.build();
let cg_root = mock_cgroups.sys_cgroups_path.join("cpuset");
// with real cgroups these files are created automatically
// since the mock will not do it automatically, we create it here
fs::create_dir_all(cg_root.join("fc_test_cgv1/101")).unwrap();
writeln_special(&cg_root.join("cpuset.mems"), "0-1").unwrap();
writeln_special(&cg_root.join("fc_test_cgv1/cpuset.mems"), "0-1").unwrap();
writeln_special(&cg_root.join("fc_test_cgv1/101/cpuset.mems"), "0-1").unwrap();
cg_conf.setup().unwrap();
// check that the value was written correctly
assert!(cg_root.join("fc_test_cgv1/101/cpuset.mems").exists());
assert_eq!(
read_first_line(cg_root.join("fc_test_cgv1/101/cpuset.mems")).unwrap(),
"1\n"
);
}
#[test]
fn test_cgroup_conf_v2_write_value() {
let mut mock_cgroups = MockCgroupFs::new().unwrap();
mock_cgroups.add_v2_mounts().unwrap();
let builder =
CgroupConfigurationBuilder::new(2, mock_cgroups.proc_mounts_path.to_str().unwrap());
builder.unwrap();
let mut builder =
CgroupConfigurationBuilder::new(2, mock_cgroups.proc_mounts_path.to_str().unwrap())
.unwrap();
builder
.add_cgroup_property(
"cpuset.mems".to_string(),
"1".to_string(),
"101",
Path::new("fc_test_cgv2"),
)
.unwrap();
let cg_root = mock_cgroups.sys_cgroups_path.join("unified");
assert_eq!(builder.get_v2_hierarchy_path().unwrap(), &cg_root);
let cg_conf = builder.build();
// with real cgroups these files are created automatically
// since the mock will not do it automatically, we create it here
fs::create_dir_all(cg_root.join("fc_test_cgv2/101")).unwrap();
MockCgroupFs::create_file_with_contents(
cg_root.join("fc_test_cgv2/cgroup.subtree_control"),
"",
)
.unwrap();
MockCgroupFs::create_file_with_contents(
cg_root.join("fc_test_cgv2/101/cgroup.subtree_control"),
"",
)
.unwrap();
cg_conf.setup().unwrap();
// check that the value was written correctly
assert!(cg_root.join("fc_test_cgv2/101/cpuset.mems").exists());
assert_eq!(
read_first_line(cg_root.join("fc_test_cgv2/101/cpuset.mems")).unwrap(),
"1\n"
);
// check that the controller was enabled in all parent dirs
assert!(
read_first_line(cg_root.join("cgroup.subtree_control"))
.unwrap()
.contains("cpuset")
);
assert!(
read_first_line(cg_root.join("fc_test_cgv2/cgroup.subtree_control"))
.unwrap()
.contains("cpuset")
);
assert!(
!read_first_line(cg_root.join("fc_test_cgv2/101/cgroup.subtree_control"))
.unwrap()
.contains("cpuset")
);
}
#[test]
fn test_inherit_from_parent() {
// 1. If parent file does not exist, return an error.
// This is /A/B/ .
let dir = TempDir::new().expect("Cannot create temporary directory.");
// This is /A/B/C .
let dir2 = TempDir::new_in(dir.as_path()).expect("Cannot create temporary directory.");
let path2 = PathBuf::from(dir2.as_path());
let result = inherit_from_parent(&path2, "inexistent", 1);
assert!(
matches!(result, Err(JailerError::ReadToString(_, _))),
"{:?}",
result
);
// 2. If parent file exists and is empty, will go one level up, and return error because
// the grandparent file does not exist.
let named_file = TempFile::new_in(dir.as_path()).expect("Cannot create named file.");
let result = inherit_from_parent(&path2, named_file.as_path().to_str().unwrap(), 1);
assert!(
matches!(result, Err(JailerError::CgroupInheritFromParent(_, _))),
"{:?}",
result
);
let child_file = dir2.as_path().join(named_file.as_path().to_str().unwrap());
// 3. If parent file exists and is not empty, will return ok and child file will have its
// contents.
let some_line = "Parent line";
writeln!(named_file.as_file(), "{}", some_line).expect("Cannot write to file.");
let result = inherit_from_parent(&path2, named_file.as_path().to_str().unwrap(), 1);
result.unwrap();
let res = readln_special(&child_file).expect("Cannot read from file.");
assert!(res == some_line);
}
#[test]
fn test_get_controller() {
let mut file = "cpuset.cpu";
// Check valid file.
let mut result = get_controller_from_filename(file);
assert!(
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/jailer/src/main.rs | src/jailer/src/main.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::ffi::{CString, NulError, OsString};
use std::fmt::{Debug, Display};
use std::path::{Path, PathBuf};
use std::{env as p_env, fs, io};
use env::PROC_MOUNTS;
use utils::arg_parser::{ArgParser, Argument, UtilsArgParserError as ParsingError};
use utils::time::{ClockType, get_time_us};
use utils::validators;
use vmm_sys_util::syscall::SyscallReturnCode;
use crate::env::Env;
mod cgroup;
mod chroot;
mod env;
mod resource_limits;
const JAILER_VERSION: &str = env!("CARGO_PKG_VERSION");
#[derive(Debug, thiserror::Error)]
pub enum JailerError {
#[error("Failed to parse arguments: {0}")]
ArgumentParsing(ParsingError),
#[error("{}", format!("Failed to canonicalize path {:?}: {}", .0, .1).replace('\"', ""))]
Canonicalize(PathBuf, io::Error),
#[error("{}", format!("Failed to inherit cgroups configurations from file {} in path {:?}", .1, .0).replace('\"', ""))]
CgroupInheritFromParent(PathBuf, String),
#[error("{1} configurations not found in {0}")]
CgroupLineNotFound(String, String),
#[error("Cgroup invalid file: {0}")]
CgroupInvalidFile(String),
#[error("Invalid format for cgroups: {0}")]
CgroupFormat(String),
#[error("Hierarchy not found: {0}")]
CgroupHierarchyMissing(String),
#[error("Controller {0} is unavailable")]
CgroupControllerUnavailable(String),
#[error("{0} is an invalid cgroup version specifier")]
CgroupInvalidVersion(String),
#[error("Parent cgroup path is invalid. Path should not be absolute or contain '..' or '.'")]
CgroupInvalidParentPath(),
#[error(
"Failed to move process to cgroup ({0}): {1}.\nHint: If you intended to create a child \
cgroup under {0}, pass any --cgroup parameters."
)]
CgroupMove(PathBuf, io::Error),
#[error("Failed to change owner for {0}: {1}")]
ChangeFileOwner(PathBuf, io::Error),
#[error("Failed to chdir into chroot directory: {0}")]
ChdirNewRoot(io::Error),
#[error("Failed to change permissions on {0}: {1}")]
Chmod(PathBuf, io::Error),
#[error("Failed cloning into a new child process: {0}")]
Clone(io::Error),
#[error("Failed to close netns fd: {0}")]
CloseNetNsFd(io::Error),
#[error("Failed to close /dev/null fd: {0}")]
CloseDevNullFd(io::Error),
#[error("Failed to call close range syscall: {0}")]
CloseRange(io::Error),
#[error("{}", format!("Failed to copy {:?} to {:?}: {}", .0, .1, .2).replace('\"', ""))]
Copy(PathBuf, PathBuf, io::Error),
#[error("{}", format!("Failed to create directory {:?}: {}", .0, .1).replace('\"', ""))]
CreateDir(PathBuf, io::Error),
#[error("Encountered interior \\0 while parsing a string")]
CStringParsing(NulError),
#[error("Failed to daemonize: {0}")]
Daemonize(io::Error),
#[error("Failed to open directory {0}: {1}")]
DirOpen(String, String),
#[error("Failed to duplicate fd: {0}")]
Dup2(io::Error),
#[error("Failed to exec into Firecracker: {0}")]
Exec(io::Error),
#[error("{}", format!("Failed to extract filename from path {:?}", .0).replace('\"', ""))]
ExtractFileName(PathBuf),
#[error("{}", format!("Failed to open file {:?}: {}", .0, .1).replace('\"', ""))]
FileOpen(PathBuf, io::Error),
#[error("Failed to decode string from byte array: {0}")]
FromBytesWithNul(std::ffi::FromBytesWithNulError),
#[error("Failed to get flags from fd: {0}")]
GetOldFdFlags(io::Error),
#[error("Failed to get PID (getpid): {0}")]
GetPid(io::Error),
#[error("Failed to get SID (getsid): {0}")]
GetSid(io::Error),
#[error("Invalid gid: {0}")]
Gid(String),
#[error("Invalid instance ID: {0}")]
InvalidInstanceId(validators::ValidatorError),
#[error("{}", format!("File {:?} doesn't have a parent", .0).replace('\"', ""))]
MissingParent(PathBuf),
#[error("Failed to create the jail root directory before pivoting root: {0}")]
MkdirOldRoot(io::Error),
#[error("Failed to create {1} via mknod inside the jail: {0}")]
MknodDev(io::Error, String),
#[error("Failed to bind mount the jail root directory: {0}")]
MountBind(io::Error),
#[error("Failed to change the propagation type to slave: {0}")]
MountPropagationSlave(io::Error),
#[error("{}", format!("{:?} is not a file", .0).replace('\"', ""))]
NotAFile(PathBuf),
#[error("{}", format!("{:?} is not a directory", .0).replace('\"', ""))]
NotADirectory(PathBuf),
#[error("Failed to open /dev/null: {0}")]
OpenDevNull(io::Error),
#[error("{}", format!("Failed to parse path {:?} into an OsString", .0).replace('\"', ""))]
OsStringParsing(PathBuf, OsString),
#[error("Failed to pivot root: {0}")]
PivotRoot(io::Error),
#[error("{}", format!("Failed to read line from {:?}: {}", .0, .1).replace('\"', ""))]
ReadLine(PathBuf, io::Error),
#[error("{}", format!("Failed to read file {:?} into a string: {}", .0, .1).replace('\"', ""))]
ReadToString(PathBuf, io::Error),
#[error("Regex failed: {0}")]
RegEx(regex::Error),
#[error("Invalid resource argument: {0}")]
ResLimitArgument(String),
#[error("Invalid format for resources limits: {0}")]
ResLimitFormat(String),
#[error("Invalid limit value for resource: {0}: {1}")]
ResLimitValue(String, String),
#[error("Failed to remove old jail root directory: {0}")]
RmOldRootDir(io::Error),
#[error("Failed to change current directory: {0}")]
SetCurrentDir(io::Error),
#[error("Failed to join network namespace: netns: {0}")]
SetNetNs(io::Error),
#[error("Failed to set limit for resource: {0}")]
Setrlimit(String),
#[error("Failed to daemonize: setsid: {0}")]
SetSid(io::Error),
#[error("Invalid uid: {0}")]
Uid(String),
#[error("Failed to unmount the old jail root: {0}")]
UmountOldRoot(io::Error),
#[error("Unexpected value for the socket listener fd: {0}")]
UnexpectedListenerFd(i32),
#[error("Failed to unshare into new mount namespace: {0}")]
UnshareNewNs(io::Error),
#[error("Failed to unset the O_CLOEXEC flag on the socket fd: {0}")]
UnsetCloexec(io::Error),
#[error("Slice contains invalid UTF-8 data : {0}")]
UTF8Parsing(std::str::Utf8Error),
#[error("{}", format!("Failed to write to {:?}: {}", .0, .1).replace('\"', ""))]
Write(PathBuf, io::Error),
}
/// Create an ArgParser object which contains info about the command line argument parser and
/// populate it with the expected arguments and their characteristics.
pub fn build_arg_parser() -> ArgParser<'static> {
ArgParser::new()
.arg(
Argument::new("id")
.required(true)
.takes_value(true)
.help("Jail ID."),
)
.arg(
Argument::new("exec-file")
.required(true)
.takes_value(true)
.help("File path to exec into."),
)
.arg(
Argument::new("uid")
.required(true)
.takes_value(true)
.help("The user identifier the jailer switches to after exec."),
)
.arg(
Argument::new("gid")
.required(true)
.takes_value(true)
.help("The group identifier the jailer switches to after exec."),
)
.arg(
Argument::new("chroot-base-dir")
.takes_value(true)
.default_value("/srv/jailer")
.help("The base folder where chroot jails are located."),
)
.arg(
Argument::new("netns")
.takes_value(true)
.help("Path to the network namespace this microVM should join."),
)
.arg(Argument::new("daemonize").takes_value(false).help(
"Daemonize the jailer before exec, by invoking setsid(), and redirecting the standard \
I/O file descriptors to /dev/null.",
))
.arg(
Argument::new("new-pid-ns")
.takes_value(false)
.help("Exec into a new PID namespace."),
)
.arg(Argument::new("cgroup").allow_multiple(true).help(
"Cgroup and value to be set by the jailer. It must follow this format: \
<cgroup_file>=<value> (e.g cpu.shares=10). This argument can be used multiple times \
to add multiple cgroups.",
))
.arg(Argument::new("resource-limit").allow_multiple(true).help(
"Resource limit values to be set by the jailer. It must follow this format: \
<resource>=<value> (e.g no-file=1024). This argument can be used multiple times to \
add multiple resource limits. Current available resource values are:\n\t\tfsize: The \
maximum size in bytes for files created by the process.\n\t\tno-file: Specifies a \
value one greater than the maximum file descriptor number that can be opened by this \
process.",
))
.arg(
Argument::new("cgroup-version")
.takes_value(true)
.default_value("1")
.help("Select the cgroup version used by the jailer."),
)
.arg(
Argument::new("parent-cgroup")
.takes_value(true)
.help("Parent cgroup in which the cgroup of this microvm will be placed."),
)
.arg(
Argument::new("version")
.takes_value(false)
.help("Print the binary version number."),
)
}
// It's called writeln_special because we have to use this rather convoluted way of writing
// to special cgroup files, to avoid getting errors. It would be nice to know why that happens :-s
pub fn writeln_special<T, V>(file_path: &T, value: V) -> Result<(), JailerError>
where
T: AsRef<Path> + Debug,
V: Display + Debug,
{
fs::write(file_path, format!("{}\n", value))
.map_err(|err| JailerError::Write(PathBuf::from(file_path.as_ref()), err))
}
pub fn readln_special<T: AsRef<Path> + Debug>(file_path: &T) -> Result<String, JailerError> {
let mut line = fs::read_to_string(file_path)
.map_err(|err| JailerError::ReadToString(PathBuf::from(file_path.as_ref()), err))?;
// Remove the newline character at the end (if any).
line.pop();
Ok(line)
}
fn close_fds_by_close_range() -> Result<(), JailerError> {
// First try using the close_range syscall to close all open FDs in the range of 3..UINT_MAX
// SAFETY: if the syscall is not available then ENOSYS will be returned
SyscallReturnCode(unsafe {
libc::syscall(
libc::SYS_close_range,
3,
libc::c_uint::MAX,
libc::CLOSE_RANGE_UNSHARE,
)
})
.into_empty_result()
.map_err(JailerError::CloseRange)
}
// Closes all FDs other than 0 (STDIN), 1 (STDOUT) and 2 (STDERR)
fn close_inherited_fds() -> Result<(), JailerError> {
// We use the close_range syscall which is available on kernels > 5.9.
close_fds_by_close_range()?;
Ok(())
}
fn sanitize_process() -> Result<(), JailerError> {
// First thing to do is make sure we don't keep any inherited FDs
// other that IN, OUT and ERR.
close_inherited_fds()?;
// Cleanup environment variables.
clean_env_vars();
Ok(())
}
fn clean_env_vars() {
// Remove environment variables received from
// the parent process so there are no leaks
// inside the jailer environment
for (key, _) in p_env::vars() {
// SAFETY: the function is safe to call in a single-threaded program
unsafe {
p_env::remove_var(key);
}
}
}
/// Turns an [`AsRef<Path>`] into a [`CString`] (c style string).
/// The expect should not fail, since Linux paths only contain valid Unicode chars (do they?),
/// and do not contain null bytes (do they?).
pub fn to_cstring<T: AsRef<Path> + Debug>(path: T) -> Result<CString, JailerError> {
let path_str = path
.as_ref()
.to_path_buf()
.into_os_string()
.into_string()
.map_err(|err| JailerError::OsStringParsing(path.as_ref().to_path_buf(), err))?;
CString::new(path_str).map_err(JailerError::CStringParsing)
}
/// We wrap the actual main in order to pretty print an error with Display trait.
fn main() -> Result<(), JailerError> {
let result = main_exec();
if let Err(e) = result {
eprintln!("{}", e);
Err(e)
} else {
Ok(())
}
}
fn main_exec() -> Result<(), JailerError> {
sanitize_process()
.unwrap_or_else(|err| panic!("Failed to sanitize the Jailer process: {}", err));
let mut arg_parser = build_arg_parser();
arg_parser
.parse_from_cmdline()
.map_err(JailerError::ArgumentParsing)?;
let arguments = arg_parser.arguments();
if arguments.flag_present("help") {
println!("Jailer v{}\n", JAILER_VERSION);
println!("{}\n", arg_parser.formatted_help());
println!("Any arguments after the -- separator will be supplied to the jailed binary.\n");
return Ok(());
}
if arguments.flag_present("version") {
println!("Jailer v{}\n", JAILER_VERSION);
return Ok(());
}
Env::new(
arguments,
get_time_us(ClockType::Monotonic),
get_time_us(ClockType::ProcessCpu),
PROC_MOUNTS,
)
.and_then(|env| {
fs::create_dir_all(env.chroot_dir())
.map_err(|err| JailerError::CreateDir(env.chroot_dir().to_owned(), err))?;
env.run()
})?;
Ok(())
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use std::env;
use std::ffi::CStr;
use std::fs::File;
use std::os::unix::io::IntoRawFd;
use vmm_sys_util::rand;
use super::*;
fn run_close_fds_test(test_fn: fn() -> Result<(), JailerError>) {
let n = 100;
let tmp_dir_path = format!(
"/tmp/jailer/tests/close_fds/_{}",
rand::rand_alphanumerics(4).into_string().unwrap()
);
fs::create_dir_all(&tmp_dir_path).unwrap();
let mut fds = Vec::new();
for i in 0..n {
let maybe_file = File::create(format!("{}/{}", &tmp_dir_path, i));
fds.push(maybe_file.unwrap().into_raw_fd());
}
test_fn().unwrap();
for fd in fds {
let is_fd_opened = unsafe { libc::fcntl(fd, libc::F_GETFD) } == 0;
assert!(!is_fd_opened);
}
fs::remove_dir_all(tmp_dir_path).unwrap();
}
#[test]
fn test_fds_close_range() {
// SAFETY: Always safe
let mut n = unsafe { std::mem::zeroed() };
// SAFETY: We check if the uname call succeeded
assert_eq!(unsafe { libc::uname(&mut n) }, 0);
// SAFETY: Always safe
let release = unsafe { CStr::from_ptr(n.release.as_ptr()) }
.to_string_lossy()
.into_owned();
// Parse the major and minor version of the kernel
let mut r = release.split('.');
let major: i32 = str::parse(r.next().unwrap()).unwrap();
let minor: i32 = str::parse(r.next().unwrap()).unwrap();
// Skip this test if we're running on a too old kernel
if major > 5 || (major == 5 && minor >= 9) {
run_close_fds_test(close_fds_by_close_range);
}
}
#[test]
fn test_sanitize_process() {
run_close_fds_test(sanitize_process);
}
#[test]
fn test_clean_env_vars() {
let env_vars: [&str; 5] = ["VAR1", "VAR2", "VAR3", "VAR4", "VAR5"];
// Set environment variables
for env_var in env_vars.iter() {
// SAFETY: the function is safe to call in a single-threaded program
unsafe {
env::set_var(env_var, "0");
}
}
// Cleanup the environment
clean_env_vars();
// Assert that the variables set beforehand
// do not exist anymore
for env_var in env_vars.iter() {
assert_eq!(env::var_os(env_var), None);
}
}
#[test]
fn test_to_cstring() {
let path = Path::new("some_path");
let cstring_path = to_cstring(path).unwrap();
assert_eq!(cstring_path, CString::new("some_path").unwrap());
let path_with_nul = Path::new("some_path\0");
assert_eq!(
format!("{}", to_cstring(path_with_nul).unwrap_err()),
"Encountered interior \\0 while parsing a string"
);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/rebase-snap/src/main.rs | src/rebase-snap/src/main.rs | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::env;
use std::fs::{File, OpenOptions};
use std::io::{Seek, SeekFrom};
use std::os::unix::io::AsRawFd;
use utils::arg_parser::{ArgParser, Argument, Arguments, UtilsArgParserError as ArgError};
use vmm_sys_util::seek_hole::SeekHole;
const REBASE_SNAP_VERSION: &str = env!("CARGO_PKG_VERSION");
const BASE_FILE: &str = "base-file";
const DIFF_FILE: &str = "diff-file";
const DEPRECATION_MSG: &str = "This tool is deprecated and will be removed in the future. Please \
use 'snapshot-editor' instead.\n";
#[derive(Debug, thiserror::Error, displaydoc::Display)]
enum FileError {
/// Invalid base file: {0}
InvalidBaseFile(std::io::Error),
/// Invalid diff file: {0}
InvalidDiffFile(std::io::Error),
/// Failed to seek data: {0}
SeekData(std::io::Error),
/// Failed to seek hole: {0}
SeekHole(std::io::Error),
/// Failed to seek: {0}
Seek(std::io::Error),
/// Failed to send the file: {0}
SendFile(std::io::Error),
/// Failed to get metadata: {0}
Metadata(std::io::Error),
}
#[derive(Debug, thiserror::Error, displaydoc::Display)]
enum RebaseSnapError {
/// Arguments parsing error: {0} \n\nFor more information try --help.
ArgParse(ArgError),
/// Error parsing the cmd line args: {0}
SnapFile(FileError),
/// Error merging the files: {0}
RebaseFiles(FileError),
}
fn build_arg_parser<'a>() -> ArgParser<'a> {
ArgParser::new()
.arg(
Argument::new(BASE_FILE)
.required(true)
.takes_value(true)
.help("File path of the base mem snapshot."),
)
.arg(
Argument::new(DIFF_FILE)
.required(true)
.takes_value(true)
.help("File path of the diff mem snapshot."),
)
}
fn get_files(args: &Arguments) -> Result<(File, File), FileError> {
// Safe to unwrap since the required arguments are checked as part of
// `arg_parser.parse_from_cmdline()`
let base_file_path = args.single_value(BASE_FILE).unwrap();
let base_file = OpenOptions::new()
.write(true)
.open(base_file_path)
.map_err(FileError::InvalidBaseFile)?;
// Safe to unwrap since the required arguments are checked as part of
// `arg_parser.parse_from_cmdline()`
let diff_file_path = args.single_value(DIFF_FILE).unwrap();
let diff_file = OpenOptions::new()
.read(true)
.open(diff_file_path)
.map_err(FileError::InvalidDiffFile)?;
Ok((base_file, diff_file))
}
fn rebase(base_file: &mut File, diff_file: &mut File) -> Result<(), FileError> {
let mut cursor: u64 = 0;
while let Some(block_start) = diff_file.seek_data(cursor).map_err(FileError::SeekData)? {
cursor = block_start;
let block_end = match diff_file
.seek_hole(block_start)
.map_err(FileError::SeekHole)?
{
Some(hole_start) => hole_start,
None => diff_file.metadata().map_err(FileError::Metadata)?.len(),
};
while cursor < block_end {
base_file
.seek(SeekFrom::Start(cursor))
.map_err(FileError::Seek)?;
// SAFETY: Safe because the parameters are valid.
let num_transferred_bytes = unsafe {
libc::sendfile64(
base_file.as_raw_fd(),
diff_file.as_raw_fd(),
(&mut cursor as *mut u64).cast::<i64>(),
usize::try_from(block_end.saturating_sub(cursor)).unwrap(),
)
};
if num_transferred_bytes < 0 {
return Err(FileError::SendFile(std::io::Error::last_os_error()));
}
}
}
Ok(())
}
fn main() -> Result<(), RebaseSnapError> {
let result = main_exec();
if let Err(e) = result {
eprintln!("{}", e);
Err(e)
} else {
Ok(())
}
}
fn main_exec() -> Result<(), RebaseSnapError> {
let mut arg_parser = build_arg_parser();
arg_parser
.parse_from_cmdline()
.map_err(RebaseSnapError::ArgParse)?;
let arguments = arg_parser.arguments();
if arguments.flag_present("help") {
println!("Rebase_snap v{}", REBASE_SNAP_VERSION);
println!(
"Tool that copies all the non-sparse sections from a diff file onto a base file.\n"
);
println!("{DEPRECATION_MSG}");
println!("{}", arg_parser.formatted_help());
return Ok(());
}
if arguments.flag_present("version") {
println!("Rebase_snap v{REBASE_SNAP_VERSION}\n{DEPRECATION_MSG}");
return Ok(());
}
println!("{DEPRECATION_MSG}");
let (mut base_file, mut diff_file) = get_files(arguments).map_err(RebaseSnapError::SnapFile)?;
rebase(&mut base_file, &mut diff_file).map_err(RebaseSnapError::RebaseFiles)?;
Ok(())
}
#[cfg(test)]
mod tests {
use std::io::{Seek, SeekFrom, Write};
use std::os::unix::fs::FileExt;
use vmm_sys_util::{rand, tempfile};
use super::*;
macro_rules! assert_err {
($expression:expr, $($pattern:tt)+) => {
match $expression {
Err($($pattern)+) => (),
ref err => {
println!("expected `{}` but got `{:?}`", stringify!($($pattern)+), err);
assert!(false)
}
}
}
}
#[test]
fn test_parse_args() {
let base_file = tempfile::TempFile::new().unwrap();
let base_file_path = base_file.as_path().to_str().unwrap().to_string();
let diff_file = tempfile::TempFile::new().unwrap();
let diff_file_path = diff_file.as_path().to_str().unwrap().to_string();
let arg_parser = build_arg_parser();
let arguments = &mut arg_parser.arguments().clone();
arguments
.parse(
vec![
"rebase_snap",
"--base-file",
"wrong_file",
"--diff-file",
"diff_file",
]
.into_iter()
.map(String::from)
.collect::<Vec<String>>()
.as_ref(),
)
.unwrap();
assert_err!(get_files(arguments), FileError::InvalidBaseFile(_));
let arguments = &mut arg_parser.arguments().clone();
arguments
.parse(
vec![
"rebase_snap",
"--base-file",
&base_file_path,
"--diff-file",
"diff_file",
]
.into_iter()
.map(String::from)
.collect::<Vec<String>>()
.as_ref(),
)
.unwrap();
assert_err!(get_files(arguments), FileError::InvalidDiffFile(_));
let arguments = &mut arg_parser.arguments().clone();
arguments
.parse(
vec![
"rebase_snap",
"--base-file",
&base_file_path,
"--diff-file",
&diff_file_path,
]
.into_iter()
.map(String::from)
.collect::<Vec<String>>()
.as_ref(),
)
.unwrap();
get_files(arguments).unwrap();
}
fn check_file_content(file: &mut File, expected_content: &[u8]) {
let mut buf = vec![0u8; expected_content.len()];
file.read_exact_at(buf.as_mut_slice(), 0).unwrap();
assert_eq!(&buf, expected_content);
}
#[test]
fn test_rebase_corner_cases() {
let mut base_file = tempfile::TempFile::new().unwrap().into_file();
let mut diff_file = tempfile::TempFile::new().unwrap().into_file();
// 1. Empty files
rebase(&mut base_file, &mut diff_file).unwrap();
assert_eq!(base_file.metadata().unwrap().len(), 0);
let initial_base_file_content = rand::rand_alphanumerics(50000).into_string().unwrap();
base_file
.write_all(initial_base_file_content.as_bytes())
.unwrap();
// 2. Diff file that has only holes
diff_file
.set_len(initial_base_file_content.len() as u64)
.unwrap();
rebase(&mut base_file, &mut diff_file).unwrap();
check_file_content(&mut base_file, initial_base_file_content.as_bytes());
// 3. Diff file that has only data
let diff_data = rand::rand_alphanumerics(50000).into_string().unwrap();
diff_file.write_all(diff_data.as_bytes()).unwrap();
rebase(&mut base_file, &mut diff_file).unwrap();
check_file_content(&mut base_file, diff_data.as_bytes());
}
#[test]
fn test_rebase() {
// The filesystem punches holes only for blocks >= 4096.
// It doesn't make sense to test for smaller ones.
let block_sizes: &[usize] = &[4096, 8192];
for &block_size in block_sizes {
let mut expected_result = vec![];
let mut base_file = tempfile::TempFile::new().unwrap().into_file();
let mut diff_file = tempfile::TempFile::new().unwrap().into_file();
// 1. Populated block both in base and diff file
let base_block = rand::rand_alphanumerics(block_size).into_string().unwrap();
base_file.write_all(base_block.as_bytes()).unwrap();
let diff_block = rand::rand_alphanumerics(block_size).into_string().unwrap();
diff_file.write_all(diff_block.as_bytes()).unwrap();
expected_result.append(&mut diff_block.into_bytes());
// 2. Populated block in base file, hole in diff file
let base_block = rand::rand_alphanumerics(block_size).into_string().unwrap();
base_file.write_all(base_block.as_bytes()).unwrap();
diff_file
.seek(SeekFrom::Current(i64::try_from(block_size).unwrap()))
.unwrap();
expected_result.append(&mut base_block.into_bytes());
// 3. Populated block in base file, zeroes block in diff file
let base_block = rand::rand_alphanumerics(block_size).into_string().unwrap();
base_file.write_all(base_block.as_bytes()).unwrap();
let mut diff_block = vec![0u8; block_size];
diff_file.write_all(&diff_block).unwrap();
expected_result.append(&mut diff_block);
// Rebase and check the result
rebase(&mut base_file, &mut diff_file).unwrap();
check_file_content(&mut base_file, &expected_result);
// 4. The diff file is bigger
let diff_block = rand::rand_alphanumerics(block_size).into_string().unwrap();
diff_file.write_all(diff_block.as_bytes()).unwrap();
expected_result.append(&mut diff_block.into_bytes());
// Rebase and check the result
rebase(&mut base_file, &mut diff_file).unwrap();
check_file_content(&mut base_file, &expected_result);
// 5. The base file is bigger
let base_block = rand::rand_alphanumerics(block_size).into_string().unwrap();
base_file.write_all(base_block.as_bytes()).unwrap();
expected_result.append(&mut base_block.into_bytes());
// Rebase and check the result
rebase(&mut base_file, &mut diff_file).unwrap();
check_file_content(&mut base_file, &expected_result);
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/log-instrument/src/lib.rs | src/log-instrument/src/lib.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::collections::HashMap;
use std::fmt::Write;
use std::sync::{Mutex, OnceLock};
pub use log_instrument_macros::*;
type InnerPath = Mutex<HashMap<std::thread::ThreadId, Vec<&'static str>>>;
static PATH: OnceLock<InnerPath> = OnceLock::new();
fn path() -> &'static InnerPath {
PATH.get_or_init(InnerPath::default)
}
#[allow(missing_debug_implementations)]
pub struct __Instrument;
impl __Instrument {
pub fn new(s: &'static str) -> __Instrument {
// Get log
let mut guard = path().lock().unwrap();
let id = std::thread::current().id();
let prefix = if let Some(spans) = guard.get_mut(&id) {
let out = spans.iter().fold(String::new(), |mut s, x| {
let _ = write!(s, "::{x}");
s
});
spans.push(s);
out
} else {
guard.insert(id, vec![s]);
String::new()
};
// Write log
log::trace!("{id:?}{prefix}>>{s}");
// Return exit struct
__Instrument
}
}
impl std::ops::Drop for __Instrument {
fn drop(&mut self) {
// Get log
let mut guard = path().lock().unwrap();
let id = std::thread::current().id();
let spans = guard.get_mut(&id).unwrap();
let s = spans.pop().unwrap();
let out = spans.iter().fold(String::new(), |mut s, x| {
let _ = write!(s, "::{x}");
s
});
log::trace!("{id:?}{out}<<{s}");
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/log-instrument/examples/one.rs | src/log-instrument/examples/one.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use log::*;
fn main() {
env_logger::builder()
.filter_level(LevelFilter::Trace)
.init();
info!("{}", one(2));
info!("{}", one(3));
info!("{}", one(4));
}
#[log_instrument::instrument]
fn one(x: u32) -> u32 {
let cmp = x == 2;
debug!("cmp: {cmp}");
if cmp {
return 4;
}
x + 3
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/log-instrument/examples/three.rs | src/log-instrument/examples/three.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use log::*;
fn main() {
env_logger::builder()
.filter_level(LevelFilter::Trace)
.init();
info!("{:?}", one(&mut None));
info!(
"{:?}",
one(&mut Some(vec![String::from("a"), String::from("b")]))
);
}
#[log_instrument::instrument]
fn one(x: &mut Option<Vec<String>>) -> Option<&mut [String]> {
match x {
Some(y) => {
debug!("{y:?}");
Some(y)
}
_ => None,
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/log-instrument/examples/six.rs | src/log-instrument/examples/six.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use log::*;
fn main() {
env_logger::builder()
.filter_level(LevelFilter::Trace)
.init();
info!("{}", one(2));
info!("{}", one(3));
info!("{}", one(4));
}
#[log_instrument::instrument]
fn one(x: u32) -> u32 {
let cmp = x == 2;
debug!("cmp: {cmp}");
if cmp {
return 4;
}
two(x + 3)
}
#[log_instrument::instrument]
fn two(x: u32) -> u32 {
let res = x % 2;
debug!("res: {res}");
res
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/log-instrument/examples/five.rs | src/log-instrument/examples/five.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![warn(clippy::pedantic)]
use log::{LevelFilter, debug, info, warn};
fn main() {
env_logger::builder()
.filter_level(LevelFilter::Trace)
.init();
let mut my_struct = MyStruct(None);
info!("{:?}", my_struct.one());
let mut my_struct = MyStruct(Some(vec![String::from("a"), String::from("b")]));
info!("{:?}", my_struct.one());
}
struct MyStruct(Option<Vec<String>>);
impl MyStruct {
#[log_instrument::instrument]
fn one(&mut self) -> Option<&mut [String]> {
const SOMETHING: u32 = 23;
match &mut self.0 {
Some(y) => {
debug!("{y:?}");
debug!("{SOMETHING}");
Some(y)
}
_ => None,
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/log-instrument/examples/two.rs | src/log-instrument/examples/two.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use log::*;
fn main() {
env_logger::builder()
.filter_level(LevelFilter::Trace)
.init();
info!("{:?}", one(&None));
info!(
"{:?}",
one(&Some(vec![String::from("a"), String::from("b")]))
);
}
#[log_instrument::instrument]
fn one(x: &Option<Vec<String>>) -> Option<&[String]> {
match x {
Some(y) => {
debug!("{y:?}");
Some(y)
}
_ => None,
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/log-instrument/examples/four.rs | src/log-instrument/examples/four.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use log::*;
fn main() {
env_logger::builder()
.filter_level(LevelFilter::Trace)
.init();
let mut my_struct = MyStruct(None);
info!("{:?}", my_struct.one());
let mut my_struct = MyStruct(Some(vec![String::from("a"), String::from("b")]));
info!("{:?}", my_struct.one());
}
struct MyStruct(Option<Vec<String>>);
impl MyStruct {
#[log_instrument::instrument]
fn one(&mut self) -> Option<&mut [String]> {
match &mut self.0 {
Some(y) => {
debug!("{y:?}");
Some(y)
}
_ => None,
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/build.rs | src/firecracker/build.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::path::Path;
const ADVANCED_BINARY_FILTER_FILE_NAME: &str = "seccomp_filter.bpf";
const JSON_DIR: &str = "../../resources/seccomp";
const SECCOMPILER_SRC_DIR: &str = "../seccompiler/src";
// This script is run on every modification in the target-specific JSON file in `resources/seccomp`.
// It compiles the JSON seccomp policies into a serializable BPF format, using seccompiler-bin.
// The generated binary code will get included in Firecracker's code, at compile-time.
fn main() {
// Target triple
let target = std::env::var("TARGET").expect("Missing target.");
let debug: bool = std::env::var("DEBUG")
.expect("Missing debug.")
.parse()
.expect("Invalid env variable DEBUG");
let out_dir = std::env::var("OUT_DIR").expect("Missing build-level OUT_DIR.");
// Target arch (x86_64 / aarch64)
let target_arch = std::env::var("CARGO_CFG_TARGET_ARCH").expect("Missing target arch.");
let seccomp_json_path = format!("{}/{}.json", JSON_DIR, target);
// If the current target doesn't have a default filter, or if we're building a debug binary,
// use a default, empty filter.
// This is to make sure that Firecracker builds even with libc toolchains for which we don't
// provide a default filter. For example, GNU libc.
let seccomp_json_path = if debug {
println!(
"cargo:warning=Using empty default seccomp policy for debug builds: \
`resources/seccomp/unimplemented.json`."
);
format!("{}/unimplemented.json", JSON_DIR)
} else if !Path::new(&seccomp_json_path).exists() {
println!(
"cargo:warning=No default seccomp policy for target: {}. Defaulting to \
`resources/seccomp/unimplemented.json`.",
target
);
format!("{}/unimplemented.json", JSON_DIR)
} else {
seccomp_json_path
};
// Retrigger the build script if the JSON file has changed.
// let json_path = json_path.to_str().expect("Invalid bytes");
println!("cargo:rerun-if-changed={}", seccomp_json_path);
// Also retrigger the build script on any seccompiler source code change.
println!("cargo:rerun-if-changed={}", SECCOMPILER_SRC_DIR);
let out_path = format!("{}/{}", out_dir, ADVANCED_BINARY_FILTER_FILE_NAME);
seccompiler::compile_bpf(&seccomp_json_path, &target_arch, &out_path, false)
.expect("Cannot compile seccomp filters");
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/lib.rs | src/firecracker/src/lib.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
pub mod api_server;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/seccomp.rs | src/firecracker/src/seccomp.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fmt::Debug;
use std::fs::File;
use std::io::{BufReader, Read};
use std::path::Path;
use vmm::seccomp::{BpfThreadMap, DeserializationError, deserialize_binary, get_empty_filters};
const THREAD_CATEGORIES: [&str; 3] = ["vmm", "api", "vcpu"];
/// Error retrieving seccomp filters.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum FilterError {
/// Filter deserialization failed: {0}
Deserialization(DeserializationError),
/// Invalid thread categories: {0}
ThreadCategories(String),
/// Missing thread category: {0}
MissingThreadCategory(String),
/// Filter file open error: {0}
FileOpen(std::io::Error),
}
/// Seccomp filter configuration.
#[derive(Debug)]
pub enum SeccompConfig {
/// Seccomp filtering disabled.
None,
/// Default, advanced filters.
Advanced,
/// Custom, user-provided filters.
Custom(File),
}
impl SeccompConfig {
/// Given the relevant command line args, return the appropriate config type.
pub fn from_args<T: AsRef<Path> + Debug>(
no_seccomp: bool,
seccomp_filter: Option<T>,
) -> Result<Self, FilterError> {
if no_seccomp {
Ok(SeccompConfig::None)
} else {
match seccomp_filter {
Some(path) => Ok(SeccompConfig::Custom(
File::open(path).map_err(FilterError::FileOpen)?,
)),
None => Ok(SeccompConfig::Advanced),
}
}
}
}
/// Retrieve the appropriate filters, based on the SeccompConfig.
pub fn get_filters(config: SeccompConfig) -> Result<BpfThreadMap, FilterError> {
match config {
SeccompConfig::None => Ok(get_empty_filters()),
SeccompConfig::Advanced => get_default_filters(),
SeccompConfig::Custom(reader) => get_custom_filters(reader),
}
}
/// Retrieve the default filters containing the syscall rules required by `Firecracker`
/// to function. The binary file is generated via the `build.rs` script of this crate.
fn get_default_filters() -> Result<BpfThreadMap, FilterError> {
// Retrieve, at compile-time, the serialized binary filter generated with seccompiler.
let bytes: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/seccomp_filter.bpf"));
let map = deserialize_binary(bytes).map_err(FilterError::Deserialization)?;
filter_thread_categories(map)
}
/// Retrieve custom seccomp filters.
fn get_custom_filters<R: Read + Debug>(reader: R) -> Result<BpfThreadMap, FilterError> {
let map = deserialize_binary(BufReader::new(reader)).map_err(FilterError::Deserialization)?;
filter_thread_categories(map)
}
/// Return an error if the BpfThreadMap contains invalid thread categories.
fn filter_thread_categories(map: BpfThreadMap) -> Result<BpfThreadMap, FilterError> {
let (filters, invalid_filters): (BpfThreadMap, BpfThreadMap) = map
.into_iter()
.partition(|(k, _)| THREAD_CATEGORIES.contains(&k.as_str()));
if !invalid_filters.is_empty() {
// build the error message
let mut thread_categories_string =
invalid_filters
.keys()
.fold("".to_string(), |mut acc, elem| {
acc.push_str(elem);
acc.push(',');
acc
});
thread_categories_string.pop();
return Err(FilterError::ThreadCategories(thread_categories_string));
}
for &category in THREAD_CATEGORIES.iter() {
let category_string = category.to_string();
if !filters.contains_key(&category_string) {
return Err(FilterError::MissingThreadCategory(category_string));
}
}
Ok(filters)
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use vmm::seccomp::BpfThreadMap;
use vmm_sys_util::tempfile::TempFile;
use super::*;
#[test]
fn test_get_filters() {
let mut filters = get_empty_filters();
assert_eq!(filters.len(), 3);
assert!(filters.remove("vmm").is_some());
assert!(filters.remove("api").is_some());
assert!(filters.remove("vcpu").is_some());
let mut filters = get_empty_filters();
assert_eq!(filters.len(), 3);
assert_eq!(filters.remove("vmm").unwrap().len(), 0);
assert_eq!(filters.remove("api").unwrap().len(), 0);
assert_eq!(filters.remove("vcpu").unwrap().len(), 0);
let file = TempFile::new().unwrap().into_file();
get_filters(SeccompConfig::Custom(file)).unwrap_err();
}
#[test]
fn test_filter_thread_categories() {
// correct categories
let mut map = BpfThreadMap::new();
map.insert("vcpu".to_string(), Arc::new(vec![]));
map.insert("vmm".to_string(), Arc::new(vec![]));
map.insert("api".to_string(), Arc::new(vec![]));
assert_eq!(filter_thread_categories(map).unwrap().len(), 3);
// invalid categories
let mut map = BpfThreadMap::new();
map.insert("vcpu".to_string(), Arc::new(vec![]));
map.insert("vmm".to_string(), Arc::new(vec![]));
map.insert("thread1".to_string(), Arc::new(vec![]));
map.insert("thread2".to_string(), Arc::new(vec![]));
match filter_thread_categories(map).unwrap_err() {
FilterError::ThreadCategories(err) => {
assert!(err == "thread2,thread1" || err == "thread1,thread2")
}
_ => panic!("Expected ThreadCategories error."),
}
// missing category
let mut map = BpfThreadMap::new();
map.insert("vcpu".to_string(), Arc::new(vec![]));
map.insert("vmm".to_string(), Arc::new(vec![]));
match filter_thread_categories(map).unwrap_err() {
FilterError::MissingThreadCategory(name) => assert_eq!(name, "api"),
_ => panic!("Expected MissingThreadCategory error."),
}
}
#[test]
fn test_seccomp_config() {
assert!(matches!(
SeccompConfig::from_args(true, Option::<&str>::None),
Ok(SeccompConfig::None)
));
assert!(matches!(
SeccompConfig::from_args(false, Some("/dev/null")),
Ok(SeccompConfig::Custom(_))
));
assert!(matches!(
SeccompConfig::from_args(false, Some("invalid_path")),
Err(FilterError::FileOpen(_))
));
// test the default case, no parametes -> default advanced.
assert!(matches!(
SeccompConfig::from_args(false, Option::<&str>::None),
Ok(SeccompConfig::Advanced)
));
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/main.rs | src/firecracker/src/main.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
mod api_server;
mod api_server_adapter;
mod generated;
mod metrics;
mod seccomp;
use std::fs::{self, File};
use std::path::PathBuf;
use std::process::ExitCode;
use std::str::FromStr;
use std::sync::{Arc, Mutex};
use std::{io, panic};
use api_server_adapter::ApiServerError;
use event_manager::SubscriberOps;
use seccomp::FilterError;
use utils::arg_parser::{ArgParser, Argument};
use utils::validators::validate_instance_id;
use vmm::arch::host_page_size;
use vmm::builder::StartMicrovmError;
use vmm::logger::{
LOGGER, LoggerConfig, METRICS, ProcessTimeReporter, StoreMetric, debug, error, info,
};
use vmm::persist::SNAPSHOT_VERSION;
use vmm::resources::VmResources;
use vmm::seccomp::BpfThreadMap;
use vmm::signal_handler::register_signal_handlers;
use vmm::snapshot::{SnapshotError, get_format_version};
use vmm::vmm_config::instance_info::{InstanceInfo, VmState};
use vmm::vmm_config::metrics::{MetricsConfig, MetricsConfigError, init_metrics};
use vmm::{EventManager, FcExitCode, HTTP_MAX_PAYLOAD_SIZE};
use vmm_sys_util::terminal::Terminal;
use crate::seccomp::SeccompConfig;
// The reason we place default API socket under /run is that API socket is a
// runtime file.
// see https://refspecs.linuxfoundation.org/FHS_3.0/fhs/ch03s15.html for more information.
const DEFAULT_API_SOCK_PATH: &str = "/run/firecracker.socket";
const FIRECRACKER_VERSION: &str = env!("CARGO_PKG_VERSION");
const MMDS_CONTENT_ARG: &str = "metadata";
#[derive(Debug, thiserror::Error, displaydoc::Display)]
enum MainError {
/// Failed to set the logger: {0}
SetLogger(vmm::logger::LoggerInitError),
/// Failed to register signal handlers: {0}
RegisterSignalHandlers(#[source] vmm_sys_util::errno::Error),
/// Arguments parsing error: {0} \n\nFor more information try --help.
ParseArguments(#[from] utils::arg_parser::UtilsArgParserError),
/// When printing Snapshot Data format: {0}
PrintSnapshotDataFormat(#[from] SnapshotVersionError),
/// Invalid value for logger level: {0}.Possible values: [Error, Warning, Info, Debug]
InvalidLogLevel(vmm::logger::LevelFilterFromStrError),
/// Could not initialize logger: {0}
LoggerInitialization(vmm::logger::LoggerUpdateError),
/// Could not initialize metrics: {0}
MetricsInitialization(MetricsConfigError),
/// Seccomp error: {0}
SeccompFilter(FilterError),
/// Failed to resize fd table: {0}
ResizeFdtable(ResizeFdTableError),
/// RunWithApiError error: {0}
RunWithApi(ApiServerError),
/// RunWithoutApiError error: {0}
RunWithoutApiError(RunWithoutApiError),
}
#[derive(Debug, thiserror::Error, displaydoc::Display)]
enum ResizeFdTableError {
/// Failed to get RLIMIT_NOFILE
GetRlimit,
/// Failed to call dup2 to resize fdtable
Dup2(io::Error),
/// Failed to close dup2'd file descriptor
Close(io::Error),
}
impl From<MainError> for FcExitCode {
fn from(value: MainError) -> Self {
match value {
MainError::ParseArguments(_) => FcExitCode::ArgParsing,
MainError::InvalidLogLevel(_) => FcExitCode::BadConfiguration,
MainError::RunWithApi(ApiServerError::MicroVMStoppedWithError(code)) => code,
MainError::RunWithoutApiError(RunWithoutApiError::Shutdown(code)) => code,
_ => FcExitCode::GenericError,
}
}
}
fn main() -> ExitCode {
let result = main_exec();
if let Err(err) = result {
error!("{err}");
eprintln!("Error: {err:?}");
let exit_code = FcExitCode::from(err) as u8;
error!("Firecracker exiting with error. exit_code={exit_code}");
ExitCode::from(exit_code)
} else {
info!("Firecracker exiting successfully. exit_code=0");
ExitCode::SUCCESS
}
}
fn main_exec() -> Result<(), MainError> {
// Initialize the logger.
LOGGER.init().map_err(MainError::SetLogger)?;
// First call to this function updates the value to current
// host page size.
_ = host_page_size();
// We need this so that we can reset terminal to canonical mode if panic occurs.
let stdin = io::stdin();
// Start firecracker by setting up a panic hook, which will be called before
// terminating as we're building with panic = "abort".
// It's worth noting that the abort is caused by sending a SIG_ABORT signal to the process.
panic::set_hook(Box::new(move |info| {
// We're currently using the closure parameter, which is a &PanicInfo, for printing the
// origin of the panic, including the payload passed to panic! and the source code location
// from which the panic originated.
error!("Firecracker {}", info);
if let Err(err) = stdin.lock().set_canon_mode() {
error!(
"Failure while trying to reset stdin to canonical mode: {}",
err
);
}
METRICS.vmm.panic_count.store(1);
// Write the metrics before aborting.
if let Err(err) = METRICS.write() {
error!("Failed to write metrics while panicking: {}", err);
}
}));
let http_max_payload_size_str = HTTP_MAX_PAYLOAD_SIZE.to_string();
let mut arg_parser =
ArgParser::new()
.arg(
Argument::new("api-sock")
.takes_value(true)
.default_value(DEFAULT_API_SOCK_PATH)
.help("Path to unix domain socket used by the API."),
)
.arg(
Argument::new("id")
.takes_value(true)
.default_value(vmm::logger::DEFAULT_INSTANCE_ID)
.help("MicroVM unique identifier."),
)
.arg(
Argument::new("seccomp-filter")
.takes_value(true)
.forbids(vec!["no-seccomp"])
.help(
"Optional parameter which allows specifying the path to a custom seccomp \
filter. For advanced users.",
),
)
.arg(
Argument::new("no-seccomp")
.takes_value(false)
.forbids(vec!["seccomp-filter"])
.help(
"Optional parameter which allows starting and using a microVM without \
seccomp filtering. Not recommended.",
),
)
.arg(
Argument::new("start-time-us").takes_value(true).help(
"Process start time (wall clock, microseconds). This parameter is optional.",
),
)
.arg(Argument::new("start-time-cpu-us").takes_value(true).help(
"Process start CPU time (wall clock, microseconds). This parameter is optional.",
))
.arg(Argument::new("parent-cpu-time-us").takes_value(true).help(
"Parent process CPU time (wall clock, microseconds). This parameter is optional.",
))
.arg(
Argument::new("config-file")
.takes_value(true)
.help("Path to a file that contains the microVM configuration in JSON format."),
)
.arg(
Argument::new(MMDS_CONTENT_ARG).takes_value(true).help(
"Path to a file that contains metadata in JSON format to add to the mmds.",
),
)
.arg(
Argument::new("no-api")
.takes_value(false)
.requires("config-file")
.help(
"Optional parameter which allows starting and using a microVM without an \
active API socket.",
),
)
.arg(
Argument::new("log-path")
.takes_value(true)
.help("Path to a fifo or a file used for configuring the logger on startup."),
)
.arg(
Argument::new("level")
.takes_value(true)
.help("Set the logger level."),
)
.arg(
Argument::new("module")
.takes_value(true)
.help("Set the logger module filter."),
)
.arg(
Argument::new("show-level")
.takes_value(false)
.help("Whether or not to output the level in the logs."),
)
.arg(Argument::new("show-log-origin").takes_value(false).help(
"Whether or not to include the file path and line number of the log's origin.",
))
.arg(
Argument::new("metrics-path")
.takes_value(true)
.help("Path to a fifo or a file used for configuring the metrics on startup."),
)
.arg(Argument::new("boot-timer").takes_value(false).help(
"Whether or not to load boot timer device for logging elapsed time since \
InstanceStart command.",
))
.arg(
Argument::new("version")
.takes_value(false)
.help("Print the binary version number."),
)
.arg(
Argument::new("snapshot-version")
.takes_value(false)
.help("Print the supported data format version."),
)
.arg(
Argument::new("describe-snapshot")
.takes_value(true)
.help("Print the data format version of the provided snapshot state file."),
)
.arg(
Argument::new("http-api-max-payload-size")
.takes_value(true)
.default_value(&http_max_payload_size_str)
.help("Http API request payload max size, in bytes."),
)
.arg(
Argument::new("mmds-size-limit")
.takes_value(true)
.help("Mmds data store limit, in bytes."),
)
.arg(
Argument::new("enable-pci")
.takes_value(false)
.help("Enables PCIe support."),
);
arg_parser.parse_from_cmdline()?;
let arguments = arg_parser.arguments();
if arguments.flag_present("help") {
println!("Firecracker v{}\n", FIRECRACKER_VERSION);
println!("{}", arg_parser.formatted_help());
return Ok(());
}
if arguments.flag_present("version") {
println!("Firecracker v{}\n", FIRECRACKER_VERSION);
return Ok(());
}
if arguments.flag_present("snapshot-version") {
println!("v{SNAPSHOT_VERSION}");
return Ok(());
}
if let Some(snapshot_path) = arguments.single_value("describe-snapshot") {
print_snapshot_data_format(snapshot_path)?;
return Ok(());
}
// It's safe to unwrap here because the field's been provided with a default value.
let instance_id = arguments.single_value("id").unwrap();
validate_instance_id(instance_id.as_str()).expect("Invalid instance ID");
// Apply the logger configuration.
vmm::logger::INSTANCE_ID
.set(String::from(instance_id))
.unwrap();
let log_path = arguments.single_value("log-path").map(PathBuf::from);
let level = arguments
.single_value("level")
.map(|s| vmm::logger::LevelFilter::from_str(s))
.transpose()
.map_err(MainError::InvalidLogLevel)?;
let show_level = arguments.flag_present("show-level").then_some(true);
let show_log_origin = arguments.flag_present("show-log-origin").then_some(true);
let module = arguments.single_value("module").cloned();
LOGGER
.update(LoggerConfig {
log_path,
level,
show_level,
show_log_origin,
module,
})
.map_err(MainError::LoggerInitialization)?;
info!("Running Firecracker v{FIRECRACKER_VERSION}");
register_signal_handlers().map_err(MainError::RegisterSignalHandlers)?;
#[cfg(target_arch = "aarch64")]
enable_ssbd_mitigation();
if let Err(err) = resize_fdtable() {
match err {
// These errors are non-critical: In the worst case we have worse snapshot restore
// performance.
ResizeFdTableError::GetRlimit | ResizeFdTableError::Dup2(_) => {
debug!("Failed to resize fdtable: {err}")
}
// This error means that we now have a random file descriptor lying around, abort to be
// cautious.
ResizeFdTableError::Close(_) => return Err(MainError::ResizeFdtable(err)),
}
}
// Display warnings for any used deprecated parameters.
// Currently unused since there are no deprecated parameters. Uncomment the line when
// deprecating one.
// warn_deprecated_parameters(&arguments);
let instance_info = InstanceInfo {
id: instance_id.clone(),
state: VmState::NotStarted,
vmm_version: FIRECRACKER_VERSION.to_string(),
app_name: "Firecracker".to_string(),
};
if let Some(metrics_path) = arguments.single_value("metrics-path") {
let metrics_config = MetricsConfig {
metrics_path: PathBuf::from(metrics_path),
};
init_metrics(metrics_config).map_err(MainError::MetricsInitialization)?;
}
let mut seccomp_filters: BpfThreadMap = SeccompConfig::from_args(
arguments.flag_present("no-seccomp"),
arguments.single_value("seccomp-filter"),
)
.and_then(seccomp::get_filters)
.map_err(MainError::SeccompFilter)?;
let vmm_config_json = arguments
.single_value("config-file")
.map(fs::read_to_string)
.map(|x| x.expect("Unable to open or read from the configuration file"));
let metadata_json = arguments
.single_value(MMDS_CONTENT_ARG)
.map(fs::read_to_string)
.map(|x| x.expect("Unable to open or read from the mmds content file"));
let boot_timer_enabled = arguments.flag_present("boot-timer");
let pci_enabled = arguments.flag_present("enable-pci");
let api_enabled = !arguments.flag_present("no-api");
let api_payload_limit = arg_parser
.arguments()
.single_value("http-api-max-payload-size")
.map(|lim| {
lim.parse::<usize>()
.expect("'http-api-max-payload-size' parameter expected to be of 'usize' type.")
})
// Safe to unwrap as we provide a default value.
.unwrap();
// If the mmds size limit is not explicitly configured, default to using the
// `http-api-max-payload-size` value.
let mmds_size_limit = arg_parser
.arguments()
.single_value("mmds-size-limit")
.map(|lim| {
lim.parse::<usize>()
.expect("'mmds-size-limit' parameter expected to be of 'usize' type.")
})
.unwrap_or_else(|| api_payload_limit);
if api_enabled {
let bind_path = arguments
.single_value("api-sock")
.map(PathBuf::from)
.expect("Missing argument: api-sock");
let start_time_us = arguments.single_value("start-time-us").map(|s| {
s.parse::<u64>()
.expect("'start-time-us' parameter expected to be of 'u64' type.")
});
let start_time_cpu_us = arguments.single_value("start-time-cpu-us").map(|s| {
s.parse::<u64>()
.expect("'start-time-cpu-us' parameter expected to be of 'u64' type.")
});
let parent_cpu_time_us = arguments.single_value("parent-cpu-time-us").map(|s| {
s.parse::<u64>()
.expect("'parent-cpu-time-us' parameter expected to be of 'u64' type.")
});
let process_time_reporter =
ProcessTimeReporter::new(start_time_us, start_time_cpu_us, parent_cpu_time_us);
api_server_adapter::run_with_api(
&mut seccomp_filters,
vmm_config_json,
bind_path,
instance_info,
process_time_reporter,
boot_timer_enabled,
pci_enabled,
api_payload_limit,
mmds_size_limit,
metadata_json.as_deref(),
)
.map_err(MainError::RunWithApi)
} else {
let seccomp_filters: BpfThreadMap = seccomp_filters
.into_iter()
.filter(|(k, _)| k != "api")
.collect();
run_without_api(
&seccomp_filters,
vmm_config_json,
instance_info,
boot_timer_enabled,
pci_enabled,
mmds_size_limit,
metadata_json.as_deref(),
)
.map_err(MainError::RunWithoutApiError)
}
}
/// Attempts to resize the processes file descriptor table to match RLIMIT_NOFILE or 2048 if no
/// RLIMIT_NOFILE is set (this can only happen if firecracker is run outside the jailer. 2048 is
/// the default the jailer would set).
///
/// We do this resizing because the kernel default is 64, with a reallocation happening whenever
/// the table fills up. This was happening for some larger microVMs, and reallocating the
/// fdtable while a lot of file descriptors are active (due to being eventfds/timerfds registered
/// to epoll) incurs a penalty of 30ms-70ms on the snapshot restore path.
fn resize_fdtable() -> Result<(), ResizeFdTableError> {
    let mut rlimit = libc::rlimit {
        rlim_cur: 0,
        rlim_max: 0,
    };
    // SAFETY: We pass a pointer to a valid area of memory to which we have exclusive mutable access
    if unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlimit as *mut libc::rlimit) } < 0 {
        return Err(ResizeFdTableError::GetRlimit);
    }
    // If no jailer is used, there might not be an NOFILE limit set. In this case, resize
    // the table to the default that the jailer would usually impose (2048)
    let limit: libc::c_int = if rlimit.rlim_cur == libc::RLIM_INFINITY {
        2048
    } else {
        // rlim_cur is an unsigned 64-bit value; anything that does not fit into a
        // c_int also falls back to the jailer default of 2048.
        rlimit.rlim_cur.try_into().unwrap_or(2048)
    };
    // Resize the file descriptor table to its maximal possible size, to ensure that
    // firecracker will not need to reallocate it later. If the file descriptor table
    // needs to be reallocated (which by default happens once more than 64 fds exist,
    // something that happens for reasonably complex microvms due to each device using
    // a multitude of eventfds), this can incur a significant performance impact (it
    // was responsible for a 30ms-70ms impact on snapshot restore times).
    //
    // The trick: dup2-ing stdin to the highest allowed fd number forces the kernel to
    // grow the fdtable to that size in one step; closing the duplicate immediately
    // afterwards leaves the enlarged table in place without leaking an fd.
    if limit > 3 {
        // SAFETY: Duplicating stdin is safe
        if unsafe { libc::dup2(0, limit - 1) } < 0 {
            return Err(ResizeFdTableError::Dup2(io::Error::last_os_error()));
        }
        // SAFETY: Closing the just created duplicate is safe
        if unsafe { libc::close(limit - 1) } < 0 {
            return Err(ResizeFdTableError::Close(io::Error::last_os_error()));
        }
    }
    Ok(())
}
/// Enable SSBD mitigation through `prctl`.
///
/// Best-effort: failures are logged via `error!` and never propagated to the
/// caller.
#[cfg(target_arch = "aarch64")]
pub fn enable_ssbd_mitigation() {
    // SAFETY: Parameters are valid since they are copied verbatim
    // from the kernel's UAPI.
    // PR_SET_SPECULATION_CTRL only uses those 2 parameters, so it's ok
    // to leave the latter 2 as zero.
    let ret = unsafe {
        libc::prctl(
            generated::prctl::PR_SET_SPECULATION_CTRL,
            generated::prctl::PR_SPEC_STORE_BYPASS,
            generated::prctl::PR_SPEC_FORCE_DISABLE,
            0,
            0,
        )
    };
    if ret < 0 {
        let last_error = std::io::Error::last_os_error().raw_os_error().unwrap();
        error!(
            "Could not enable SSBD mitigation through prctl, error {}",
            last_error
        );
        // EINVAL indicates the host kernel does not expose this speculation
        // control interface; log the more specific message in that case.
        if last_error == libc::EINVAL {
            error!("The host does not support SSBD mitigation through prctl.");
        }
    }
}
// Log a warning for any usage of deprecated parameters.
// Currently a no-op placeholder: there are no deprecated command-line
// parameters, but the hook is kept so future deprecations have a single place
// to emit their warnings from.
#[allow(unused)]
fn warn_deprecated_parameters() {}
// Errors that can occur while printing the data format version of a snapshot
// state file. NOTE: the `///` comments on the variants double as the
// `displaydoc` Display format strings, so they are part of runtime output.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
enum SnapshotVersionError {
    /// Unable to open snapshot state file: {0}
    OpenSnapshot(io::Error),
    /// Invalid data format version of snapshot file: {0}
    SnapshotVersion(SnapshotError),
}
// Print the data format version ("v<N>") of the snapshot state file found at
// `snapshot_path` to stdout. Fails if the file cannot be opened or if its
// version header cannot be parsed.
fn print_snapshot_data_format(snapshot_path: &str) -> Result<(), SnapshotVersionError> {
    // Open the snapshot file, then ask the snapshot layer for its version.
    let mut reader = File::open(snapshot_path).map_err(SnapshotVersionError::OpenSnapshot)?;
    let version = get_format_version(&mut reader).map_err(SnapshotVersionError::SnapshotVersion)?;
    println!("v{}", version);
    Ok(())
}
// Errors that can occur when configuring and booting a microVM from a single
// command-line JSON file. NOTE: the `///` variant comments are the
// `displaydoc` Display format strings, so they are part of runtime output.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum BuildFromJsonError {
    /// Configuration for VMM from one single json failed: {0}
    ParseFromJson(vmm::resources::ResourcesError),
    /// Could not Start MicroVM from one single json: {0}
    StartMicroVM(StartMicrovmError),
}
// Configure and start a microVM as described by the command-line JSON.
//
// Parses `config_json` into `VmResources`, applies the command-line overrides
// (`boot_timer_enabled`, `pci_enabled`) on top, then builds and boots the
// microVM. On success returns the resources together with a handle to the
// running VMM.
#[allow(clippy::too_many_arguments)]
fn build_microvm_from_json(
    seccomp_filters: &BpfThreadMap,
    event_manager: &mut EventManager,
    config_json: String,
    instance_info: InstanceInfo,
    boot_timer_enabled: bool,
    pci_enabled: bool,
    mmds_size_limit: usize,
    metadata_json: Option<&str>,
) -> Result<(VmResources, Arc<Mutex<vmm::Vmm>>), BuildFromJsonError> {
    let mut vm_resources =
        VmResources::from_json(&config_json, &instance_info, mmds_size_limit, metadata_json)
            .map_err(BuildFromJsonError::ParseFromJson)?;
    // Command-line flags take precedence over / extend the JSON configuration.
    vm_resources.boot_timer = boot_timer_enabled;
    vm_resources.pci_enabled = pci_enabled;
    let vmm = vmm::builder::build_and_boot_microvm(
        &instance_info,
        &vm_resources,
        event_manager,
        seccomp_filters,
    )
    .map_err(BuildFromJsonError::StartMicroVM)?;
    info!("Successfully started microvm that was configured from one single json");
    Ok((vm_resources, vmm))
}
// Errors returned when running Firecracker without the HTTP API server.
// NOTE: the `///` variant comments are the `displaydoc` Display format
// strings, so they are part of runtime output.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
enum RunWithoutApiError {
    /// MicroVMStopped without an error: {0:?}
    Shutdown(FcExitCode),
    /// Failed to build MicroVM from Json: {0}
    BuildMicroVMFromJson(BuildFromJsonError),
}
/// Configure and run a microVM from the command-line JSON, without starting
/// the HTTP API server.
///
/// Drives the event loop until the microVM shuts down. Returns `Ok(())` on a
/// clean shutdown, `RunWithoutApiError::Shutdown` for any other exit code, or
/// `RunWithoutApiError::BuildMicroVMFromJson` if the microVM could not be
/// built in the first place.
///
/// Fix: the fourth parameter was named `bool_timer_enabled` — a typo for
/// `boot_timer_enabled` (the flag is `--boot-timer`, and every sibling
/// function uses `boot_timer_enabled`). Renamed; callers pass positionally,
/// so this is interface-compatible.
fn run_without_api(
    seccomp_filters: &BpfThreadMap,
    config_json: Option<String>,
    instance_info: InstanceInfo,
    boot_timer_enabled: bool,
    pci_enabled: bool,
    mmds_size_limit: usize,
    metadata_json: Option<&str>,
) -> Result<(), RunWithoutApiError> {
    let mut event_manager = EventManager::new().expect("Unable to create EventManager");
    // Create the firecracker metrics object responsible for periodically printing metrics.
    let firecracker_metrics = Arc::new(Mutex::new(metrics::PeriodicMetrics::new()));
    event_manager.add_subscriber(firecracker_metrics.clone());
    // Build the microVm. We can ignore VmResources since it's not used without api.
    let (_, vmm) = build_microvm_from_json(
        seccomp_filters,
        &mut event_manager,
        // Safe to unwrap since '--no-api' requires this to be set.
        config_json.unwrap(),
        instance_info,
        boot_timer_enabled,
        pci_enabled,
        mmds_size_limit,
        metadata_json,
    )
    .map_err(RunWithoutApiError::BuildMicroVMFromJson)?;
    // Start the metrics.
    firecracker_metrics
        .lock()
        .expect("Poisoned lock")
        .start(metrics::WRITE_METRICS_PERIOD_MS);
    // Run the EventManager that drives everything in the microVM.
    loop {
        event_manager
            .run()
            .expect("Failed to start the event manager");
        match vmm.lock().unwrap().shutdown_exit_code() {
            Some(FcExitCode::Ok) => break,
            Some(exit_code) => return Err(RunWithoutApiError::Shutdown(exit_code)),
            None => continue,
        }
    }
    Ok(())
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/metrics.rs | src/firecracker/src/metrics.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::os::unix::io::AsRawFd;
use std::time::Duration;
use event_manager::{EventOps, Events, MutEventSubscriber};
use utils::time::TimerFd;
use vmm::logger::{IncMetric, METRICS, error, warn};
use vmm_sys_util::epoll::EventSet;
/// Metrics reporting period.
pub(crate) const WRITE_METRICS_PERIOD_MS: u64 = 60000;
/// Object to drive periodic reporting of metrics.
#[derive(Debug)]
pub(crate) struct PeriodicMetrics {
    // Timer fd whose expirations trigger a metrics flush (see `process()`).
    write_metrics_event_fd: TimerFd,
    // Number of flushes performed so far; only tracked in tests so they can
    // assert on flush activity.
    #[cfg(test)]
    flush_counter: u64,
}
impl PeriodicMetrics {
    /// PeriodicMetrics constructor. Can panic on `TimerFd` creation failure.
    pub fn new() -> Self {
        let write_metrics_event_fd = TimerFd::new();
        PeriodicMetrics {
            write_metrics_event_fd,
            #[cfg(test)]
            flush_counter: 0,
        }
    }
    /// Start the periodic metrics engine which will flush metrics every `interval_ms` millisecs.
    pub(crate) fn start(&mut self, interval_ms: u64) {
        // Arm the log write timer.
        // First expiry and recurring interval are both `interval_ms`.
        let duration = Duration::from_millis(interval_ms);
        self.write_metrics_event_fd.arm(duration, Some(duration));
        // Write the metrics straight away to check the process startup time.
        self.write_metrics();
    }
    /// Flush metrics once. Failures are counted and logged, never propagated.
    fn write_metrics(&mut self) {
        if let Err(err) = METRICS.write() {
            // Track how many periodic flushes were lost.
            METRICS.logger.missed_metrics_count.inc();
            error!("Failed to write metrics: {}", err);
        }
        #[cfg(test)]
        {
            self.flush_counter += 1;
        }
    }
}
impl MutEventSubscriber for PeriodicMetrics {
    /// Handle a read event (EPOLLIN).
    fn process(&mut self, event: Events, _: &mut EventOps) {
        let source = event.fd();
        let event_set = event.event_set();
        // TODO: also check for errors. Pending high level discussions on how we want
        // to handle errors in devices.
        let supported_events = EventSet::IN;
        if !supported_events.contains(event_set) {
            warn!(
                "Received unknown event: {:?} from source: {:?}",
                event_set, source
            );
            return;
        }
        if source == self.write_metrics_event_fd.as_raw_fd() {
            // Consume the timer expiration before flushing, otherwise the fd
            // would keep signalling readiness.
            self.write_metrics_event_fd.read();
            self.write_metrics();
        } else {
            error!("Spurious METRICS event!");
        }
    }
    /// Register the metrics timer fd with the event manager for EPOLLIN.
    fn init(&mut self, ops: &mut EventOps) {
        if let Err(err) = ops.add(Events::new(&self.write_metrics_event_fd, EventSet::IN)) {
            error!("Failed to register metrics event: {}", err);
        }
    }
}
#[cfg(test)]
pub mod tests {
    use std::sync::{Arc, Mutex};
    use event_manager::{EventManager, SubscriberOps};
    use super::*;
    // Verifies that `start()` performs an immediate flush and that the armed
    // timer triggers a subsequent flush within 1.5x the configured period.
    #[test]
    fn test_periodic_metrics() {
        let mut event_manager = EventManager::new().expect("Unable to create EventManager");
        let metrics = Arc::new(Mutex::new(PeriodicMetrics::new()));
        event_manager.add_subscriber(metrics.clone());
        let flush_period_ms = 50u16;
        metrics
            .lock()
            .expect("Unlock failed.")
            .start(u64::from(flush_period_ms));
        // .start() does an initial flush.
        assert_eq!(metrics.lock().expect("Unlock failed.").flush_counter, 1);
        // Wait for at most 1.5x period.
        event_manager
            .run_with_timeout(i32::from(flush_period_ms) + i32::from(flush_period_ms) / 2)
            .expect("Metrics event timeout or error.");
        // Verify there was another flush.
        assert_eq!(metrics.lock().expect("Unlock failed.").flush_counter, 2);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server_adapter.rs | src/firecracker/src/api_server_adapter.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::sync::mpsc::{Receiver, Sender, TryRecvError, channel};
use std::sync::{Arc, Mutex};
use std::thread;
use event_manager::{EventOps, Events, MutEventSubscriber, SubscriberOps};
use vmm::logger::{ProcessTimeReporter, error, info, warn};
use vmm::resources::VmResources;
use vmm::rpc_interface::{
ApiRequest, ApiResponse, BuildMicrovmFromRequestsError, PrebootApiController,
RuntimeApiController, VmmAction,
};
use vmm::seccomp::BpfThreadMap;
use vmm::vmm_config::instance_info::InstanceInfo;
use vmm::{EventManager, FcExitCode, Vmm};
use vmm_sys_util::epoll::EventSet;
use vmm_sys_util::eventfd::EventFd;
use super::api_server::{ApiServer, HttpServer, ServerError};
// Errors that can occur while running Firecracker with the HTTP API enabled.
// NOTE: the `///` variant comments are the `displaydoc` Display format
// strings, so they are part of runtime output.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum ApiServerError {
    /// Failed to build MicroVM: {0}.
    BuildMicroVmError(BuildMicrovmFromRequestsError),
    /// MicroVM stopped with an error: {0:?}
    MicroVMStoppedWithError(FcExitCode),
    /// Failed to open the API socket at: {0}. Check that it is not already used.
    FailedToBindSocket(String),
    /// Failed to bind and run the HTTP server: {0}
    FailedToBindAndRunHttpServer(ServerError),
    /// Failed to build MicroVM from Json: {0}
    BuildFromJson(crate::BuildFromJsonError),
}
// Bridges the API thread and the VMM thread: receives API requests over a
// channel (signalled via an eventfd) and applies them to the running VMM.
#[derive(Debug)]
struct ApiServerAdapter {
    // Eventfd signalled by the API thread whenever a request was queued.
    api_event_fd: EventFd,
    // Receiving end for requests coming from the API thread.
    from_api: Receiver<ApiRequest>,
    // Sending end for responses going back to the API thread.
    to_api: Sender<ApiResponse>,
    // Controller that executes API actions against the running VMM.
    controller: RuntimeApiController,
}
impl ApiServerAdapter {
    /// Runs the vmm to completion, while any arising control events are deferred
    /// to a `RuntimeApiController`.
    fn run_microvm(
        api_event_fd: EventFd,
        from_api: Receiver<ApiRequest>,
        to_api: Sender<ApiResponse>,
        vm_resources: VmResources,
        vmm: Arc<Mutex<Vmm>>,
        event_manager: &mut EventManager,
    ) -> Result<(), ApiServerError> {
        // Register the adapter as an event-manager subscriber so API requests
        // arriving on `api_event_fd` are dispatched to `process()` below.
        let api_adapter = Arc::new(Mutex::new(Self {
            api_event_fd,
            from_api,
            to_api,
            controller: RuntimeApiController::new(vm_resources, vmm.clone()),
        }));
        event_manager.add_subscriber(api_adapter);
        // Drive the event loop until the VMM reports a shutdown exit code.
        loop {
            event_manager
                .run()
                .expect("EventManager events driver fatal error");
            match vmm.lock().unwrap().shutdown_exit_code() {
                Some(FcExitCode::Ok) => break,
                Some(exit_code) => return Err(ApiServerError::MicroVMStoppedWithError(exit_code)),
                None => continue,
            }
        }
        Ok(())
    }
    // Executes one API action against the VMM and sends the outcome back to
    // the API thread.
    fn handle_request(&mut self, req_action: VmmAction) {
        let response = self.controller.handle_request(req_action);
        // Send back the result.
        // NOTE(review): `map_err(|_| ())` presumably drops the payload so
        // `expect` does not require `Debug` on the response type — confirm.
        self.to_api
            .send(Box::new(response))
            .map_err(|_| ())
            .expect("one-shot channel closed");
    }
}
impl MutEventSubscriber for ApiServerAdapter {
    /// Handle a read event (EPOLLIN).
    fn process(&mut self, event: Events, _: &mut EventOps) {
        let source = event.fd();
        let event_set = event.event_set();
        if source == self.api_event_fd.as_raw_fd() && event_set == EventSet::IN {
            // Consume the eventfd notification; the actual request travels on
            // the `from_api` channel.
            let _ = self.api_event_fd.read();
            match self.from_api.try_recv() {
                Ok(api_request) => {
                    let request_is_pause = *api_request == VmmAction::Pause;
                    self.handle_request(*api_request);
                    // If the latest req is a pause request, temporarily switch to a mode where we
                    // do blocking `recv`s on the `from_api` receiver in a loop, until we get
                    // unpaused. The device emulation is implicitly paused since we do not
                    // relinquish control to the event manager because we're not returning from
                    // `process`.
                    if request_is_pause {
                        // This loop only attempts to process API requests, so things like the
                        // metric flush timerfd handling are frozen as well.
                        loop {
                            let req = self.from_api.recv().expect("Error receiving API request.");
                            let req_is_resume = *req == VmmAction::Resume;
                            self.handle_request(*req);
                            if req_is_resume {
                                break;
                            }
                        }
                    }
                }
                Err(TryRecvError::Empty) => {
                    warn!("Got a spurious notification from api thread");
                }
                Err(TryRecvError::Disconnected) => {
                    panic!("The channel's sending half was disconnected. Cannot receive data.");
                }
            };
        } else {
            error!("Spurious EventManager event for handler: ApiServerAdapter");
        }
    }
    // Register the API notification eventfd for EPOLLIN with the event manager.
    fn init(&mut self, ops: &mut EventOps) {
        if let Err(err) = ops.add(Events::new(&self.api_event_fd, EventSet::IN)) {
            error!("Failed to register activate event: {}", err);
        }
    }
}
// Run Firecracker with the HTTP API enabled.
//
// Spawns the API server on a dedicated "fc_api" thread, then configures and
// boots the microVM on the current thread — either from the optional
// command-line JSON or from requests received over the API socket. Blocks
// until the microVM shuts down, then signals the API thread via the kill
// switch and joins it before returning the microVM's outcome.
#[allow(clippy::too_many_arguments)]
pub(crate) fn run_with_api(
    seccomp_filters: &mut BpfThreadMap,
    config_json: Option<String>,
    bind_path: PathBuf,
    instance_info: InstanceInfo,
    process_time_reporter: ProcessTimeReporter,
    boot_timer_enabled: bool,
    pci_enabled: bool,
    api_payload_limit: usize,
    mmds_size_limit: usize,
    metadata_json: Option<&str>,
) -> Result<(), ApiServerError> {
    // FD to notify of API events. This is a blocking eventfd by design.
    // It is used in the config/pre-boot loop which is a simple blocking loop
    // which only consumes API events.
    let api_event_fd = EventFd::new(libc::EFD_SEMAPHORE).expect("Cannot create API Eventfd.");
    // FD used to signal API thread to stop/shutdown.
    let api_kill_switch = EventFd::new(libc::EFD_NONBLOCK).expect("Cannot create API kill switch.");
    // Channels for both directions between Vmm and Api threads.
    let (to_vmm, from_api) = channel();
    let (to_api, from_vmm) = channel();
    let to_vmm_event_fd = api_event_fd
        .try_clone()
        .expect("Failed to clone API event FD");
    // The API thread has a dedicated seccomp filter; remove it from the map so
    // the remaining filters apply only to the VMM-side threads.
    let api_seccomp_filter = seccomp_filters
        .remove("api")
        .expect("Missing seccomp filter for API thread.");
    let mut server = match HttpServer::new(&bind_path) {
        Ok(s) => s,
        // AddrInUse gets a dedicated, more actionable error message.
        Err(ServerError::IOError(inner)) if inner.kind() == std::io::ErrorKind::AddrInUse => {
            let sock_path = bind_path.display().to_string();
            return Err(ApiServerError::FailedToBindSocket(sock_path));
        }
        Err(err) => {
            return Err(ApiServerError::FailedToBindAndRunHttpServer(err));
        }
    };
    info!("Listening on API socket ({bind_path:?}).");
    let api_kill_switch_clone = api_kill_switch
        .try_clone()
        .expect("Failed to clone API kill switch");
    server
        .add_kill_switch(api_kill_switch_clone)
        .expect("Cannot add HTTP server kill switch");
    // Start the separate API thread.
    let api_thread = thread::Builder::new()
        .name("fc_api".to_owned())
        .spawn(move || {
            ApiServer::new(to_vmm, from_vmm, to_vmm_event_fd).run(
                server,
                process_time_reporter,
                &api_seccomp_filter,
                api_payload_limit,
            );
        })
        .expect("API thread spawn failed.");
    let mut event_manager = EventManager::new().expect("Unable to create EventManager");
    // Create the firecracker metrics object responsible for periodically printing metrics.
    let firecracker_metrics = Arc::new(Mutex::new(super::metrics::PeriodicMetrics::new()));
    event_manager.add_subscriber(firecracker_metrics.clone());
    // Configure, build and start the microVM.
    let build_result = match config_json {
        // A JSON config was supplied on the command line: boot from it directly.
        Some(json) => super::build_microvm_from_json(
            seccomp_filters,
            &mut event_manager,
            json,
            instance_info,
            boot_timer_enabled,
            pci_enabled,
            mmds_size_limit,
            metadata_json,
        )
        .map_err(ApiServerError::BuildFromJson),
        // No JSON config: block on the API channel until enough configuration
        // requests arrived to boot the microVM.
        None => PrebootApiController::build_microvm_from_requests(
            seccomp_filters,
            &mut event_manager,
            instance_info,
            &from_api,
            &to_api,
            &api_event_fd,
            boot_timer_enabled,
            pci_enabled,
            mmds_size_limit,
            metadata_json,
        )
        .map_err(ApiServerError::BuildMicroVmError),
    };
    let result = build_result.and_then(|(vm_resources, vmm)| {
        // Start flushing metrics only once the microVM is actually running.
        firecracker_metrics
            .lock()
            .expect("Poisoned lock")
            .start(super::metrics::WRITE_METRICS_PERIOD_MS);
        ApiServerAdapter::run_microvm(
            api_event_fd,
            from_api,
            to_api,
            vm_resources,
            vmm,
            &mut event_manager,
        )
    });
    api_kill_switch.write(1).unwrap();
    // This call to thread::join() should block until the API thread has processed the
    // shutdown-internal and returns from its function.
    api_thread.join().expect("Api thread should join");
    result
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/mod.rs | src/firecracker/src/api_server/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Implements the interface for intercepting API requests, forwarding them to the VMM
//! and responding to the user.
//! It is constructed on top of an HTTP Server that uses Unix Domain Sockets and `EPOLL` to
//! handle multiple connections on the same thread.
pub mod parsed_request;
pub mod request;
use std::fmt::Debug;
use std::sync::mpsc;
pub use micro_http::{Body, HttpServer, Request, Response, ServerError, StatusCode, Version};
use parsed_request::{ParsedRequest, RequestAction};
use serde_json::json;
use utils::time::{ClockType, get_time_us};
use vmm::logger::{
METRICS, ProcessTimeReporter, debug, error, info, update_metric_with_elapsed_time, warn,
};
use vmm::rpc_interface::{ApiRequest, ApiResponse, VmmAction};
use vmm::seccomp::BpfProgramRef;
use vmm::vmm_config::snapshot::SnapshotType;
use vmm_sys_util::eventfd::EventFd;
/// Structure associated with the API server implementation.
///
/// A single instance serves all connections on the API Unix domain socket;
/// `run()` loops on the socket on its own dedicated thread.
#[derive(Debug)]
pub struct ApiServer {
    /// Sender which allows passing messages to the VMM.
    api_request_sender: mpsc::Sender<ApiRequest>,
    /// Receiver which collects messages from the VMM.
    vmm_response_receiver: mpsc::Receiver<ApiResponse>,
    /// FD on which we notify the VMM that we have sent at least one
    /// `VmmRequest`.
    to_vmm_fd: EventFd,
}
impl ApiServer {
    /// Constructor for `ApiServer`.
    ///
    /// Returns the newly formed `ApiServer`.
    pub fn new(
        api_request_sender: mpsc::Sender<ApiRequest>,
        vmm_response_receiver: mpsc::Receiver<ApiResponse>,
        to_vmm_fd: EventFd,
    ) -> Self {
        ApiServer {
            api_request_sender,
            vmm_response_receiver,
            to_vmm_fd,
        }
    }
    /// Runs the Api Server.
    ///
    /// # Arguments
    ///
    /// * `server` - the HTTP server listening on the API Unix domain socket.
    /// * `process_time_reporter` - reports the process start time metrics once
    ///   the server is running.
    /// * `seccomp_filter` - the seccomp filter to apply on this (API) thread.
    /// * `api_payload_limit` - the maximum accepted HTTP payload size, in bytes.
    pub fn run(
        &mut self,
        mut server: HttpServer,
        process_time_reporter: ProcessTimeReporter,
        seccomp_filter: BpfProgramRef,
        api_payload_limit: usize,
    ) {
        // Set the api payload size limit.
        server.set_payload_max_size(api_payload_limit);
        // Load seccomp filters on the API thread.
        // Execution panics if filters cannot be loaded, use --no-seccomp if skipping filters
        // altogether is the desired behaviour.
        if let Err(err) = vmm::seccomp::apply_filter(seccomp_filter) {
            panic!(
                "Failed to set the requested seccomp filters on the API thread: {}",
                err
            );
        }
        server.start_server().expect("Cannot start HTTP server");
        info!("API server started.");
        // Store process start time metric.
        process_time_reporter.report_start_time();
        // Store process CPU start time metric.
        process_time_reporter.report_cpu_start_time();
        loop {
            let request_vec = match server.requests() {
                Ok(vec) => vec,
                // The kill switch was written: flush pending responses and exit.
                Err(ServerError::ShutdownEvent) => {
                    server.flush_outgoing_writes();
                    debug!("shutdown request received, API server thread ending.");
                    return;
                }
                Err(err) => {
                    // print request error, but keep server running
                    error!("API Server error on retrieving incoming request: {}", err);
                    continue;
                }
            };
            for server_request in request_vec {
                let request_processing_start_us = get_time_us(ClockType::Monotonic);
                // Use `self.handle_request()` as the processing callback.
                let response = server_request
                    .process(|request| self.handle_request(request, request_processing_start_us));
                if let Err(err) = server.respond(response) {
                    error!("API Server encountered an error on response: {}", err);
                };
                let delta_us = get_time_us(ClockType::Monotonic) - request_processing_start_us;
                debug!("Total previous API call duration: {} us.", delta_us);
            }
        }
    }
    /// Handles an API request received through the associated socket.
    ///
    /// Parse failures are logged and converted straight into an error
    /// `Response`; successfully parsed actions are forwarded to the VMM.
    pub fn handle_request(
        &mut self,
        request: &Request,
        request_processing_start_us: u64,
    ) -> Response {
        match ParsedRequest::try_from(request).map(|r| r.into_parts()) {
            Ok((req_action, mut parsing_info)) => {
                let mut response = match req_action {
                    RequestAction::Sync(vmm_action) => {
                        self.serve_vmm_action_request(vmm_action, request_processing_start_us)
                    }
                };
                // Surface any deprecation notice attached during parsing.
                if let Some(message) = parsing_info.take_deprecation_message() {
                    warn!("{}", message);
                    response.set_deprecation();
                }
                response
            }
            Err(err) => {
                error!("{:?}", err);
                err.into()
            }
        }
    }
    // Forwards a VMM action to the VMM thread, blocks for the outcome, turns
    // it into an HTTP response and updates latency metrics for timed actions.
    fn serve_vmm_action_request(
        &mut self,
        vmm_action: Box<VmmAction>,
        request_processing_start_us: u64,
    ) -> Response {
        // Actions whose end-to-end latency is tracked as a metric.
        let metric_with_action = match *vmm_action {
            VmmAction::CreateSnapshot(ref params) => match params.snapshot_type {
                SnapshotType::Full => Some((
                    &METRICS.latencies_us.full_create_snapshot,
                    "create full snapshot",
                )),
                SnapshotType::Diff => Some((
                    &METRICS.latencies_us.diff_create_snapshot,
                    "create diff snapshot",
                )),
            },
            VmmAction::LoadSnapshot(_) => {
                Some((&METRICS.latencies_us.load_snapshot, "load snapshot"))
            }
            VmmAction::Pause => Some((&METRICS.latencies_us.pause_vm, "pause vm")),
            VmmAction::Resume => Some((&METRICS.latencies_us.resume_vm, "resume vm")),
            _ => None,
        };
        self.api_request_sender
            .send(vmm_action)
            .expect("Failed to send VMM message");
        // Wake the VMM thread so it picks up the queued request.
        self.to_vmm_fd.write(1).expect("Cannot update send VMM fd");
        let vmm_outcome = *(self.vmm_response_receiver.recv().expect("VMM disconnected"));
        let response = ParsedRequest::convert_to_response(&vmm_outcome);
        // Only record latency for successful, tracked actions.
        if vmm_outcome.is_ok()
            && let Some((metric, action)) = metric_with_action
        {
            let elapsed_time_us =
                update_metric_with_elapsed_time(metric, request_processing_start_us);
            info!("'{}' API request took {} us.", action, elapsed_time_us);
        }
        response
    }
    /// An HTTP response which also includes a body.
    pub(crate) fn json_response<T: Into<String> + Debug>(status: StatusCode, body: T) -> Response {
        let mut response = Response::new(Version::Http11, status);
        response.set_body(Body::new(body.into()));
        response
    }
    // Wraps `msg` into the standard `{ "fault_message": ... }` JSON error body.
    fn json_fault_message<T: AsRef<str> + serde::Serialize + Debug>(msg: T) -> String {
        json!({ "fault_message": msg }).to_string()
    }
}
#[cfg(test)]
mod tests {
use std::io::{Read, Write};
use std::os::unix::net::UnixStream;
use std::path::PathBuf;
use std::sync::mpsc::channel;
use std::thread;
use micro_http::HttpConnection;
use utils::time::ClockType;
use vmm::builder::StartMicrovmError;
use vmm::logger::StoreMetric;
use vmm::rpc_interface::{VmmActionError, VmmData};
use vmm::seccomp::get_empty_filters;
use vmm::vmm_config::instance_info::InstanceInfo;
use vmm::vmm_config::snapshot::CreateSnapshotParams;
use vmm_sys_util::tempfile::TempFile;
use super::request::cpu_configuration::parse_put_cpu_config;
use super::*;
/// Test unescaped CPU template in JSON format.
/// Newlines injected into a field's value to
/// test deserialization and logging.
#[cfg(target_arch = "x86_64")]
const TEST_UNESCAPED_JSON_TEMPLATE: &str = r#"{
"msr_modifiers": [
{
"addr": "0x0\n\n\n\nTEST\n\n\n\n",
"bitmap": "0b00"
}
]
}"#;
#[cfg(target_arch = "aarch64")]
pub const TEST_UNESCAPED_JSON_TEMPLATE: &str = r#"{
"reg_modifiers": [
{
"addr": "0x0\n\n\n\nTEST\n\n\n\n",
"bitmap": "0b00"
}
]
}"#;
#[test]
fn test_serve_vmm_action_request() {
let to_vmm_fd = EventFd::new(libc::EFD_NONBLOCK).unwrap();
let (api_request_sender, _from_api) = channel();
let (to_api, vmm_response_receiver) = channel();
let mut api_server = ApiServer::new(api_request_sender, vmm_response_receiver, to_vmm_fd);
to_api
.send(Box::new(Err(VmmActionError::StartMicrovm(
StartMicrovmError::MissingKernelConfig,
))))
.unwrap();
let response = api_server.serve_vmm_action_request(Box::new(VmmAction::StartMicroVm), 0);
assert_eq!(response.status(), StatusCode::BadRequest);
// Since the vmm side is mocked out in this test, the call to serve_vmm_action_request can
// complete very fast (under 1us, the resolution of our metrics). In these cases, the
// latencies_us.pause_vm metric can be set to 0, failing the assertion below. By
// subtracting 1 we assure that the metric will always be set to at least 1 (if it gets set
// at all, which is what this test is trying to prove).
let start_time_us = get_time_us(ClockType::Monotonic) - 1;
assert_eq!(METRICS.latencies_us.pause_vm.fetch(), 0);
to_api.send(Box::new(Ok(VmmData::Empty))).unwrap();
let response =
api_server.serve_vmm_action_request(Box::new(VmmAction::Pause), start_time_us);
assert_eq!(response.status(), StatusCode::NoContent);
assert_ne!(METRICS.latencies_us.pause_vm.fetch(), 0);
assert_eq!(METRICS.latencies_us.diff_create_snapshot.fetch(), 0);
to_api
.send(Box::new(Err(VmmActionError::OperationNotSupportedPreBoot)))
.unwrap();
let response = api_server.serve_vmm_action_request(
Box::new(VmmAction::CreateSnapshot(CreateSnapshotParams {
snapshot_type: SnapshotType::Diff,
snapshot_path: PathBuf::new(),
mem_file_path: PathBuf::new(),
})),
start_time_us,
);
assert_eq!(response.status(), StatusCode::BadRequest);
// The metric should not be updated if the request wasn't successful.
assert_eq!(METRICS.latencies_us.diff_create_snapshot.fetch(), 0);
to_api.send(Box::new(Ok(VmmData::Empty))).unwrap();
let response = api_server.serve_vmm_action_request(
Box::new(VmmAction::CreateSnapshot(CreateSnapshotParams {
snapshot_type: SnapshotType::Diff,
snapshot_path: PathBuf::new(),
mem_file_path: PathBuf::new(),
})),
start_time_us,
);
assert_eq!(response.status(), StatusCode::NoContent);
assert_ne!(METRICS.latencies_us.diff_create_snapshot.fetch(), 0);
assert_eq!(METRICS.latencies_us.full_create_snapshot.fetch(), 0);
}
#[test]
fn test_handle_request() {
let to_vmm_fd = EventFd::new(libc::EFD_NONBLOCK).unwrap();
let (api_request_sender, _from_api) = channel();
let (to_api, vmm_response_receiver) = channel();
let mut api_server = ApiServer::new(api_request_sender, vmm_response_receiver, to_vmm_fd);
// Test an Actions request.
let (mut sender, receiver) = UnixStream::pair().unwrap();
let mut connection = HttpConnection::new(receiver);
sender
.write_all(
b"PUT /actions HTTP/1.1\r\n\
Content-Type: application/json\r\n\
Content-Length: 49\r\n\r\n{ \
\"action_type\": \"Invalid\", \
\"payload\": \"string\" \
}",
)
.unwrap();
connection.try_read().unwrap();
let req = connection.pop_parsed_request().unwrap();
let response = api_server.handle_request(&req, 0);
assert_eq!(response.status(), StatusCode::BadRequest);
// Test a Get Info request.
to_api
.send(Box::new(Ok(VmmData::InstanceInformation(
InstanceInfo::default(),
))))
.unwrap();
sender.write_all(b"GET / HTTP/1.1\r\n\r\n").unwrap();
connection.try_read().unwrap();
let req = connection.pop_parsed_request().unwrap();
let response = api_server.handle_request(&req, 0);
assert_eq!(response.status(), StatusCode::OK);
// Test erroneous request.
sender
.write_all(
b"GET /mmds HTTP/1.1\r\n\
Content-Type: application/json\r\n\
Content-Length: 2\r\n\r\n{}",
)
.unwrap();
connection.try_read().unwrap();
let req = connection.pop_parsed_request().unwrap();
let response = api_server.handle_request(&req, 0);
assert_eq!(response.status(), StatusCode::BadRequest);
}
#[test]
fn test_handle_request_logging() {
let cpu_template_json = TEST_UNESCAPED_JSON_TEMPLATE;
let result = parse_put_cpu_config(&Body::new(cpu_template_json.as_bytes()));
let result_error = result.unwrap_err();
let err_msg = format!("{}", result_error);
assert_ne!(
1,
err_msg.lines().count(),
"Error Body response:\n{}",
err_msg
);
let err_msg_with_debug = format!("{:?}", result_error);
// Check the loglines are on one line.
assert_eq!(
1,
err_msg_with_debug.lines().count(),
"Error Body response:\n{}",
err_msg_with_debug
);
}
#[test]
fn test_bind_and_run() {
let mut tmp_socket = TempFile::new().unwrap();
tmp_socket.remove().unwrap();
let path_to_socket = tmp_socket.as_path().to_str().unwrap().to_owned();
let api_thread_path_to_socket = path_to_socket.clone();
let to_vmm_fd = EventFd::new(libc::EFD_NONBLOCK).unwrap();
let (api_request_sender, _from_api) = channel();
let (to_api, vmm_response_receiver) = channel();
let seccomp_filters = get_empty_filters();
let server = HttpServer::new(PathBuf::from(api_thread_path_to_socket)).unwrap();
thread::Builder::new()
.name("fc_api_test".to_owned())
.spawn(move || {
ApiServer::new(api_request_sender, vmm_response_receiver, to_vmm_fd).run(
server,
ProcessTimeReporter::new(Some(1), Some(1), Some(1)),
seccomp_filters.get("api").unwrap(),
vmm::HTTP_MAX_PAYLOAD_SIZE,
);
})
.unwrap();
to_api
.send(Box::new(Ok(VmmData::InstanceInformation(
InstanceInfo::default(),
))))
.unwrap();
let mut sock = UnixStream::connect(PathBuf::from(path_to_socket)).unwrap();
// Send a GET InstanceInfo request.
sock.write_all(b"GET / HTTP/1.1\r\n\r\n").unwrap();
let mut buf: [u8; 100] = [0; 100];
assert!(sock.read(&mut buf[..]).unwrap() > 0);
// Send an erroneous request.
sock.write_all(b"OPTIONS / HTTP/1.1\r\n\r\n").unwrap();
let mut buf: [u8; 100] = [0; 100];
assert!(sock.read(&mut buf[..]).unwrap() > 0);
}
// Starts the API server with a tiny (50-byte) payload limit and checks that
// an over-sized request is rejected with a 400 response describing the limit.
#[test]
fn test_bind_and_run_with_limit() {
    // Create a path for the UDS, but remove the file itself so binding succeeds.
    let mut tmp_socket = TempFile::new().unwrap();
    tmp_socket.remove().unwrap();
    let path_to_socket = tmp_socket.as_path().to_str().unwrap().to_owned();
    let api_thread_path_to_socket = path_to_socket.clone();
    let to_vmm_fd = EventFd::new(libc::EFD_NONBLOCK).unwrap();
    // Channels towards/from the (absent) VMM; the limit check is handled
    // entirely by the server, so the far ends are unused here.
    let (api_request_sender, _from_api) = channel();
    let (_to_api, vmm_response_receiver) = channel();
    let seccomp_filters = get_empty_filters();
    let server = HttpServer::new(PathBuf::from(api_thread_path_to_socket)).unwrap();
    thread::Builder::new()
        .name("fc_api_test".to_owned())
        .spawn(move || {
            ApiServer::new(api_request_sender, vmm_response_receiver, to_vmm_fd).run(
                server,
                ProcessTimeReporter::new(Some(1), Some(1), Some(1)),
                seccomp_filters.get("api").unwrap(),
                // Payload size limit under test.
                50,
            )
        })
        .unwrap();
    let mut sock = UnixStream::connect(PathBuf::from(path_to_socket)).unwrap();
    // Send a PUT request declaring a payload (50000 bytes) far above the limit.
    sock.write_all(
        b"PUT http://localhost/home HTTP/1.1\r\n\
        Content-Length: 50000\r\n\r\naaaaaa",
    )
    .unwrap();
    let mut buf: [u8; 265] = [0; 265];
    assert!(sock.read(&mut buf[..]).unwrap() > 0);
    // The server must answer with a 400 whose JSON body explains the limit.
    let error_message = b"HTTP/1.1 400 \r\n\
        Server: Firecracker API\r\n\
        Connection: keep-alive\r\n\
        Content-Type: application/json\r\n\
        Content-Length: 146\r\n\r\n{ \"error\": \"\
        Request payload with size 50000 is larger than \
        the limit of 50 allowed by server.\nAll previous \
        unanswered requests will be dropped.\" }";
    assert_eq!(&buf[..], &error_message[..]);
}
// Verifies that writing to the kill-switch EventFd makes the API server
// thread return from `run`, so the thread can be joined.
#[test]
fn test_kill_switch() {
    let mut tmp_socket = TempFile::new().unwrap();
    tmp_socket.remove().unwrap();
    let path_to_socket = tmp_socket.as_path().to_str().unwrap().to_owned();
    let to_vmm_fd = EventFd::new(libc::EFD_NONBLOCK).unwrap();
    let (api_request_sender, _from_api) = channel();
    let (_to_api, vmm_response_receiver) = channel();
    let seccomp_filters = get_empty_filters();
    // Keep one handle to trigger the shutdown; hand the clone to the server.
    let api_kill_switch = EventFd::new(libc::EFD_NONBLOCK).unwrap();
    let kill_switch = api_kill_switch.try_clone().unwrap();
    let mut server = HttpServer::new(PathBuf::from(path_to_socket)).unwrap();
    server.add_kill_switch(kill_switch).unwrap();
    let api_thread = thread::Builder::new()
        .name("fc_api_test".to_owned())
        .spawn(move || {
            ApiServer::new(api_request_sender, vmm_response_receiver, to_vmm_fd).run(
                server,
                ProcessTimeReporter::new(Some(1), Some(1), Some(1)),
                seccomp_filters.get("api").unwrap(),
                vmm::HTTP_MAX_PAYLOAD_SIZE,
            )
        })
        .unwrap();
    // Signal the API thread it should shut down.
    api_kill_switch.write(1).unwrap();
    // Verify API thread was brought down.
    api_thread.join().unwrap();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/parsed_request.rs | src/firecracker/src/api_server/parsed_request.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fmt::Debug;
use micro_http::{Body, Method, Request, Response, StatusCode, Version};
use serde::ser::Serialize;
use serde_json::Value;
use vmm::logger::{Level, error, info, log_enabled};
use vmm::rpc_interface::{VmmAction, VmmActionError, VmmData};
use super::ApiServer;
use super::request::actions::parse_put_actions;
use super::request::balloon::{parse_get_balloon, parse_patch_balloon, parse_put_balloon};
use super::request::boot_source::parse_put_boot_source;
use super::request::cpu_configuration::parse_put_cpu_config;
use super::request::drive::{parse_patch_drive, parse_put_drive};
use super::request::entropy::parse_put_entropy;
use super::request::instance_info::parse_get_instance_info;
use super::request::logger::parse_put_logger;
use super::request::machine_configuration::{
parse_get_machine_config, parse_patch_machine_config, parse_put_machine_config,
};
use super::request::metrics::parse_put_metrics;
use super::request::mmds::{parse_get_mmds, parse_patch_mmds, parse_put_mmds};
use super::request::net::{parse_patch_net, parse_put_net};
use super::request::pmem::parse_put_pmem;
use super::request::snapshot::{parse_patch_vm_state, parse_put_snapshot};
use super::request::version::parse_get_version;
use super::request::vsock::parse_put_vsock;
use crate::api_server::request::hotplug::memory::{
parse_get_memory_hotplug, parse_patch_memory_hotplug, parse_put_memory_hotplug,
};
use crate::api_server::request::serial::parse_put_serial;
/// The kind of action extracted from a parsed API request.
///
/// All requests are currently serviced synchronously by the VMM.
#[derive(Debug)]
pub(crate) enum RequestAction {
    // Boxed so the variant stays pointer-sized regardless of how large
    // `VmmAction` (defined in the vmm crate) is.
    Sync(Box<VmmAction>),
}
/// Auxiliary information gathered while parsing a request, e.g. whether
/// any deprecated field or endpoint was used.
#[derive(Debug, Default, PartialEq)]
pub(crate) struct ParsingInfo {
    // Accumulated deprecation notice(s); `None` when nothing deprecated was seen.
    deprecation_message: Option<String>,
}
impl ParsingInfo {
    /// Records a deprecation notice for the request being parsed.
    ///
    /// If a message is already present the new one is concatenated onto it
    /// (no separator is inserted), otherwise the message is stored as-is.
    pub fn append_deprecation_message(&mut self, message: &str) {
        if let Some(existing) = self.deprecation_message.as_mut() {
            existing.push_str(message);
        } else {
            self.deprecation_message = Some(message.to_owned());
        }
    }

    /// Removes and returns the accumulated deprecation message, leaving `None`.
    pub fn take_deprecation_message(&mut self) -> Option<String> {
        self.deprecation_message.take()
    }
}
/// A fully parsed API request: the action to forward to the VMM plus any
/// metadata gathered during parsing (such as deprecation notices).
#[derive(Debug)]
pub(crate) struct ParsedRequest {
    // What the VMM should do in response to this request.
    action: RequestAction,
    // Side-channel information produced while parsing (deprecation messages).
    parsing_info: ParsingInfo,
}
impl TryFrom<&Request> for ParsedRequest {
    type Error = RequestError;

    /// Routes an incoming HTTP request to the matching `parse_*` helper,
    /// dispatching on (method, first path segment, presence of a body).
    /// Returns `RequestError` for unknown paths or malformed payloads.
    fn try_from(request: &Request) -> Result<Self, Self::Error> {
        let request_uri = request.uri().get_abs_path().to_string();
        let description = describe(
            request.method(),
            request_uri.as_str(),
            request.body.as_ref(),
        );
        info!("The API server received a {description}.");
        // Split request uri by '/' by doing:
        // 1. Trim starting '/' characters
        // 2. Splitting by '/'
        let mut path_tokens = request_uri.trim_start_matches('/').split_terminator('/');
        let path = path_tokens.next().unwrap_or("");
        match (request.method(), path, request.body.as_ref()) {
            // GET endpoints: never accept a body.
            (Method::Get, "", None) => parse_get_instance_info(),
            (Method::Get, "balloon", None) => parse_get_balloon(path_tokens),
            (Method::Get, "version", None) => parse_get_version(),
            (Method::Get, "vm", None) if path_tokens.next() == Some("config") => {
                Ok(ParsedRequest::new_sync(VmmAction::GetFullVmConfig))
            }
            (Method::Get, "machine-config", None) => parse_get_machine_config(),
            (Method::Get, "mmds", None) => parse_get_mmds(),
            (Method::Get, "hotplug", None) if path_tokens.next() == Some("memory") => {
                parse_get_memory_hotplug()
            }
            // A GET with a body is rejected uniformly.
            (Method::Get, _, Some(_)) => method_to_error(Method::Get),
            // PUT endpoints: always require a body.
            (Method::Put, "actions", Some(body)) => parse_put_actions(body),
            (Method::Put, "balloon", Some(body)) => parse_put_balloon(body),
            (Method::Put, "boot-source", Some(body)) => parse_put_boot_source(body),
            (Method::Put, "cpu-config", Some(body)) => parse_put_cpu_config(body),
            (Method::Put, "drives", Some(body)) => parse_put_drive(body, path_tokens.next()),
            (Method::Put, "pmem", Some(body)) => parse_put_pmem(body, path_tokens.next()),
            (Method::Put, "logger", Some(body)) => parse_put_logger(body),
            (Method::Put, "serial", Some(body)) => parse_put_serial(body),
            (Method::Put, "machine-config", Some(body)) => parse_put_machine_config(body),
            (Method::Put, "metrics", Some(body)) => parse_put_metrics(body),
            (Method::Put, "mmds", Some(body)) => parse_put_mmds(body, path_tokens.next()),
            (Method::Put, "network-interfaces", Some(body)) => {
                parse_put_net(body, path_tokens.next())
            }
            (Method::Put, "snapshot", Some(body)) => parse_put_snapshot(body, path_tokens.next()),
            (Method::Put, "vsock", Some(body)) => parse_put_vsock(body),
            (Method::Put, "entropy", Some(body)) => parse_put_entropy(body),
            (Method::Put, "hotplug", Some(body)) if path_tokens.next() == Some("memory") => {
                parse_put_memory_hotplug(body)
            }
            (Method::Put, _, None) => method_to_error(Method::Put),
            // PATCH endpoints. Note: PATCH /balloon accepts an optional body;
            // the helper decides on the sub-path.
            (Method::Patch, "balloon", body) => parse_patch_balloon(body, path_tokens),
            (Method::Patch, "drives", Some(body)) => parse_patch_drive(body, path_tokens.next()),
            (Method::Patch, "machine-config", Some(body)) => parse_patch_machine_config(body),
            (Method::Patch, "mmds", Some(body)) => parse_patch_mmds(body),
            (Method::Patch, "network-interfaces", Some(body)) => {
                parse_patch_net(body, path_tokens.next())
            }
            (Method::Patch, "vm", Some(body)) => parse_patch_vm_state(body),
            (Method::Patch, "hotplug", Some(body)) if path_tokens.next() == Some("memory") => {
                parse_patch_memory_hotplug(body)
            }
            (Method::Patch, _, None) => method_to_error(Method::Patch),
            // Anything else is an unknown method/path combination.
            (method, unknown_uri, _) => Err(RequestError::InvalidPathMethod(
                unknown_uri.to_string(),
                method,
            )),
        }
    }
}
impl ParsedRequest {
    /// Wraps an action into a `ParsedRequest` with empty parsing info.
    pub(crate) fn new(action: RequestAction) -> Self {
        Self {
            action,
            parsing_info: Default::default(),
        }
    }

    /// Consumes the request, yielding the action and the parsing metadata.
    pub(crate) fn into_parts(self) -> (RequestAction, ParsingInfo) {
        (self.action, self.parsing_info)
    }

    /// Mutable access to the parsing metadata (used to attach deprecation
    /// messages after construction).
    pub(crate) fn parsing_info(&mut self) -> &mut ParsingInfo {
        &mut self.parsing_info
    }

    /// Builds a `200 OK` response whose body is `body_data` serialized as JSON.
    pub(crate) fn success_response_with_data<T>(body_data: &T) -> Response
    where
        T: ?Sized + Serialize + Debug,
    {
        info!("The request was executed successfully. Status code: 200 OK.");
        let mut response = Response::new(Version::Http11, StatusCode::OK);
        response.set_body(Body::new(serde_json::to_string(body_data).unwrap()));
        response
    }

    /// Builds a `200 OK` response for an MMDS value; a JSON `null` store is
    /// rendered as an empty object (`{}`) rather than the literal `null`.
    pub(crate) fn success_response_with_mmds_value(body_data: &Value) -> Response {
        info!("The request was executed successfully. Status code: 200 OK.");
        let mut response = Response::new(Version::Http11, StatusCode::OK);
        let body_str = match body_data {
            Value::Null => "{}".to_string(),
            _ => serde_json::to_string(body_data).unwrap(),
        };
        response.set_body(Body::new(body_str));
        response
    }

    /// Maps a VMM action outcome to an HTTP response:
    /// `Ok(Empty)` -> 204, other `Ok` variants -> 200 with a JSON body,
    /// `MmdsLimitExceeded` -> 413, any other error -> 400.
    pub(crate) fn convert_to_response(
        request_outcome: &Result<VmmData, VmmActionError>,
    ) -> Response {
        match request_outcome {
            Ok(vmm_data) => match vmm_data {
                VmmData::Empty => {
                    info!("The request was executed successfully. Status code: 204 No Content.");
                    Response::new(Version::Http11, StatusCode::NoContent)
                }
                VmmData::MachineConfiguration(machine_config) => {
                    Self::success_response_with_data(machine_config)
                }
                VmmData::MmdsValue(value) => Self::success_response_with_mmds_value(value),
                VmmData::BalloonConfig(balloon_config) => {
                    Self::success_response_with_data(balloon_config)
                }
                VmmData::BalloonStats(stats) => Self::success_response_with_data(stats),
                VmmData::VirtioMemStatus(data) => Self::success_response_with_data(data),
                VmmData::HintingStatus(hinting_status) => {
                    Self::success_response_with_data(hinting_status)
                }
                VmmData::InstanceInformation(info) => Self::success_response_with_data(info),
                VmmData::VmmVersion(version) => Self::success_response_with_data(
                    &serde_json::json!({ "firecracker_version": version.as_str() }),
                ),
                VmmData::FullVmConfig(config) => Self::success_response_with_data(config),
            },
            Err(vmm_action_error) => {
                // Pick the status code first; all error responses share the
                // same JSON fault-message body format.
                let mut response = match vmm_action_error {
                    VmmActionError::MmdsLimitExceeded(_err) => {
                        error!(
                            "Received Error. Status code: 413 Payload too large. Message: {}",
                            vmm_action_error
                        );
                        Response::new(Version::Http11, StatusCode::PayloadTooLarge)
                    }
                    _ => {
                        error!(
                            "Received Error. Status code: 400 Bad Request. Message: {}",
                            vmm_action_error
                        );
                        Response::new(Version::Http11, StatusCode::BadRequest)
                    }
                };
                response.set_body(Body::new(ApiServer::json_fault_message(
                    vmm_action_error.to_string(),
                )));
                response
            }
        }
    }

    /// Helper function to avoid boiler-plate code.
    pub(crate) fn new_sync(vmm_action: VmmAction) -> ParsedRequest {
        ParsedRequest::new(RequestAction::Sync(Box::new(vmm_action)))
    }
}
/// Helper function for metric-logging purposes on API requests.
///
/// # Arguments
///
/// * `method` - one of `GET`, `PATCH`, `PUT`
/// * `path` - path of the API request
/// * `body` - body of the API request
fn describe(method: Method, path: &str, body: Option<&Body>) -> String {
    match body {
        // No body, or an MMDS payload (never logged verbatim): short form.
        None => format!("{:?} request on {:?}", method, path),
        Some(_) if path == "/mmds" => format!("{:?} request on {:?}", method, path),
        Some(payload) if path == "/cpu-config" => {
            // Only include the CPU template in the log line when the log
            // level is at Debug or higher.
            if log_enabled!(Level::Debug) {
                describe_with_body(method, path, payload)
            } else {
                format!(
                    "{:?} request on {:?}. To view the CPU template received by the API, \
                     configure log-level to DEBUG",
                    method, path
                )
            }
        }
        // Every other body-carrying request is logged with its payload.
        Some(payload) => describe_with_body(method, path, payload),
    }
}
/// Renders a log description that includes the request payload, decoding it
/// as UTF-8 (with a placeholder when the bytes are not valid UTF-8).
fn describe_with_body(method: Method, path: &str, payload_value: &Body) -> String {
    let body_text = std::str::from_utf8(payload_value.body.as_slice())
        .unwrap_or("inconvertible to UTF-8")
        .to_string();
    format!(
        "{:?} request on {:?} with body {:?}",
        method, path, body_text
    )
}
/// Generates a `GenericError` for each request method.
pub(crate) fn method_to_error(method: Method) -> Result<ParsedRequest, RequestError> {
    // Each method gets a fixed message; all map to 400 Bad Request.
    let message = match method {
        Method::Get => "GET request cannot have a body.",
        Method::Put => "Empty PUT request.",
        Method::Patch => "Empty PATCH request.",
    };
    Err(RequestError::Generic(
        StatusCode::BadRequest,
        message.to_string(),
    ))
}
/// Errors that can arise while parsing an API request.
#[derive(Debug, thiserror::Error)]
pub(crate) enum RequestError {
    /// The resource ID is empty.
    #[error("The ID cannot be empty.")]
    EmptyID,
    /// A generic error, with a given status code and message to be turned into a fault message.
    #[error("{1}")]
    Generic(StatusCode, String),
    /// The resource ID must only contain alphanumeric characters and '_'.
    #[error("API Resource IDs can only contain alphanumeric characters and underscores.")]
    InvalidID,
    /// The HTTP method & request path combination is not valid.
    #[error("Invalid request method and/or path: {} {}.", .1.to_str(), .0)]
    InvalidPathMethod(String, Method),
    /// An error occurred when deserializing the json body of a request.
    #[error("An error occurred when deserializing the json body of a request: {0}.")]
    SerdeJson(#[from] serde_json::Error),
}
// It's convenient to turn errors into HTTP responses directly.
impl From<RequestError> for Response {
    fn from(err: RequestError) -> Self {
        // The body is always the JSON fault message for the error; only the
        // status code varies (Generic carries its own, everything else is 400).
        let msg = ApiServer::json_fault_message(err.to_string());
        let status = match err {
            RequestError::Generic(status, _) => status,
            RequestError::EmptyID
            | RequestError::InvalidID
            | RequestError::InvalidPathMethod(_, _)
            | RequestError::SerdeJson(_) => StatusCode::BadRequest,
        };
        ApiServer::json_response(status, msg)
    }
}
// This function is supposed to do id validation for requests.
pub(crate) fn checked_id(id: &str) -> Result<&str, RequestError> {
    // An ID is valid iff it is non-empty and consists solely of
    // alphanumeric characters and underscores.
    match id {
        "" => Err(RequestError::EmptyID),
        _ if id.chars().any(|c| !(c.is_alphanumeric() || c == '_')) => {
            Err(RequestError::InvalidID)
        }
        _ => Ok(id),
    }
}
#[cfg(test)]
pub mod tests {
use std::io::{Cursor, Write};
use std::os::unix::net::UnixStream;
use std::str::FromStr;
use micro_http::HttpConnection;
use vmm::builder::StartMicrovmError;
use vmm::cpu_config::templates::test_utils::build_test_template;
use vmm::devices::virtio::balloon::device::HintingStatus;
use vmm::resources::VmmConfig;
use vmm::rpc_interface::VmmActionError;
use vmm::vmm_config::balloon::{BalloonDeviceConfig, BalloonStats};
use vmm::vmm_config::instance_info::InstanceInfo;
use vmm::vmm_config::machine_config::MachineConfig;
use super::*;
// Test-only equality: two parsed requests are equal when both their
// deprecation messages and their wrapped `VmmAction`s match.
impl PartialEq for ParsedRequest {
    fn eq(&self, other: &ParsedRequest) -> bool {
        if self.parsing_info.deprecation_message != other.parsing_info.deprecation_message {
            return false;
        }
        match (&self.action, &other.action) {
            (RequestAction::Sync(sync_req), RequestAction::Sync(other_sync_req)) => {
                sync_req == other_sync_req
            }
        }
    }
}
// Test helper: unwraps the `VmmAction` out of a parsed request.
pub(crate) fn vmm_action_from_request(req: ParsedRequest) -> VmmAction {
    match req.action {
        RequestAction::Sync(vmm_action) => *vmm_action,
    }
}

// Test helper: like `vmm_action_from_request`, but additionally asserts
// that the request carries the expected deprecation message.
pub(crate) fn depr_action_from_req(req: ParsedRequest, msg: Option<String>) -> VmmAction {
    let (action_req, mut parsing_info) = req.into_parts();
    match action_req {
        RequestAction::Sync(vmm_action) => {
            let req_msg = parsing_info.take_deprecation_message();
            assert!(req_msg.is_some());
            assert_eq!(req_msg, msg);
            *vmm_action
        }
    }
}
/// Builds the raw HTTP response text the API server is expected to emit for
/// the given body and status code. A 204 response carries no content headers
/// and no body; every other status gets a JSON content type and length.
fn http_response(body: &str, status_code: i32) -> String {
    let mut response = format!(
        "HTTP/1.1 {} \r\nServer: Firecracker API\r\nConnection: keep-alive\r\n",
        status_code
    );
    if status_code == 204 {
        // No Content: just terminate the header section.
        response.push_str("\r\n");
    } else {
        response.push_str(&format!(
            "Content-Type: application/json\r\nContent-Length: {}\r\n\r\n{}",
            body.len(),
            body
        ));
    }
    response
}
/// Builds the raw HTTP request text used to drive the server under test.
/// With a body, a Content-Length header and the payload are appended;
/// without one, the header section is simply terminated.
fn http_request(request_type: &str, endpoint: &str, body: Option<&str>) -> String {
    let request_line = format!(
        "{} {} HTTP/1.1\r\nContent-Type: application/json\r\n",
        request_type, endpoint
    );
    match body {
        Some(payload) => format!(
            "{}Content-Length: {}\r\n\r\n{}",
            request_line,
            payload.len(),
            payload
        ),
        None => format!("{}\r\n", request_line),
    }
}
// A URI without a leading '/' must fail to parse into a `ParsedRequest`.
#[test]
fn test_missing_slash() {
    let (mut sender, receiver) = UnixStream::pair().unwrap();
    let mut connection = HttpConnection::new(receiver);
    sender
        .write_all(http_request("GET", "none", Some("body")).as_bytes())
        .unwrap();
    connection.try_read().unwrap();
    let req = connection.pop_parsed_request().unwrap();
    ParsedRequest::try_from(&req).unwrap_err();
}

// `checked_id` accepts alphanumerics and underscores, and rejects empty or
// punctuated IDs with the expected messages.
#[test]
fn test_checked_id() {
    checked_id("dummy").unwrap();
    checked_id("dummy_1").unwrap();
    assert_eq!(
        format!("{}", checked_id("").unwrap_err()),
        "The ID cannot be empty."
    );
    assert_eq!(
        format!("{}", checked_id("dummy!!").unwrap_err()),
        "API Resource IDs can only contain alphanumeric characters and underscores."
    );
}
// A GET carrying a body must be rejected with the dedicated message.
#[test]
fn test_invalid_get() {
    let (mut sender, receiver) = UnixStream::pair().unwrap();
    let mut connection = HttpConnection::new(receiver);
    sender
        .write_all(http_request("GET", "/mmds", Some("body")).as_bytes())
        .unwrap();
    connection.try_read().unwrap();
    let req = connection.pop_parsed_request().unwrap();
    let parsed_request = ParsedRequest::try_from(&req);
    assert!(matches!(
        &parsed_request,
        Err(RequestError::Generic(StatusCode::BadRequest, s)) if s == "GET request cannot have a body.",
    ));
}

// A PUT without a body must be rejected.
#[test]
fn test_invalid_put() {
    let (mut sender, receiver) = UnixStream::pair().unwrap();
    let mut connection = HttpConnection::new(receiver);
    sender
        .write_all(http_request("PUT", "/mmds", None).as_bytes())
        .unwrap();
    connection.try_read().unwrap();
    let req = connection.pop_parsed_request().unwrap();
    let parsed_request = ParsedRequest::try_from(&req);
    assert!(matches!(
        &parsed_request,
        Err(RequestError::Generic(StatusCode::BadRequest, s)) if s == "Empty PUT request.",
    ));
}

// A PATCH without a body must be rejected, including for the /balloon path
// (whose routing accepts an optional body but still errors on empty PATCH).
#[test]
fn test_invalid_patch() {
    let (mut sender, receiver) = UnixStream::pair().unwrap();
    let mut connection = HttpConnection::new(receiver);
    sender
        .write_all(http_request("PATCH", "/mmds", None).as_bytes())
        .unwrap();
    connection.try_read().unwrap();
    let req = connection.pop_parsed_request().unwrap();
    let parsed_request = ParsedRequest::try_from(&req);
    assert!(matches!(
        &parsed_request,
        Err(RequestError::Generic(StatusCode::BadRequest, s)) if s == "Empty PATCH request.",
    ));
    sender
        .write_all(http_request("PATCH", "/balloon", None).as_bytes())
        .unwrap();
    connection.try_read().unwrap();
    let req = connection.pop_parsed_request().unwrap();
    let parsed_request = ParsedRequest::try_from(&req);
    assert!(matches!(
        &parsed_request,
        Err(RequestError::Generic(StatusCode::BadRequest, s)) if s == "Empty PATCH request.",
    ));
}
// Each `RequestError` variant must convert into the expected 400 response
// (status from `Generic` is honored; all others are Bad Request).
#[test]
fn test_error_into_response() {
    // Generic error.
    let mut buf = Cursor::new(vec![0]);
    let response: Response =
        RequestError::Generic(StatusCode::BadRequest, "message".to_string()).into();
    response.write_all(&mut buf).unwrap();
    let body = ApiServer::json_fault_message("message");
    let expected_response = http_response(&body, 400);
    assert_eq!(buf.into_inner(), expected_response.as_bytes());
    // Empty ID error.
    let mut buf = Cursor::new(vec![0]);
    let response: Response = RequestError::EmptyID.into();
    response.write_all(&mut buf).unwrap();
    let body = ApiServer::json_fault_message("The ID cannot be empty.");
    let expected_response = http_response(&body, 400);
    assert_eq!(buf.into_inner(), expected_response.as_bytes());
    // Invalid ID error.
    let mut buf = Cursor::new(vec![0]);
    let response: Response = RequestError::InvalidID.into();
    response.write_all(&mut buf).unwrap();
    let body = ApiServer::json_fault_message(
        "API Resource IDs can only contain alphanumeric characters and underscores.",
    );
    let expected_response = http_response(&body, 400);
    assert_eq!(buf.into_inner(), expected_response.as_bytes());
    // Invalid path or method error.
    let mut buf = Cursor::new(vec![0]);
    let response: Response =
        RequestError::InvalidPathMethod("path".to_string(), Method::Get).into();
    response.write_all(&mut buf).unwrap();
    let body = ApiServer::json_fault_message(format!(
        "Invalid request method and/or path: {} {}.",
        Method::Get.to_str(),
        "path"
    ));
    let expected_response = http_response(&body, 400);
    assert_eq!(buf.into_inner(), expected_response.as_bytes());
    // Serde error.
    let mut buf = Cursor::new(vec![0]);
    let serde_error = serde_json::Value::from_str("").unwrap_err();
    let response: Response = RequestError::SerdeJson(serde_error).into();
    response.write_all(&mut buf).unwrap();
    let body = ApiServer::json_fault_message(
        "An error occurred when deserializing the json body of a request: EOF while parsing a \
         value at line 1 column 0.",
    );
    let expected_response = http_response(&body, 400);
    assert_eq!(buf.into_inner(), expected_response.as_bytes());
}
// `describe` must include the payload only when a body is present.
#[test]
fn test_describe() {
    assert_eq!(
        describe(Method::Get, "path", None),
        "Get request on \"path\""
    );
    assert_eq!(
        describe(Method::Put, "/mmds", None),
        "Put request on \"/mmds\""
    );
    assert_eq!(
        describe(Method::Put, "path", Some(&Body::new("body"))),
        "Put request on \"path\" with body \"body\""
    );
}
// Every `VmmData` variant must serialize into the expected 200/204 wire
// format, and an action error must produce a 400 with a JSON fault body.
#[test]
fn test_convert_to_response() {
    let verify_ok_response_with = |vmm_data: VmmData| {
        let data = Ok(vmm_data);
        let mut buf = Cursor::new(vec![0]);
        // Build the expected raw response for this variant.
        let expected_response = match data.as_ref().unwrap() {
            VmmData::BalloonConfig(cfg) => {
                http_response(&serde_json::to_string(cfg).unwrap(), 200)
            }
            VmmData::BalloonStats(stats) => {
                http_response(&serde_json::to_string(stats).unwrap(), 200)
            }
            VmmData::VirtioMemStatus(data) => {
                http_response(&serde_json::to_string(data).unwrap(), 200)
            }
            VmmData::HintingStatus(status) => {
                http_response(&serde_json::to_string(status).unwrap(), 200)
            }
            VmmData::Empty => http_response("", 204),
            VmmData::FullVmConfig(cfg) => {
                http_response(&serde_json::to_string(cfg).unwrap(), 200)
            }
            VmmData::MachineConfiguration(cfg) => {
                http_response(&serde_json::to_string(cfg).unwrap(), 200)
            }
            VmmData::MmdsValue(value) => {
                http_response(&serde_json::to_string(value).unwrap(), 200)
            }
            VmmData::InstanceInformation(info) => {
                http_response(&serde_json::to_string(info).unwrap(), 200)
            }
            VmmData::VmmVersion(version) => http_response(
                &serde_json::json!({ "firecracker_version": version.as_str() }).to_string(),
                200,
            ),
        };
        let response = ParsedRequest::convert_to_response(&data);
        response.write_all(&mut buf).unwrap();
        assert_eq!(buf.into_inner(), expected_response.as_bytes());
    };
    verify_ok_response_with(VmmData::BalloonConfig(BalloonDeviceConfig::default()));
    verify_ok_response_with(VmmData::BalloonStats(BalloonStats {
        swap_in: Some(1),
        swap_out: Some(1),
        ..Default::default()
    }));
    verify_ok_response_with(VmmData::HintingStatus(HintingStatus {
        ..Default::default()
    }));
    verify_ok_response_with(VmmData::Empty);
    verify_ok_response_with(VmmData::FullVmConfig(VmmConfig::default()));
    verify_ok_response_with(VmmData::MachineConfiguration(MachineConfig::default()));
    verify_ok_response_with(VmmData::MmdsValue(serde_json::from_str("{}").unwrap()));
    verify_ok_response_with(VmmData::InstanceInformation(InstanceInfo::default()));
    verify_ok_response_with(VmmData::VmmVersion(String::default()));
    // Error.
    let error = VmmActionError::StartMicrovm(StartMicrovmError::MissingKernelConfig);
    let mut buf = Cursor::new(vec![0]);
    let json = ApiServer::json_fault_message(error.to_string());
    let response = ParsedRequest::convert_to_response(&Err(error));
    response.write_all(&mut buf).unwrap();
    let expected_response = http_response(&json, 400);
    assert_eq!(buf.into_inner(), expected_response.as_bytes());
}
// GET / must parse successfully.
#[test]
fn test_try_from_get_info() {
    let (mut sender, receiver) = UnixStream::pair().unwrap();
    let mut connection = HttpConnection::new(receiver);
    sender
        .write_all(http_request("GET", "/", None).as_bytes())
        .unwrap();
    connection.try_read().unwrap();
    let req = connection.pop_parsed_request().unwrap();
    ParsedRequest::try_from(&req).unwrap();
}

// GET /balloon must parse successfully.
#[test]
fn test_try_from_get_balloon() {
    let (mut sender, receiver) = UnixStream::pair().unwrap();
    let mut connection = HttpConnection::new(receiver);
    sender
        .write_all(http_request("GET", "/balloon", None).as_bytes())
        .unwrap();
    connection.try_read().unwrap();
    let req = connection.pop_parsed_request().unwrap();
    ParsedRequest::try_from(&req).unwrap();
}

// GET /balloon/statistics must parse successfully.
#[test]
fn test_try_from_get_balloon_stats() {
    let (mut sender, receiver) = UnixStream::pair().unwrap();
    let mut connection = HttpConnection::new(receiver);
    sender
        .write_all(http_request("GET", "/balloon/statistics", None).as_bytes())
        .unwrap();
    connection.try_read().unwrap();
    let req = connection.pop_parsed_request().unwrap();
    ParsedRequest::try_from(&req).unwrap();
}

// GET /balloon/hinting/status must parse successfully.
#[test]
fn test_try_from_get_balloon_hinting() {
    let (mut sender, receiver) = UnixStream::pair().unwrap();
    let mut connection = HttpConnection::new(receiver);
    sender
        .write_all(http_request("GET", "/balloon/hinting/status", None).as_bytes())
        .unwrap();
    connection.try_read().unwrap();
    let req = connection.pop_parsed_request().unwrap();
    ParsedRequest::try_from(&req).unwrap();
}

// GET /machine-config must parse successfully.
#[test]
fn test_try_from_get_machine_config() {
    let (mut sender, receiver) = UnixStream::pair().unwrap();
    let mut connection = HttpConnection::new(receiver);
    sender
        .write_all(http_request("GET", "/machine-config", None).as_bytes())
        .unwrap();
    connection.try_read().unwrap();
    let req = connection.pop_parsed_request().unwrap();
    ParsedRequest::try_from(&req).unwrap();
}

// GET /mmds must parse successfully.
#[test]
fn test_try_from_get_mmds() {
    let (mut sender, receiver) = UnixStream::pair().unwrap();
    let mut connection = HttpConnection::new(receiver);
    sender
        .write_all(http_request("GET", "/mmds", None).as_bytes())
        .unwrap();
    connection.try_read().unwrap();
    let req = connection.pop_parsed_request().unwrap();
    ParsedRequest::try_from(&req).unwrap();
}

// GET /version must parse successfully.
#[test]
fn test_try_from_get_version() {
    let (mut sender, receiver) = UnixStream::pair().unwrap();
    let mut connection = HttpConnection::new(receiver);
    sender
        .write_all(http_request("GET", "/version", None).as_bytes())
        .unwrap();
    connection.try_read().unwrap();
    let req = connection.pop_parsed_request().unwrap();
    ParsedRequest::try_from(&req).unwrap();
}
// PUT /actions with a valid action body must parse successfully.
#[test]
fn test_try_from_put_actions() {
    let (mut sender, receiver) = UnixStream::pair().unwrap();
    let mut connection = HttpConnection::new(receiver);
    let body = "{ \"action_type\": \"FlushMetrics\" }";
    sender
        .write_all(http_request("PUT", "/actions", Some(body)).as_bytes())
        .unwrap();
    connection.try_read().unwrap();
    let req = connection.pop_parsed_request().unwrap();
    ParsedRequest::try_from(&req).unwrap();
}

// PUT /balloon with a full balloon config must parse successfully.
#[test]
fn test_try_from_put_balloon() {
    let (mut sender, receiver) = UnixStream::pair().unwrap();
    let mut connection = HttpConnection::new(receiver);
    let body =
        "{ \"amount_mib\": 0, \"deflate_on_oom\": false, \"stats_polling_interval_s\": 0 }";
    sender
        .write_all(http_request("PUT", "/balloon", Some(body)).as_bytes())
        .unwrap();
    connection.try_read().unwrap();
    let req = connection.pop_parsed_request().unwrap();
    ParsedRequest::try_from(&req).unwrap();
}

// PUT /entropy with rate-limiter settings must parse successfully.
#[test]
fn test_try_from_put_entropy() {
    let (mut sender, receiver) = UnixStream::pair().unwrap();
    let mut connection = HttpConnection::new(receiver);
    let body = "{ \"rate_limiter\": { \"bandwidth\" : { \"size\": 0, \"one_time_burst\": 0, \
                \"refill_time\": 0 }, \"ops\": { \"size\": 0, \"one_time_burst\": 0, \
                \"refill_time\": 0 } } }";
    sender
        .write_all(http_request("PUT", "/entropy", Some(body)).as_bytes())
        .unwrap();
    connection.try_read().unwrap();
    let req = connection.pop_parsed_request().unwrap();
    ParsedRequest::try_from(&req).unwrap();
}

// PUT /boot-source must parse successfully.
#[test]
fn test_try_from_put_boot() {
    let (mut sender, receiver) = UnixStream::pair().unwrap();
    let mut connection = HttpConnection::new(receiver);
    let body = "{ \"kernel_image_path\": \"string\", \"boot_args\": \"string\" }";
    sender
        .write_all(http_request("PUT", "/boot-source", Some(body)).as_bytes())
        .unwrap();
    connection.try_read().unwrap();
    let req = connection.pop_parsed_request().unwrap();
    ParsedRequest::try_from(&req).unwrap();
}

// PUT /drives/<id> with a full drive config must parse successfully.
#[test]
fn test_try_from_put_drives() {
    let (mut sender, receiver) = UnixStream::pair().unwrap();
    let mut connection = HttpConnection::new(receiver);
    let body = "{ \"drive_id\": \"string\", \"path_on_host\": \"string\", \"is_root_device\": \
                true, \"partuuid\": \"string\", \"is_read_only\": true, \"cache_type\": \
                \"Unsafe\", \"io_engine\": \"Sync\", \"rate_limiter\": { \"bandwidth\": { \
                \"size\": 0, \"one_time_burst\": 0, \"refill_time\": 0 }, \"ops\": { \
                \"size\": 0, \"one_time_burst\": 0, \"refill_time\": 0 } } }";
    sender
        .write_all(http_request("PUT", "/drives/string", Some(body)).as_bytes())
        .unwrap();
    connection.try_read().unwrap();
    let req = connection.pop_parsed_request().unwrap();
    ParsedRequest::try_from(&req).unwrap();
}
#[test]
fn test_try_from_put_logger() {
let (mut sender, receiver) = UnixStream::pair().unwrap();
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/vsock.rs | src/firecracker/src/api_server/request/vsock.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vmm::logger::{IncMetric, METRICS};
use vmm::rpc_interface::VmmAction;
use vmm::vmm_config::vsock::VsockDeviceConfig;
use super::super::parsed_request::{ParsedRequest, RequestError};
use super::Body;
/// Handles `PUT /vsock`: deserializes the body into a `VsockDeviceConfig`
/// and wraps it in a `SetVsockDevice` action. Use of the deprecated
/// `vsock_id` field attaches a deprecation message and bumps the
/// deprecated-API metric.
pub(crate) fn parse_put_vsock(body: &Body) -> Result<ParsedRequest, RequestError> {
    METRICS.put_api_requests.vsock_count.inc();
    let vsock_cfg = serde_json::from_slice::<VsockDeviceConfig>(body.raw()).inspect_err(|_| {
        METRICS.put_api_requests.vsock_fails.inc();
    })?;

    // Remember whether the deprecated field was present before the config
    // is moved into the action below.
    let uses_deprecated_id = vsock_cfg.vsock_id.is_some();

    let mut parsed_req = ParsedRequest::new_sync(VmmAction::SetVsockDevice(vsock_cfg));
    if uses_deprecated_id {
        METRICS.deprecated_api.deprecated_http_api_calls.inc();
        parsed_req
            .parsing_info()
            .append_deprecation_message("PUT /vsock: vsock_id field is deprecated.");
    }
    Ok(parsed_req)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::api_server::parsed_request::tests::depr_action_from_req;

    // A valid config parses; an unknown field is rejected by serde.
    #[test]
    fn test_parse_put_vsock_request() {
        let body = r#"{
            "guest_cid": 42,
            "uds_path": "vsock.sock"
        }"#;
        parse_put_vsock(&Body::new(body)).unwrap();
        let body = r#"{
            "guest_cid": 42,
            "invalid_field": false
        }"#;
        parse_put_vsock(&Body::new(body)).unwrap_err();
    }

    // Presence of the deprecated `vsock_id` field must attach a deprecation
    // message; its absence must not.
    #[test]
    fn test_depr_vsock_id() {
        let body = r#"{
            "vsock_id": "foo",
            "guest_cid": 42,
            "uds_path": "vsock.sock"
        }"#;
        depr_action_from_req(
            parse_put_vsock(&Body::new(body)).unwrap(),
            Some("PUT /vsock: vsock_id field is deprecated.".to_string()),
        );
        let body = r#"{
            "guest_cid": 42,
            "uds_path": "vsock.sock"
        }"#;
        let (_, mut parsing_info) = parse_put_vsock(&Body::new(body)).unwrap().into_parts();
        assert!(parsing_info.take_deprecation_message().is_none());
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/entropy.rs | src/firecracker/src/api_server/request/entropy.rs | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vmm::rpc_interface::VmmAction;
use vmm::vmm_config::entropy::EntropyDeviceConfig;
use super::super::parsed_request::{ParsedRequest, RequestError};
use super::Body;
/// Handles `PUT /entropy`: deserializes the body into an
/// `EntropyDeviceConfig` and wraps it in a `SetEntropyDevice` action.
pub(crate) fn parse_put_entropy(body: &Body) -> Result<ParsedRequest, RequestError> {
    let cfg: EntropyDeviceConfig = serde_json::from_slice(body.raw())?;
    Ok(ParsedRequest::new_sync(VmmAction::SetEntropyDevice(cfg)))
}
#[cfg(test)]
mod tests {
    use super::*;

    // Malformed JSON and unknown fields are rejected; an empty object
    // (all-default config) is accepted.
    #[test]
    fn test_parse_put_entropy_request() {
        parse_put_entropy(&Body::new("invalid_payload")).unwrap_err();
        // PUT with invalid fields.
        let body = r#"{
            "some_id": 4
        }"#;
        parse_put_entropy(&Body::new(body)).unwrap_err();
        // PUT with valid fields.
        let body = r#"{}"#;
        parse_put_entropy(&Body::new(body)).unwrap();
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/version.rs | src/firecracker/src/api_server/request/version.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vmm::logger::{IncMetric, METRICS};
use vmm::rpc_interface::VmmAction;
use super::super::parsed_request::{ParsedRequest, RequestError};
pub(crate) fn parse_get_version() -> Result<ParsedRequest, RequestError> {
METRICS.get_api_requests.vmm_version_count.inc();
Ok(ParsedRequest::new_sync(VmmAction::GetVmmVersion))
}
#[cfg(test)]
mod tests {
use super::super::super::parsed_request::RequestAction;
use super::*;
#[test]
fn test_parse_get_version_request() {
match parse_get_version().unwrap().into_parts() {
(RequestAction::Sync(action), _) if *action == VmmAction::GetVmmVersion => {}
_ => panic!("Test failed."),
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/logger.rs | src/firecracker/src/api_server/request/logger.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vmm::logger::{IncMetric, METRICS};
use vmm::rpc_interface::VmmAction;
use super::super::parsed_request::{ParsedRequest, RequestError};
use super::Body;
pub(crate) fn parse_put_logger(body: &Body) -> Result<ParsedRequest, RequestError> {
METRICS.put_api_requests.logger_count.inc();
let res = serde_json::from_slice::<vmm::logger::LoggerConfig>(body.raw());
let config = res.inspect_err(|_| {
METRICS.put_api_requests.logger_fails.inc();
})?;
Ok(ParsedRequest::new_sync(VmmAction::ConfigureLogger(config)))
}
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use vmm::logger::{LevelFilter, LoggerConfig};
use super::*;
use crate::api_server::parsed_request::tests::vmm_action_from_request;
#[test]
fn test_parse_put_logger_request() {
let body = r#"{
"log_path": "log",
"level": "Warning",
"show_level": false,
"show_log_origin": false
}"#;
let expected_config = LoggerConfig {
log_path: Some(PathBuf::from("log")),
level: Some(LevelFilter::Warn),
show_level: Some(false),
show_log_origin: Some(false),
module: None,
};
assert_eq!(
vmm_action_from_request(parse_put_logger(&Body::new(body)).unwrap()),
VmmAction::ConfigureLogger(expected_config)
);
let body = r#"{
"log_path": "log",
"level": "DEBUG",
"show_level": false,
"show_log_origin": false
}"#;
let expected_config = LoggerConfig {
log_path: Some(PathBuf::from("log")),
level: Some(LevelFilter::Debug),
show_level: Some(false),
show_log_origin: Some(false),
module: None,
};
assert_eq!(
vmm_action_from_request(parse_put_logger(&Body::new(body)).unwrap()),
VmmAction::ConfigureLogger(expected_config)
);
let invalid_body = r#"{
"invalid_field": "log",
"level": "Warning",
"show_level": false,
"show_log_origin": false
}"#;
parse_put_logger(&Body::new(invalid_body)).unwrap_err();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/machine_configuration.rs | src/firecracker/src/api_server/request/machine_configuration.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vmm::logger::{IncMetric, METRICS};
use vmm::rpc_interface::VmmAction;
use vmm::vmm_config::machine_config::{MachineConfig, MachineConfigUpdate};
use super::super::parsed_request::{ParsedRequest, RequestError, method_to_error};
use super::{Body, Method};
pub(crate) fn parse_get_machine_config() -> Result<ParsedRequest, RequestError> {
METRICS.get_api_requests.machine_cfg_count.inc();
Ok(ParsedRequest::new_sync(VmmAction::GetVmMachineConfig))
}
pub(crate) fn parse_put_machine_config(body: &Body) -> Result<ParsedRequest, RequestError> {
METRICS.put_api_requests.machine_cfg_count.inc();
let config = serde_json::from_slice::<MachineConfig>(body.raw()).inspect_err(|_| {
METRICS.put_api_requests.machine_cfg_fails.inc();
})?;
// Check for the presence of deprecated `cpu_template` field.
let mut deprecation_message = None;
if config.cpu_template.is_some() {
// `cpu_template` field in request is deprecated.
METRICS.deprecated_api.deprecated_http_api_calls.inc();
deprecation_message = Some("PUT /machine-config: cpu_template field is deprecated.");
}
// Convert `MachineConfig` to `MachineConfigUpdate`.
let config_update = MachineConfigUpdate::from(config);
// Construct the `ParsedRequest` object.
let mut parsed_req =
ParsedRequest::new_sync(VmmAction::UpdateMachineConfiguration(config_update));
// If `cpu_template` was present, set the deprecation message in `parsing_info`.
if let Some(msg) = deprecation_message {
parsed_req.parsing_info().append_deprecation_message(msg);
}
Ok(parsed_req)
}
pub(crate) fn parse_patch_machine_config(body: &Body) -> Result<ParsedRequest, RequestError> {
METRICS.patch_api_requests.machine_cfg_count.inc();
let config_update =
serde_json::from_slice::<MachineConfigUpdate>(body.raw()).inspect_err(|_| {
METRICS.patch_api_requests.machine_cfg_fails.inc();
})?;
if config_update.is_empty() {
return method_to_error(Method::Patch);
}
// Check for the presence of deprecated `cpu_template` field.
let mut deprecation_message = None;
if config_update.cpu_template.is_some() {
// `cpu_template` field in request is deprecated.
METRICS.deprecated_api.deprecated_http_api_calls.inc();
deprecation_message = Some("PATCH /machine-config: cpu_template field is deprecated.");
}
// Construct the `ParsedRequest` object.
let mut parsed_req =
ParsedRequest::new_sync(VmmAction::UpdateMachineConfiguration(config_update));
// If `cpu_template` was present, set the deprecation message in `parsing_info`.
if let Some(msg) = deprecation_message {
parsed_req.parsing_info().append_deprecation_message(msg);
}
Ok(parsed_req)
}
#[cfg(test)]
mod tests {
use vmm::cpu_config::templates::StaticCpuTemplate;
use vmm::vmm_config::machine_config::HugePageConfig;
use super::*;
use crate::api_server::parsed_request::tests::{depr_action_from_req, vmm_action_from_request};
#[test]
fn test_parse_get_machine_config_request() {
parse_get_machine_config().unwrap();
assert!(METRICS.get_api_requests.machine_cfg_count.count() > 0);
}
#[test]
fn test_parse_put_machine_config_request() {
// 1. Test case for invalid payload.
parse_put_machine_config(&Body::new("invalid_payload")).unwrap_err();
assert!(METRICS.put_api_requests.machine_cfg_fails.count() > 0);
// 2. Test case for mandatory fields.
let body = r#"{
"mem_size_mib": 1024
}"#;
parse_put_machine_config(&Body::new(body)).unwrap_err();
let body = r#"{
"vcpu_count": 8
}"#;
parse_put_machine_config(&Body::new(body)).unwrap_err();
let huge_pages_cases = [
("None", HugePageConfig::None),
("2M", HugePageConfig::Hugetlbfs2M),
];
for (huge_page, expected) in huge_pages_cases {
// 3. Test case for success scenarios for both architectures.
let body = format!(
r#"{{
"vcpu_count": 8,
"mem_size_mib": 1024,
"huge_pages": "{huge_page}"
}}"#
);
let expected_config = MachineConfigUpdate {
vcpu_count: Some(8),
mem_size_mib: Some(1024),
smt: Some(false),
cpu_template: None,
track_dirty_pages: Some(false),
huge_pages: Some(expected),
#[cfg(feature = "gdb")]
gdb_socket_path: None,
};
assert_eq!(
vmm_action_from_request(parse_put_machine_config(&Body::new(body)).unwrap()),
VmmAction::UpdateMachineConfiguration(expected_config)
);
}
let body = r#"{
"vcpu_count": 8,
"mem_size_mib": 1024,
"cpu_template": "None"
}"#;
let expected_config = MachineConfigUpdate {
vcpu_count: Some(8),
mem_size_mib: Some(1024),
smt: Some(false),
cpu_template: Some(StaticCpuTemplate::None),
track_dirty_pages: Some(false),
huge_pages: Some(HugePageConfig::None),
#[cfg(feature = "gdb")]
gdb_socket_path: None,
};
assert_eq!(
vmm_action_from_request(parse_put_machine_config(&Body::new(body)).unwrap()),
VmmAction::UpdateMachineConfiguration(expected_config)
);
let body = r#"{
"vcpu_count": 8,
"mem_size_mib": 1024,
"smt": false,
"track_dirty_pages": true
}"#;
let expected_config = MachineConfigUpdate {
vcpu_count: Some(8),
mem_size_mib: Some(1024),
smt: Some(false),
cpu_template: None,
track_dirty_pages: Some(true),
huge_pages: Some(HugePageConfig::None),
#[cfg(feature = "gdb")]
gdb_socket_path: None,
};
assert_eq!(
vmm_action_from_request(parse_put_machine_config(&Body::new(body)).unwrap()),
VmmAction::UpdateMachineConfiguration(expected_config)
);
// 4. Test that applying a CPU template is successful on x86_64 while on aarch64, it is not.
let body = r#"{
"vcpu_count": 8,
"mem_size_mib": 1024,
"smt": false,
"cpu_template": "T2",
"track_dirty_pages": true
}"#;
#[cfg(target_arch = "x86_64")]
{
let expected_config = MachineConfigUpdate {
vcpu_count: Some(8),
mem_size_mib: Some(1024),
smt: Some(false),
cpu_template: Some(StaticCpuTemplate::T2),
track_dirty_pages: Some(true),
huge_pages: Some(HugePageConfig::None),
#[cfg(feature = "gdb")]
gdb_socket_path: None,
};
assert_eq!(
vmm_action_from_request(parse_put_machine_config(&Body::new(body)).unwrap()),
VmmAction::UpdateMachineConfiguration(expected_config)
);
}
#[cfg(target_arch = "aarch64")]
{
parse_put_machine_config(&Body::new(body)).unwrap_err();
}
// 5. Test that setting `smt: true` is successful
let body = r#"{
"vcpu_count": 8,
"mem_size_mib": 1024,
"smt": true,
"track_dirty_pages": true
}"#;
let expected_config = MachineConfigUpdate {
vcpu_count: Some(8),
mem_size_mib: Some(1024),
smt: Some(true),
cpu_template: None,
track_dirty_pages: Some(true),
huge_pages: Some(HugePageConfig::None),
#[cfg(feature = "gdb")]
gdb_socket_path: None,
};
assert_eq!(
vmm_action_from_request(parse_put_machine_config(&Body::new(body)).unwrap()),
VmmAction::UpdateMachineConfiguration(expected_config)
);
// 6. Test nonsense values for huge page size
let body = r#"{
"vcpu_count": 8,
"mem_size_mib": 1024,
"huge_pages": "7M"
}"#;
parse_put_machine_config(&Body::new(body)).unwrap_err();
}
#[test]
fn test_parse_patch_machine_config_request() {
// 1. Test cases for invalid payload.
parse_patch_machine_config(&Body::new("invalid_payload")).unwrap_err();
// 2. Check currently supported fields that can be patched.
let body = r#"{
"track_dirty_pages": true
}"#;
parse_patch_machine_config(&Body::new(body)).unwrap();
// On aarch64, CPU template is also not patch compatible.
let body = r#"{
"cpu_template": "T2"
}"#;
#[cfg(target_arch = "aarch64")]
parse_patch_machine_config(&Body::new(body)).unwrap_err();
#[cfg(target_arch = "x86_64")]
parse_patch_machine_config(&Body::new(body)).unwrap();
let body = r#"{
"vcpu_count": 8,
"mem_size_mib": 1024
}"#;
parse_patch_machine_config(&Body::new(body)).unwrap();
// On aarch64, we allow `smt` to be configured to `false` but not `true`.
let body = r#"{
"vcpu_count": 8,
"mem_size_mib": 1024,
"smt": false
}"#;
parse_patch_machine_config(&Body::new(body)).unwrap();
// 3. Check to see if an empty body returns an error.
let body = r#"{}"#;
parse_patch_machine_config(&Body::new(body)).unwrap_err();
}
#[test]
fn test_depr_cpu_template_in_put_req() {
// Test that the deprecation message is shown when `cpu_template` is specified.
let body = r#"{
"vcpu_count": 8,
"mem_size_mib": 1024,
"cpu_template": "None"
}"#;
depr_action_from_req(
parse_put_machine_config(&Body::new(body)).unwrap(),
Some("PUT /machine-config: cpu_template field is deprecated.".to_string()),
);
// Test that the deprecation message is not shown when `cpu_template` is not specified.
let body = r#"{
"vcpu_count": 8,
"mem_size_mib": 1024
}"#;
let (_, mut parsing_info) = parse_put_machine_config(&Body::new(body))
.unwrap()
.into_parts();
assert!(parsing_info.take_deprecation_message().is_none());
}
#[test]
fn test_depr_cpu_template_in_patch_req() {
// Test that the deprecation message is shown when `cpu_template` is specified.
let body = r#"{
"vcpu_count": 8,
"cpu_template": "None"
}"#;
depr_action_from_req(
parse_patch_machine_config(&Body::new(body)).unwrap(),
Some("PATCH /machine-config: cpu_template field is deprecated.".to_string()),
);
// Test that the deprecation message is not shown when `cpu_template` is not specified.
let body = r#"{
"vcpu_count": 8
}"#;
let (_, mut parsing_info) = parse_patch_machine_config(&Body::new(body))
.unwrap()
.into_parts();
assert!(parsing_info.take_deprecation_message().is_none());
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/serial.rs | src/firecracker/src/api_server/request/serial.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use micro_http::Body;
use vmm::logger::{IncMetric, METRICS};
use vmm::rpc_interface::VmmAction;
use vmm::vmm_config::serial::SerialConfig;
use crate::api_server::parsed_request::{ParsedRequest, RequestError};
pub(crate) fn parse_put_serial(body: &Body) -> Result<ParsedRequest, RequestError> {
METRICS.put_api_requests.serial_count.inc();
let res = serde_json::from_slice::<SerialConfig>(body.raw());
let config = res.inspect_err(|_| {
METRICS.put_api_requests.serial_fails.inc();
})?;
Ok(ParsedRequest::new_sync(VmmAction::ConfigureSerial(config)))
}
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use super::*;
use crate::api_server::parsed_request::tests::vmm_action_from_request;
#[test]
fn test_parse_put_serial_request() {
let body = r#"{"serial_out_path": "serial"}"#;
let expected_config = SerialConfig {
serial_out_path: Some(PathBuf::from("serial")),
};
assert_eq!(
vmm_action_from_request(parse_put_serial(&Body::new(body)).unwrap()),
VmmAction::ConfigureSerial(expected_config)
);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/pmem.rs | src/firecracker/src/api_server/request/pmem.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vmm::logger::{IncMetric, METRICS};
use vmm::rpc_interface::VmmAction;
use vmm::vmm_config::pmem::PmemConfig;
use super::super::parsed_request::{ParsedRequest, RequestError, checked_id};
use super::{Body, StatusCode};
pub(crate) fn parse_put_pmem(
body: &Body,
id_from_path: Option<&str>,
) -> Result<ParsedRequest, RequestError> {
METRICS.put_api_requests.pmem_count.inc();
let id = if let Some(id) = id_from_path {
checked_id(id)?
} else {
METRICS.put_api_requests.pmem_fails.inc();
return Err(RequestError::EmptyID);
};
let device_cfg = serde_json::from_slice::<PmemConfig>(body.raw()).inspect_err(|_| {
METRICS.put_api_requests.pmem_fails.inc();
})?;
if id != device_cfg.id {
METRICS.put_api_requests.pmem_fails.inc();
Err(RequestError::Generic(
StatusCode::BadRequest,
"The id from the path does not match the id from the body!".to_string(),
))
} else {
Ok(ParsedRequest::new_sync(VmmAction::InsertPmemDevice(
device_cfg,
)))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::api_server::parsed_request::tests::vmm_action_from_request;
#[test]
fn test_parse_put_pmem_request() {
parse_put_pmem(&Body::new("invalid_payload"), None).unwrap_err();
parse_put_pmem(&Body::new("invalid_payload"), Some("id")).unwrap_err();
let body = r#"{
"id": "bar",
}"#;
parse_put_pmem(&Body::new(body), Some("1")).unwrap_err();
let body = r#"{
"foo": "1",
}"#;
parse_put_pmem(&Body::new(body), Some("1")).unwrap_err();
let body = r#"{
"id": "1000",
"path_on_host": "dummy",
"root_device": true,
"read_only": true
}"#;
let r = vmm_action_from_request(parse_put_pmem(&Body::new(body), Some("1000")).unwrap());
let expected_config = PmemConfig {
id: "1000".to_string(),
path_on_host: "dummy".to_string(),
root_device: true,
read_only: true,
};
assert_eq!(r, VmmAction::InsertPmemDevice(expected_config));
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/instance_info.rs | src/firecracker/src/api_server/request/instance_info.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vmm::logger::{IncMetric, METRICS};
use vmm::rpc_interface::VmmAction;
use super::super::parsed_request::{ParsedRequest, RequestError};
pub(crate) fn parse_get_instance_info() -> Result<ParsedRequest, RequestError> {
METRICS.get_api_requests.instance_info_count.inc();
Ok(ParsedRequest::new_sync(VmmAction::GetVmInstanceInfo))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::api_server::parsed_request::RequestAction;
#[test]
fn test_parse_get_instance_info_request() {
match parse_get_instance_info().unwrap().into_parts() {
(RequestAction::Sync(action), _) if *action == VmmAction::GetVmInstanceInfo => {}
_ => panic!("Test failed."),
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/mod.rs | src/firecracker/src/api_server/request/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
pub mod actions;
pub mod balloon;
pub mod boot_source;
pub mod cpu_configuration;
pub mod drive;
pub mod entropy;
pub mod hotplug;
pub mod instance_info;
pub mod logger;
pub mod machine_configuration;
pub mod metrics;
pub mod mmds;
pub mod net;
pub mod pmem;
pub mod serial;
pub mod snapshot;
pub mod version;
pub mod vsock;
pub use micro_http::{Body, Method, StatusCode};
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/mmds.rs | src/firecracker/src/api_server/request/mmds.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use micro_http::StatusCode;
use vmm::logger::{IncMetric, METRICS};
use vmm::mmds::data_store::MmdsVersion;
use vmm::rpc_interface::VmmAction;
use vmm::vmm_config::mmds::MmdsConfig;
use super::super::parsed_request::{ParsedRequest, RequestError};
use super::Body;
pub(crate) fn parse_get_mmds() -> Result<ParsedRequest, RequestError> {
METRICS.get_api_requests.mmds_count.inc();
Ok(ParsedRequest::new_sync(VmmAction::GetMMDS))
}
fn parse_put_mmds_config(body: &Body) -> Result<ParsedRequest, RequestError> {
let config: MmdsConfig = serde_json::from_slice(body.raw()).inspect_err(|_| {
METRICS.put_api_requests.mmds_fails.inc();
})?;
// Construct the `ParsedRequest` object.
let version = config.version;
let mut parsed_request = ParsedRequest::new_sync(VmmAction::SetMmdsConfiguration(config));
// MmdsV1 is deprecated.
if version == MmdsVersion::V1 {
METRICS.deprecated_api.deprecated_http_api_calls.inc();
parsed_request
.parsing_info()
.append_deprecation_message("PUT /mmds/config: V1 is deprecated. Use V2 instead.");
}
Ok(parsed_request)
}
pub(crate) fn parse_put_mmds(
body: &Body,
path_second_token: Option<&str>,
) -> Result<ParsedRequest, RequestError> {
METRICS.put_api_requests.mmds_count.inc();
match path_second_token {
None => Ok(ParsedRequest::new_sync(VmmAction::PutMMDS(
serde_json::from_slice(body.raw()).inspect_err(|_| {
METRICS.put_api_requests.mmds_fails.inc();
})?,
))),
Some("config") => parse_put_mmds_config(body),
Some(unrecognized) => {
METRICS.put_api_requests.mmds_fails.inc();
Err(RequestError::Generic(
StatusCode::BadRequest,
format!("Unrecognized PUT request path `{}`.", unrecognized),
))
}
}
}
pub(crate) fn parse_patch_mmds(body: &Body) -> Result<ParsedRequest, RequestError> {
METRICS.patch_api_requests.mmds_count.inc();
Ok(ParsedRequest::new_sync(VmmAction::PatchMMDS(
serde_json::from_slice(body.raw()).inspect_err(|_| {
METRICS.patch_api_requests.mmds_fails.inc();
})?,
)))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::api_server::parsed_request::tests::depr_action_from_req;
#[test]
fn test_parse_get_mmds_request() {
parse_get_mmds().unwrap();
assert!(METRICS.get_api_requests.mmds_count.count() > 0);
}
#[test]
fn test_parse_put_mmds_request() {
let body = r#"{
"foo": "bar"
}"#;
parse_put_mmds(&Body::new(body), None).unwrap();
let invalid_body = "invalid_body";
parse_put_mmds(&Body::new(invalid_body), None).unwrap_err();
assert!(METRICS.put_api_requests.mmds_fails.count() > 0);
// Test `config` path.
let body = r#"{
"version": "V2",
"ipv4_address": "169.254.170.2",
"network_interfaces": []
}"#;
let config_path = "config";
parse_put_mmds(&Body::new(body), Some(config_path)).unwrap();
let body = r#"{
"network_interfaces": []
}"#;
parse_put_mmds(&Body::new(body), Some(config_path)).unwrap();
let body = r#"{
"version": "foo",
"ipv4_address": "169.254.170.2",
"network_interfaces": []
}"#;
parse_put_mmds(&Body::new(body), Some(config_path)).unwrap_err();
let body = r#"{
"version": "V2"
}"#;
parse_put_mmds(&Body::new(body), Some(config_path)).unwrap_err();
let body = r#"{
"ipv4_address": "",
"network_interfaces": []
}"#;
parse_put_mmds(&Body::new(body), Some(config_path)).unwrap_err();
let invalid_config_body = r#"{
"invalid_config": "invalid_value"
}"#;
parse_put_mmds(&Body::new(invalid_config_body), Some(config_path)).unwrap_err();
parse_put_mmds(&Body::new(body), Some("invalid_path")).unwrap_err();
parse_put_mmds(&Body::new(invalid_body), Some(config_path)).unwrap_err();
}
#[test]
fn test_deprecated_config() {
let config_path = "config";
let body = r#"{
"ipv4_address": "169.254.170.2",
"network_interfaces": []
}"#;
depr_action_from_req(
parse_put_mmds(&Body::new(body), Some(config_path)).unwrap(),
Some("PUT /mmds/config: V1 is deprecated. Use V2 instead.".to_string()),
);
let body = r#"{
"version": "V1",
"ipv4_address": "169.254.170.2",
"network_interfaces": []
}"#;
depr_action_from_req(
parse_put_mmds(&Body::new(body), Some(config_path)).unwrap(),
Some("PUT /mmds/config: V1 is deprecated. Use V2 instead.".to_string()),
);
let body = r#"{
"version": "V2",
"ipv4_address": "169.254.170.2",
"network_interfaces": []
}"#;
let (_, mut parsing_info) = parse_put_mmds(&Body::new(body), Some(config_path))
.unwrap()
.into_parts();
assert!(parsing_info.take_deprecation_message().is_none());
}
#[test]
fn test_parse_patch_mmds_request() {
let body = r#"{
"foo": "bar"
}"#;
parse_patch_mmds(&Body::new(body)).unwrap();
assert!(METRICS.patch_api_requests.mmds_count.count() > 0);
parse_patch_mmds(&Body::new("invalid_body")).unwrap_err();
assert!(METRICS.patch_api_requests.mmds_fails.count() > 0);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/boot_source.rs | src/firecracker/src/api_server/request/boot_source.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vmm::logger::{IncMetric, METRICS};
use vmm::rpc_interface::VmmAction;
use vmm::vmm_config::boot_source::BootSourceConfig;
use super::super::parsed_request::{ParsedRequest, RequestError};
use super::Body;
pub(crate) fn parse_put_boot_source(body: &Body) -> Result<ParsedRequest, RequestError> {
METRICS.put_api_requests.boot_source_count.inc();
Ok(ParsedRequest::new_sync(VmmAction::ConfigureBootSource(
serde_json::from_slice::<BootSourceConfig>(body.raw()).inspect_err(|_| {
METRICS.put_api_requests.boot_source_fails.inc();
})?,
)))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_boot_request() {
parse_put_boot_source(&Body::new("invalid_payload")).unwrap_err();
let body = r#"{
"kernel_image_path": "/foo/bar",
"initrd_path": "/bar/foo",
"boot_args": "foobar"
}"#;
let same_body = BootSourceConfig {
kernel_image_path: String::from("/foo/bar"),
initrd_path: Some(String::from("/bar/foo")),
boot_args: Some(String::from("foobar")),
};
let parsed_req = parse_put_boot_source(&Body::new(body)).unwrap();
assert_eq!(
parsed_req,
ParsedRequest::new_sync(VmmAction::ConfigureBootSource(same_body))
);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/actions.rs | src/firecracker/src/api_server/request/actions.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use serde::{Deserialize, Serialize};
use vmm::logger::{IncMetric, METRICS};
use vmm::rpc_interface::VmmAction;
use super::super::parsed_request::{ParsedRequest, RequestError};
use super::Body;
#[cfg(target_arch = "aarch64")]
use super::StatusCode;
// The names of the members from this enum must precisely correspond (as a string) to the possible
// values of "action_type" from the json request body. This is useful to get a strongly typed
// struct from the Serde deserialization process.
#[derive(Debug, Deserialize, Serialize)]
enum ActionType {
FlushMetrics,
InstanceStart,
SendCtrlAltDel,
}
// The model of the json body from a sync request. We use Serde to transform each associated
// json body into this.
#[derive(Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
struct ActionBody {
action_type: ActionType,
}
pub(crate) fn parse_put_actions(body: &Body) -> Result<ParsedRequest, RequestError> {
METRICS.put_api_requests.actions_count.inc();
let action_body = serde_json::from_slice::<ActionBody>(body.raw()).inspect_err(|_| {
METRICS.put_api_requests.actions_fails.inc();
})?;
match action_body.action_type {
ActionType::FlushMetrics => Ok(ParsedRequest::new_sync(VmmAction::FlushMetrics)),
ActionType::InstanceStart => Ok(ParsedRequest::new_sync(VmmAction::StartMicroVm)),
ActionType::SendCtrlAltDel => {
// SendCtrlAltDel not supported on aarch64.
#[cfg(target_arch = "aarch64")]
return Err(RequestError::Generic(
StatusCode::BadRequest,
"SendCtrlAltDel does not supported on aarch64.".to_string(),
));
#[cfg(target_arch = "x86_64")]
Ok(ParsedRequest::new_sync(VmmAction::SendCtrlAltDel))
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_put_actions_request() {
{
parse_put_actions(&Body::new("invalid_body")).unwrap_err();
let json = r#"{
"action_type": "InstanceStart"
}"#;
let req: ParsedRequest = ParsedRequest::new_sync(VmmAction::StartMicroVm);
let result = parse_put_actions(&Body::new(json));
assert_eq!(result.unwrap(), req);
}
#[cfg(target_arch = "x86_64")]
{
let json = r#"{
"action_type": "SendCtrlAltDel"
}"#;
let req: ParsedRequest = ParsedRequest::new_sync(VmmAction::SendCtrlAltDel);
let result = parse_put_actions(&Body::new(json));
assert_eq!(result.unwrap(), req);
}
#[cfg(target_arch = "aarch64")]
{
let json = r#"{
"action_type": "SendCtrlAltDel"
}"#;
let result = parse_put_actions(&Body::new(json));
result.unwrap_err();
}
{
let json = r#"{
"action_type": "FlushMetrics"
}"#;
let req: ParsedRequest = ParsedRequest::new_sync(VmmAction::FlushMetrics);
let result = parse_put_actions(&Body::new(json));
assert_eq!(result.unwrap(), req);
}
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/drive.rs | src/firecracker/src/api_server/request/drive.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vmm::logger::{IncMetric, METRICS};
use vmm::rpc_interface::VmmAction;
use vmm::vmm_config::drive::{BlockDeviceConfig, BlockDeviceUpdateConfig};
use super::super::parsed_request::{ParsedRequest, RequestError, checked_id};
use super::{Body, StatusCode};
pub(crate) fn parse_put_drive(
body: &Body,
id_from_path: Option<&str>,
) -> Result<ParsedRequest, RequestError> {
METRICS.put_api_requests.drive_count.inc();
let id = if let Some(id) = id_from_path {
checked_id(id)?
} else {
METRICS.put_api_requests.drive_fails.inc();
return Err(RequestError::EmptyID);
};
let device_cfg = serde_json::from_slice::<BlockDeviceConfig>(body.raw()).inspect_err(|_| {
METRICS.put_api_requests.drive_fails.inc();
})?;
if id != device_cfg.drive_id {
METRICS.put_api_requests.drive_fails.inc();
Err(RequestError::Generic(
StatusCode::BadRequest,
"The id from the path does not match the id from the body!".to_string(),
))
} else {
Ok(ParsedRequest::new_sync(VmmAction::InsertBlockDevice(
device_cfg,
)))
}
}
pub(crate) fn parse_patch_drive(
body: &Body,
id_from_path: Option<&str>,
) -> Result<ParsedRequest, RequestError> {
METRICS.patch_api_requests.drive_count.inc();
let id = if let Some(id) = id_from_path {
checked_id(id)?
} else {
METRICS.patch_api_requests.drive_fails.inc();
return Err(RequestError::EmptyID);
};
let block_device_update_cfg: BlockDeviceUpdateConfig =
serde_json::from_slice::<BlockDeviceUpdateConfig>(body.raw()).inspect_err(|_| {
METRICS.patch_api_requests.drive_fails.inc();
})?;
if id != block_device_update_cfg.drive_id {
METRICS.patch_api_requests.drive_fails.inc();
return Err(RequestError::Generic(
StatusCode::BadRequest,
String::from("The id from the path does not match the id from the body!"),
));
}
Ok(ParsedRequest::new_sync(VmmAction::UpdateBlockDevice(
block_device_update_cfg,
)))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::api_server::parsed_request::tests::vmm_action_from_request;
#[test]
fn test_parse_patch_drive_request() {
parse_patch_drive(&Body::new("invalid_payload"), None).unwrap_err();
parse_patch_drive(&Body::new("invalid_payload"), Some("id")).unwrap_err();
// PATCH with invalid fields.
let body = r#"{
"drive_id": "bar",
"is_read_only": false
}"#;
parse_patch_drive(&Body::new(body), Some("2")).unwrap_err();
// PATCH with invalid types on fields. Adding a drive_id as number instead of string.
let body = r#"{
"drive_id": 1000,
"path_on_host": "dummy"
}"#;
let res = parse_patch_drive(&Body::new(body), Some("1000"));
res.unwrap_err();
// PATCH with invalid types on fields. Adding a path_on_host as bool instead of string.
let body = r#"{
"drive_id": 1000,
"path_on_host": true
}"#;
let res = parse_patch_drive(&Body::new(body), Some("1000"));
res.unwrap_err();
// PATCH with only drive_id field.
let body = r#"{
"drive_id": "1000"
}"#;
let res = parse_patch_drive(&Body::new(body), Some("1000"));
res.unwrap();
// PATCH with missing drive_id field.
let body = r#"{
"path_on_host": true
}"#;
let res = parse_patch_drive(&Body::new(body), Some("1000"));
res.unwrap_err();
// PATCH that tries to update something else other than path_on_host.
let body = r#"{
"drive_id": "dummy_id",
"path_on_host": "dummy_host",
"is_read_only": false
}"#;
let res = parse_patch_drive(&Body::new(body), Some("1234"));
res.unwrap_err();
// PATCH with payload that is not a json.
let body = r#"{
"fields": "dummy_field"
}"#;
parse_patch_drive(&Body::new(body), Some("1234")).unwrap_err();
let body = r#"{
"drive_id": "foo",
"path_on_host": "dummy"
}"#;
let expected_config = BlockDeviceUpdateConfig {
drive_id: "foo".to_string(),
path_on_host: Some("dummy".to_string()),
rate_limiter: None,
};
assert_eq!(
vmm_action_from_request(parse_patch_drive(&Body::new(body), Some("foo")).unwrap()),
VmmAction::UpdateBlockDevice(expected_config)
);
let body = r#"{
"drive_id": "foo",
"path_on_host": "dummy"
}"#;
// Must fail since the drive id differs from id_from_path (foo vs bar).
parse_patch_drive(&Body::new(body), Some("bar")).unwrap_err();
let body = r#"{
"drive_id": "foo",
"rate_limiter": {
"bandwidth": {
"size": 5000,
"refill_time": 100
},
"ops": {
"size": 500,
"refill_time": 100
}
}
}"#;
// Validate that updating just the ratelimiter works.
parse_patch_drive(&Body::new(body), Some("foo")).unwrap();
let body = r#"{
"drive_id": "foo",
"path_on_host": "/there",
"rate_limiter": {
"bandwidth": {
"size": 5000,
"refill_time": 100
},
"ops": {
"size": 500,
"refill_time": 100
}
}
}"#;
// Validate that updating both path and rate limiter succeds.
parse_patch_drive(&Body::new(body), Some("foo")).unwrap();
let body = r#"{
"drive_id": "foo",
"path_on_host": "/there",
"rate_limiter": {
"ops": {
"size": 100
}
}
}"#;
// Validate that parse_patch_drive fails for invalid rate limiter cfg.
parse_patch_drive(&Body::new(body), Some("foo")).unwrap_err();
}
#[test]
fn test_parse_put_drive_request() {
// A body that is not JSON must be rejected, with or without a path id.
parse_put_drive(&Body::new("invalid_payload"), None).unwrap_err();
parse_put_drive(&Body::new("invalid_payload"), Some("id")).unwrap_err();
// PUT with invalid fields.
let body = r#"{
"drive_id": "bar",
"is_read_only": false
}"#;
parse_put_drive(&Body::new(body), Some("2")).unwrap_err();
// PUT with missing all optional fields.
let body = r#"{
"drive_id": "1000",
"path_on_host": "dummy",
"is_root_device": true,
"is_read_only": true
}"#;
parse_put_drive(&Body::new(body), Some("1000")).unwrap();
// PUT where the path id ("foo") does not match the body's drive_id ("1000").
parse_put_drive(&Body::new(body), Some("foo")).unwrap_err();
// PUT with the complete configuration.
let body = r#"{
"drive_id": "1000",
"path_on_host": "dummy",
"is_root_device": true,
"partuuid": "string",
"is_read_only": true,
"cache_type": "Unsafe",
"io_engine": "Sync",
"rate_limiter": {
"bandwidth": {
"size": 0,
"one_time_burst": 0,
"refill_time": 0
},
"ops": {
"size": 0,
"one_time_burst": 0,
"refill_time": 0
}
}
}"#;
parse_put_drive(&Body::new(body), Some("1000")).unwrap();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/metrics.rs | src/firecracker/src/api_server/request/metrics.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vmm::logger::{IncMetric, METRICS};
use vmm::rpc_interface::VmmAction;
use vmm::vmm_config::metrics::MetricsConfig;
use super::super::parsed_request::{ParsedRequest, RequestError};
use super::Body;
/// Parses a `PUT /metrics` request body into a `ConfigureMetrics` action.
pub(crate) fn parse_put_metrics(body: &Body) -> Result<ParsedRequest, RequestError> {
    METRICS.put_api_requests.metrics_count.inc();
    // Deserialize first so a malformed body bumps the failure metric before bailing out.
    let config = serde_json::from_slice::<MetricsConfig>(body.raw()).inspect_err(|_| {
        METRICS.put_api_requests.metrics_fails.inc();
    })?;
    Ok(ParsedRequest::new_sync(VmmAction::ConfigureMetrics(config)))
}
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use super::*;
use crate::api_server::parsed_request::tests::vmm_action_from_request;
#[test]
fn test_parse_put_metrics_request() {
    // A well-formed body must round-trip into the equivalent ConfigureMetrics action.
    let valid = r#"{
"metrics_path": "metrics"
}"#;
    let parsed = parse_put_metrics(&Body::new(valid)).unwrap();
    let expected = MetricsConfig {
        metrics_path: PathBuf::from("metrics"),
    };
    assert_eq!(
        vmm_action_from_request(parsed),
        VmmAction::ConfigureMetrics(expected)
    );
    // Unknown fields must be rejected.
    let invalid = r#"{
"invalid_field": "metrics"
}"#;
    parse_put_metrics(&Body::new(invalid)).unwrap_err();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/snapshot.rs | src/firecracker/src/api_server/request/snapshot.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use serde::de::Error as DeserializeError;
use vmm::logger::{IncMetric, METRICS};
use vmm::rpc_interface::VmmAction;
use vmm::vmm_config::snapshot::{
CreateSnapshotParams, LoadSnapshotConfig, LoadSnapshotParams, MemBackendConfig, MemBackendType,
Vm, VmState,
};
use super::super::parsed_request::{ParsedRequest, RequestError};
use super::super::request::{Body, Method, StatusCode};
/// Deprecation message appended to responses that still use the deprecated
/// `mem_file_path` / `enable_diff_snapshots` fields.
const LOAD_DEPRECATION_MESSAGE: &str =
"PUT /snapshot/load: mem_file_path and enable_diff_snapshots fields are deprecated.";
/// Error text returned when neither `mem_backend` nor `mem_file_path` was specified.
pub const MISSING_FIELD: &str =
"missing field: either `mem_backend` or `mem_file_path` is required";
/// Error text returned when both `mem_backend` and `mem_file_path` were specified.
/// Only specifying one of them is allowed.
pub const TOO_MANY_FIELDS: &str =
"too many fields: either `mem_backend` or `mem_file_path` exclusively is required";
/// Dispatches a `PUT /snapshot/{op}` request to the create/load sub-parsers.
pub(crate) fn parse_put_snapshot(
    body: &Body,
    request_type_from_path: Option<&str>,
) -> Result<ParsedRequest, RequestError> {
    // The operation type is mandatory in the path.
    let Some(request_type) = request_type_from_path else {
        return Err(RequestError::Generic(
            StatusCode::BadRequest,
            "Missing snapshot operation type.".to_string(),
        ));
    };
    match request_type {
        "create" => parse_put_snapshot_create(body),
        "load" => parse_put_snapshot_load(body),
        other => Err(RequestError::InvalidPathMethod(
            format!("/snapshot/{}", other),
            Method::Put,
        )),
    }
}
/// Parses a `PATCH /vm` body ({"state": "Paused"|"Resumed"}) into a Pause/Resume action.
pub(crate) fn parse_patch_vm_state(body: &Body) -> Result<ParsedRequest, RequestError> {
    let vm: Vm = serde_json::from_slice(body.raw())?;
    let action = match vm.state {
        VmState::Paused => VmmAction::Pause,
        VmState::Resumed => VmmAction::Resume,
    };
    Ok(ParsedRequest::new_sync(action))
}
/// Parses the body of a `PUT /snapshot/create` request.
fn parse_put_snapshot_create(body: &Body) -> Result<ParsedRequest, RequestError> {
    let params: CreateSnapshotParams = serde_json::from_slice(body.raw())?;
    Ok(ParsedRequest::new_sync(VmmAction::CreateSnapshot(params)))
}
/// Parses the body of a `PUT /snapshot/load` request.
///
/// Exactly one memory source must be present: the current `mem_backend` field
/// or the deprecated `mem_file_path`. Use of deprecated fields bumps the
/// deprecated-API metric and attaches a deprecation message to the response.
fn parse_put_snapshot_load(body: &Body) -> Result<ParsedRequest, RequestError> {
let snapshot_config = serde_json::from_slice::<LoadSnapshotConfig>(body.raw())?;
match (&snapshot_config.mem_backend, &snapshot_config.mem_file_path) {
// Ensure `mem_file_path` and `mem_backend` fields are not present at the same time.
(Some(_), Some(_)) => {
return Err(RequestError::SerdeJson(serde_json::Error::custom(
TOO_MANY_FIELDS,
)));
}
// Ensure that one of `mem_file_path` or `mem_backend` fields is always specified.
(None, None) => {
return Err(RequestError::SerdeJson(serde_json::Error::custom(
MISSING_FIELD,
)));
}
_ => {}
}
// Check for the presence of deprecated `mem_file_path` field and create
// deprecation message if found.
let mut deprecation_message = None;
#[allow(deprecated)]
if snapshot_config.mem_file_path.is_some() || snapshot_config.enable_diff_snapshots {
// `mem_file_path` field in request is deprecated.
METRICS.deprecated_api.deprecated_http_api_calls.inc();
deprecation_message = Some(LOAD_DEPRECATION_MESSAGE);
}
// If `mem_file_path` is specified instead of `mem_backend`, we construct the
// `MemBackendConfig` object from the path specified, with `File` as backend type.
let mem_backend = match snapshot_config.mem_backend {
Some(backend_cfg) => backend_cfg,
None => {
MemBackendConfig {
// This is safe to unwrap() because we ensure above that one of the two:
// either `mem_file_path` or `mem_backend` field is always specified.
backend_path: snapshot_config.mem_file_path.unwrap(),
backend_type: MemBackendType::File,
}
}
};
// The deprecated `enable_diff_snapshots` flag still implies dirty-page tracking.
let snapshot_params = LoadSnapshotParams {
snapshot_path: snapshot_config.snapshot_path,
mem_backend,
#[allow(deprecated)]
track_dirty_pages: snapshot_config.enable_diff_snapshots
|| snapshot_config.track_dirty_pages,
resume_vm: snapshot_config.resume_vm,
network_overrides: snapshot_config.network_overrides,
};
// Construct the `ParsedRequest` object.
let mut parsed_req = ParsedRequest::new_sync(VmmAction::LoadSnapshot(snapshot_params));
// If `mem_file_path` was present, set the deprecation message in `parsing_info`.
if let Some(msg) = deprecation_message {
parsed_req.parsing_info().append_deprecation_message(msg);
}
Ok(parsed_req)
}
#[cfg(test)]
mod tests {
use vmm::vmm_config::snapshot::{MemBackendConfig, MemBackendType, NetworkOverride};
use super::*;
use crate::api_server::parsed_request::tests::{depr_action_from_req, vmm_action_from_request};
#[test]
fn test_parse_put_snapshot() {
use std::path::PathBuf;
use vmm::vmm_config::snapshot::SnapshotType;
// Create: explicit Diff snapshot type.
let body = r#"{
"snapshot_type": "Diff",
"snapshot_path": "foo",
"mem_file_path": "bar"
}"#;
let expected_config = CreateSnapshotParams {
snapshot_type: SnapshotType::Diff,
snapshot_path: PathBuf::from("foo"),
mem_file_path: PathBuf::from("bar"),
};
assert_eq!(
vmm_action_from_request(parse_put_snapshot(&Body::new(body), Some("create")).unwrap()),
VmmAction::CreateSnapshot(expected_config)
);
// Create: omitting `snapshot_type` defaults to a Full snapshot.
let body = r#"{
"snapshot_path": "foo",
"mem_file_path": "bar"
}"#;
let expected_config = CreateSnapshotParams {
snapshot_type: SnapshotType::Full,
snapshot_path: PathBuf::from("foo"),
mem_file_path: PathBuf::from("bar"),
};
assert_eq!(
vmm_action_from_request(parse_put_snapshot(&Body::new(body), Some("create")).unwrap()),
VmmAction::CreateSnapshot(expected_config)
);
// Create: unknown fields are rejected.
let invalid_body = r#"{
"invalid_field": "foo",
"mem_file_path": "bar"
}"#;
parse_put_snapshot(&Body::new(invalid_body), Some("create")).unwrap_err();
// Load: current `mem_backend` form — no deprecation message expected.
let body = r#"{
"snapshot_path": "foo",
"mem_backend": {
"backend_path": "bar",
"backend_type": "File"
}
}"#;
let expected_config = LoadSnapshotParams {
snapshot_path: PathBuf::from("foo"),
mem_backend: MemBackendConfig {
backend_path: PathBuf::from("bar"),
backend_type: MemBackendType::File,
},
track_dirty_pages: false,
resume_vm: false,
network_overrides: vec![],
};
let mut parsed_request = parse_put_snapshot(&Body::new(body), Some("load")).unwrap();
assert!(
parsed_request
.parsing_info()
.take_deprecation_message()
.is_none()
);
assert_eq!(
vmm_action_from_request(parsed_request),
VmmAction::LoadSnapshot(expected_config)
);
// Load: `track_dirty_pages` is honored.
let body = r#"{
"snapshot_path": "foo",
"mem_backend": {
"backend_path": "bar",
"backend_type": "File"
},
"track_dirty_pages": true
}"#;
let expected_config = LoadSnapshotParams {
snapshot_path: PathBuf::from("foo"),
mem_backend: MemBackendConfig {
backend_path: PathBuf::from("bar"),
backend_type: MemBackendType::File,
},
track_dirty_pages: true,
resume_vm: false,
network_overrides: vec![],
};
let mut parsed_request = parse_put_snapshot(&Body::new(body), Some("load")).unwrap();
assert!(
parsed_request
.parsing_info()
.take_deprecation_message()
.is_none()
);
assert_eq!(
vmm_action_from_request(parsed_request),
VmmAction::LoadSnapshot(expected_config)
);
// Load: Uffd backend with `resume_vm`.
let body = r#"{
"snapshot_path": "foo",
"mem_backend": {
"backend_path": "bar",
"backend_type": "Uffd"
},
"resume_vm": true
}"#;
let expected_config = LoadSnapshotParams {
snapshot_path: PathBuf::from("foo"),
mem_backend: MemBackendConfig {
backend_path: PathBuf::from("bar"),
backend_type: MemBackendType::Uffd,
},
track_dirty_pages: false,
resume_vm: true,
network_overrides: vec![],
};
let mut parsed_request = parse_put_snapshot(&Body::new(body), Some("load")).unwrap();
assert!(
parsed_request
.parsing_info()
.take_deprecation_message()
.is_none()
);
assert_eq!(
vmm_action_from_request(parsed_request),
VmmAction::LoadSnapshot(expected_config)
);
// Load: network overrides are carried through.
let body = r#"{
"snapshot_path": "foo",
"mem_backend": {
"backend_path": "bar",
"backend_type": "Uffd"
},
"resume_vm": true,
"network_overrides": [
{
"iface_id": "eth0",
"host_dev_name": "vmtap2"
}
]
}"#;
let expected_config = LoadSnapshotParams {
snapshot_path: PathBuf::from("foo"),
mem_backend: MemBackendConfig {
backend_path: PathBuf::from("bar"),
backend_type: MemBackendType::Uffd,
},
track_dirty_pages: false,
resume_vm: true,
network_overrides: vec![NetworkOverride {
iface_id: String::from("eth0"),
host_dev_name: String::from("vmtap2"),
}],
};
let mut parsed_request = parse_put_snapshot(&Body::new(body), Some("load")).unwrap();
assert!(
parsed_request
.parsing_info()
.take_deprecation_message()
.is_none()
);
assert_eq!(
vmm_action_from_request(parsed_request),
VmmAction::LoadSnapshot(expected_config)
);
// Load: deprecated `mem_file_path` form must attach the deprecation message.
let body = r#"{
"snapshot_path": "foo",
"mem_file_path": "bar",
"resume_vm": true
}"#;
let expected_config = LoadSnapshotParams {
snapshot_path: PathBuf::from("foo"),
mem_backend: MemBackendConfig {
backend_path: PathBuf::from("bar"),
backend_type: MemBackendType::File,
},
track_dirty_pages: false,
resume_vm: true,
network_overrides: vec![],
};
let parsed_request = parse_put_snapshot(&Body::new(body), Some("load")).unwrap();
assert_eq!(
depr_action_from_req(parsed_request, Some(LOAD_DEPRECATION_MESSAGE.to_string())),
VmmAction::LoadSnapshot(expected_config)
);
// Error cases below: the expected serde error positions depend on the exact
// bytes of the raw-string bodies, so the bodies must not be reformatted.
let body = r#"{
"snapshot_path": "foo",
"mem_backend": {
"backend_path": "bar"
}
}"#;
assert_eq!(
parse_put_snapshot(&Body::new(body), Some("load"))
.err()
.unwrap()
.to_string(),
"An error occurred when deserializing the json body of a request: missing field \
`backend_type` at line 5 column 13."
);
let body = r#"{
"snapshot_path": "foo",
"mem_backend": {
"backend_type": "File",
}
}"#;
assert_eq!(
parse_put_snapshot(&Body::new(body), Some("load"))
.err()
.unwrap()
.to_string(),
"An error occurred when deserializing the json body of a request: trailing comma at \
line 5 column 13."
);
let body = r#"{
"snapshot_path": "foo",
"mem_file_path": "bar",
"mem_backend": {
"backend_path": "bar",
"backend_type": "Uffd"
}
}"#;
assert_eq!(
parse_put_snapshot(&Body::new(body), Some("load"))
.err()
.unwrap()
.to_string(),
RequestError::SerdeJson(serde_json::Error::custom(TOO_MANY_FIELDS.to_string()))
.to_string()
);
let body = r#"{
"snapshot_path": "foo"
}"#;
assert_eq!(
parse_put_snapshot(&Body::new(body), Some("load"))
.err()
.unwrap()
.to_string(),
RequestError::SerdeJson(serde_json::Error::custom(MISSING_FIELD.to_string()))
.to_string()
);
let body = r#"{
"mem_backend": {
"backend_path": "bar",
"backend_type": "Uffd"
}
}"#;
assert_eq!(
parse_put_snapshot(&Body::new(body), Some("load"))
.err()
.unwrap()
.to_string(),
"An error occurred when deserializing the json body of a request: missing field \
`snapshot_path` at line 6 column 9."
);
// Unknown and missing operation types are rejected.
parse_put_snapshot(&Body::new(body), Some("invalid")).unwrap_err();
parse_put_snapshot(&Body::new(body), None).unwrap_err();
}
#[test]
fn test_parse_patch_vm_state() {
    // "Paused" maps to the Pause action.
    let pause_body = r#"{
"state": "Paused"
}"#;
    let parsed = parse_patch_vm_state(&Body::new(pause_body)).unwrap();
    assert!(parsed.eq(&ParsedRequest::new_sync(VmmAction::Pause)));
    // "Resumed" maps to the Resume action.
    let resume_body = r#"{
"state": "Resumed"
}"#;
    let parsed = parse_patch_vm_state(&Body::new(resume_body)).unwrap();
    assert!(parsed.eq(&ParsedRequest::new_sync(VmmAction::Resume)));
    // Unknown fields must be rejected.
    let invalid_body = r#"{
"invalid": "Paused"
}"#;
    parse_patch_vm_state(&Body::new(invalid_body)).unwrap_err();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/cpu_configuration.rs | src/firecracker/src/api_server/request/cpu_configuration.rs | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vmm::cpu_config::templates::CustomCpuTemplate;
use vmm::logger::{IncMetric, METRICS};
use vmm::rpc_interface::VmmAction;
use super::super::parsed_request::{ParsedRequest, RequestError};
use super::Body;
/// Parses a `PUT /cpu-config` body into a `PutCpuConfiguration` action.
pub(crate) fn parse_put_cpu_config(body: &Body) -> Result<ParsedRequest, RequestError> {
    METRICS.put_api_requests.cpu_cfg_count.inc();
    // Convert the JSON payload into the binary CPU template form,
    // counting a failure metric on any parse error.
    let template = CustomCpuTemplate::try_from(body.raw()).map_err(|err| {
        METRICS.put_api_requests.cpu_cfg_fails.inc();
        RequestError::SerdeJson(err)
    })?;
    Ok(ParsedRequest::new_sync(VmmAction::PutCpuConfiguration(
        template,
    )))
}
#[cfg(test)]
mod tests {
use micro_http::Body;
use vmm::cpu_config::templates::test_utils::{TEST_INVALID_TEMPLATE_JSON, build_test_template};
use vmm::logger::{IncMetric, METRICS};
use vmm::rpc_interface::VmmAction;
use super::*;
use crate::api_server::parsed_request::tests::vmm_action_from_request;
#[test]
fn test_parse_put_cpu_config_request() {
    // Serialize a known-good template and feed it back through the parser.
    let template = build_test_template();
    let serialized = serde_json::to_string(&template);
    assert!(serialized.is_ok(), "Unable to serialize CPU template to JSON");
    let json = serialized.unwrap();
    let parsed = parse_put_cpu_config(&Body::new(json.as_bytes())).unwrap();
    // The parsed action must carry exactly the template we started from.
    assert_eq!(
        vmm_action_from_request(parsed),
        VmmAction::PutCpuConfiguration(template)
    );
    // An empty JSON object is a valid (empty) CPU template.
    let empty = parse_put_cpu_config(&Body::new(r#"{ }"#));
    assert!(
        empty.is_ok(),
        "Failed to parse cpu-config: [{}]",
        empty.unwrap_err()
    );
}
/// Test basic API server validations like JSON sanity/legibility
/// Any testing or validation done involving KVM or OS specific context
/// need to be done in integration testing (api_cpu_configuration_integ_tests)
#[test]
fn test_parse_put_cpu_config_request_errors() {
// Baseline the failure counter: METRICS is process-global, so other tests
// running in the same process may already have bumped it.
let mut expected_err_count = METRICS.put_api_requests.cpu_cfg_fails.count() + 1;
// Test case for invalid payload
let unparsable_cpu_config_result =
parse_put_cpu_config(&Body::new("<unparseable_payload>"));
unparsable_cpu_config_result.unwrap_err();
assert_eq!(
METRICS.put_api_requests.cpu_cfg_fails.count(),
expected_err_count
);
// Test request with invalid fields
let invalid_put_result = parse_put_cpu_config(&Body::new(TEST_INVALID_TEMPLATE_JSON));
expected_err_count += 1;
assert_eq!(
METRICS.put_api_requests.cpu_cfg_fails.count(),
expected_err_count
);
// The error must surface as a serde (deserialization) failure.
assert!(
matches!(invalid_put_result, Err(RequestError::SerdeJson(_))),
"{:?}",
invalid_put_result
);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/net.rs | src/firecracker/src/api_server/request/net.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vmm::logger::{IncMetric, METRICS};
use vmm::rpc_interface::VmmAction;
use vmm::vmm_config::net::{NetworkInterfaceConfig, NetworkInterfaceUpdateConfig};
use super::super::parsed_request::{ParsedRequest, RequestError, checked_id};
use super::{Body, StatusCode};
/// Parses a `PUT /network-interfaces/{id}` request into an `InsertNetworkDevice` action.
pub(crate) fn parse_put_net(
    body: &Body,
    id_from_path: Option<&str>,
) -> Result<ParsedRequest, RequestError> {
    METRICS.put_api_requests.network_count.inc();
    // The interface id must be present in the URL path.
    let Some(path_id) = id_from_path else {
        METRICS.put_api_requests.network_fails.inc();
        return Err(RequestError::EmptyID);
    };
    let id = checked_id(path_id)?;
    let netif = serde_json::from_slice::<NetworkInterfaceConfig>(body.raw()).inspect_err(|_| {
        METRICS.put_api_requests.network_fails.inc();
    })?;
    // The id in the path and the id in the body must agree.
    if id != netif.iface_id.as_str() {
        METRICS.put_api_requests.network_fails.inc();
        return Err(RequestError::Generic(
            StatusCode::BadRequest,
            format!(
                "The id from the path [{}] does not match the id from the body [{}]!",
                id,
                netif.iface_id.as_str()
            ),
        ));
    }
    Ok(ParsedRequest::new_sync(VmmAction::InsertNetworkDevice(
        netif,
    )))
}
/// Parses a `PATCH /network-interfaces/{id}` request into an `UpdateNetworkInterface` action.
pub(crate) fn parse_patch_net(
    body: &Body,
    id_from_path: Option<&str>,
) -> Result<ParsedRequest, RequestError> {
    METRICS.patch_api_requests.network_count.inc();
    let id = if let Some(id) = id_from_path {
        checked_id(id)?
    } else {
        // Bug fix: this path previously incremented `network_count` a second
        // time instead of `network_fails`, double-counting the request and
        // never recording the failure. Match parse_put_net's accounting.
        METRICS.patch_api_requests.network_fails.inc();
        return Err(RequestError::EmptyID);
    };
    let netif =
        serde_json::from_slice::<NetworkInterfaceUpdateConfig>(body.raw()).inspect_err(|_| {
            METRICS.patch_api_requests.network_fails.inc();
        })?;
    // The id in the path and the id in the body must agree.
    if id != netif.iface_id {
        // Bug fix: same mis-counted metric as above (`network_count` -> `network_fails`).
        METRICS.patch_api_requests.network_fails.inc();
        return Err(RequestError::Generic(
            StatusCode::BadRequest,
            format!(
                "The id from the path [{}] does not match the id from the body [{}]!",
                id,
                netif.iface_id.as_str()
            ),
        ));
    }
    Ok(ParsedRequest::new_sync(VmmAction::UpdateNetworkInterface(
        netif,
    )))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::api_server::parsed_request::tests::vmm_action_from_request;
#[test]
fn test_parse_put_net_request() {
// A fully valid interface body, used for the first three cases.
let body = r#"{
"iface_id": "foo",
"host_dev_name": "bar",
"guest_mac": "12:34:56:78:9A:BC"
}"#;
// 1. Exercise infamous "The id from the path does not match id from the body!".
parse_put_net(&Body::new(body), Some("bar")).unwrap_err();
// 2. The `id_from_path` cannot be None.
parse_put_net(&Body::new(body), None).unwrap_err();
// 3. Success case.
let expected_config = serde_json::from_str::<NetworkInterfaceConfig>(body).unwrap();
assert_eq!(
vmm_action_from_request(parse_put_net(&Body::new(body), Some("foo")).unwrap()),
VmmAction::InsertNetworkDevice(expected_config)
);
// 4. Serde error for invalid field (bytes instead of bandwidth).
let body = r#"{
"iface_id": "foo",
"rx_rate_limiter": {
"bytes": {
"size": 62500,
"refill_time": 1000
}
},
"tx_rate_limiter": {
"bytes": {
"size": 62500,
"refill_time": 1000
}
}
}"#;
parse_put_net(&Body::new(body), Some("foo")).unwrap_err();
}
#[test]
fn test_parse_patch_net_request() {
// A minimal valid update body, used for the first three cases.
let body = r#"{
"iface_id": "foo",
"rx_rate_limiter": {},
"tx_rate_limiter": {}
}"#;
// 1. Exercise infamous "The id from the path does not match id from the body!".
parse_patch_net(&Body::new(body), Some("bar")).unwrap_err();
// 2. The `id_from_path` cannot be None.
parse_patch_net(&Body::new(body), None).unwrap_err();
// 3. Success case.
let expected_config = serde_json::from_str::<NetworkInterfaceUpdateConfig>(body).unwrap();
assert_eq!(
vmm_action_from_request(parse_patch_net(&Body::new(body), Some("foo")).unwrap()),
VmmAction::UpdateNetworkInterface(expected_config)
);
// 4. Serde error for invalid field (bytes instead of bandwidth).
let body = r#"{
"iface_id": "foo",
"rx_rate_limiter": {
"bytes": {
"size": 62500,
"refill_time": 1000
}
},
"tx_rate_limiter": {
"bytes": {
"size": 62500,
"refill_time": 1000
}
}
}"#;
parse_patch_net(&Body::new(body), Some("foo")).unwrap_err();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/balloon.rs | src/firecracker/src/api_server/request/balloon.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use micro_http::{Method, StatusCode};
use vmm::rpc_interface::VmmAction;
use vmm::vmm_config::balloon::{
BalloonDeviceConfig, BalloonUpdateConfig, BalloonUpdateStatsConfig,
};
use super::super::parsed_request::{ParsedRequest, RequestError};
use super::Body;
use crate::api_server::parsed_request::method_to_error;
/// Parses the remainder of a `GET /balloon/hinting/...` path.
fn parse_get_hinting<'a, T>(mut path_tokens: T) -> Result<ParsedRequest, RequestError>
where
    T: Iterator<Item = &'a str>,
{
    let token = path_tokens.next();
    // Only `/hinting/status` is a valid GET target under `/hinting`.
    if token == Some("status") {
        return Ok(ParsedRequest::new_sync(VmmAction::GetFreePageHintingStatus));
    }
    // Anything else (including a trailing slash with no component) is rejected.
    let message = match token {
        Some(other) => format!("Unrecognized GET request path `/hinting/{other}`."),
        None => "Unrecognized GET request path `/hinting/`.".to_string(),
    };
    Err(RequestError::Generic(StatusCode::BadRequest, message))
}
/// Parses a `GET /balloon[/...]` path into the matching query action.
pub(crate) fn parse_get_balloon<'a, T>(mut path_tokens: T) -> Result<ParsedRequest, RequestError>
where
    T: Iterator<Item = &'a str>,
{
    // Dispatch on the first path component after `/balloon`.
    match path_tokens.next() {
        None => Ok(ParsedRequest::new_sync(VmmAction::GetBalloonConfig)),
        Some("statistics") => Ok(ParsedRequest::new_sync(VmmAction::GetBalloonStats)),
        Some("hinting") => parse_get_hinting(path_tokens),
        Some(other) => Err(RequestError::Generic(
            StatusCode::BadRequest,
            format!("Unrecognized GET request path `{}`.", other),
        )),
    }
}
/// Parses a `PUT /balloon` body into a `SetBalloonDevice` action.
pub(crate) fn parse_put_balloon(body: &Body) -> Result<ParsedRequest, RequestError> {
    let config: BalloonDeviceConfig = serde_json::from_slice(body.raw())?;
    Ok(ParsedRequest::new_sync(VmmAction::SetBalloonDevice(config)))
}
/// Parses the remainder of a `PATCH /balloon/hinting/...` request.
fn parse_patch_hinting<'a, T>(
    body: Option<&Body>,
    mut path_tokens: T,
) -> Result<ParsedRequest, RequestError>
where
    T: Iterator<Item = &'a str>,
{
    match path_tokens.next() {
        Some("start") => {
            // An absent or empty body starts hinting with the default command.
            let cmd = match body {
                Some(b) if !b.is_empty() => serde_json::from_slice(b.raw())?,
                _ => Default::default(),
            };
            Ok(ParsedRequest::new_sync(VmmAction::StartFreePageHinting(
                cmd,
            )))
        }
        Some("stop") => Ok(ParsedRequest::new_sync(VmmAction::StopFreePageHinting)),
        Some(other) => Err(RequestError::Generic(
            StatusCode::BadRequest,
            format!("Unrecognized PATCH request path `/hinting/{other}`."),
        )),
        None => Err(RequestError::Generic(
            StatusCode::BadRequest,
            "Unrecognized PATCH request path `/hinting/`.".to_string(),
        )),
    }
}
/// Parses a `PATCH /balloon[/...]` request into the matching update action.
pub(crate) fn parse_patch_balloon<'a, T>(
    body: Option<&Body>,
    mut path_tokens: T,
) -> Result<ParsedRequest, RequestError>
where
    T: Iterator<Item = &'a str>,
{
    let first = path_tokens.next();
    // `/hinting/...` requests may legally arrive without a body.
    if first == Some("hinting") {
        return parse_patch_hinting(body, path_tokens);
    }
    // Every other balloon PATCH requires a body.
    let Some(body) = body else {
        return method_to_error(Method::Patch);
    };
    if first == Some("statistics") {
        Ok(ParsedRequest::new_sync(VmmAction::UpdateBalloonStatistics(
            serde_json::from_slice::<BalloonUpdateStatsConfig>(body.raw())?,
        )))
    } else {
        Ok(ParsedRequest::new_sync(VmmAction::UpdateBalloon(
            serde_json::from_slice::<BalloonUpdateConfig>(body.raw())?,
        )))
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::api_server::parsed_request::tests::vmm_action_from_request;
#[test]
fn test_parse_get_balloon_request() {
    // Paths that must parse successfully.
    for ok_path in [vec![], vec!["statistics"], vec!["hinting", "status"]] {
        parse_get_balloon(ok_path.into_iter()).unwrap();
    }
    // Paths that must be rejected.
    for bad_path in [vec!["unrelated"], vec!["hinting", "unrelated"], vec!["hinting"]] {
        parse_get_balloon(bad_path.into_iter()).unwrap_err();
    }
}
#[test]
fn test_parse_patch_balloon_request() {
parse_patch_balloon(Some(&Body::new("invalid_payload")), [].into_iter()).unwrap_err();
// PATCH with invalid fields.
let body = r#"{
"amount_mib": "bar",
"foo": "bar"
}"#;
parse_patch_balloon(Some(&Body::new(body)), [].into_iter()).unwrap_err();
// PATCH with invalid types on fields. Adding a polling interval as string instead of bool.
let body = r#"{
"amount_mib": 1000,
"stats_polling_interval_s": "false"
}"#;
let res = parse_patch_balloon(Some(&Body::new(body)), [].into_iter());
res.unwrap_err();
// PATCH with invalid types on fields. Adding a amount_mib as a negative number.
let body = r#"{
"amount_mib": -1000,
"stats_polling_interval_s": true
}"#;
let res = parse_patch_balloon(Some(&Body::new(body)), [].into_iter());
res.unwrap_err();
// PATCH on statistics with missing polling interval field.
let body = r#"{
"amount_mib": 100
}"#;
let res = parse_patch_balloon(Some(&Body::new(body)), ["statistics"].into_iter());
res.unwrap_err();
// PATCH with missing amount_mib field.
let body = r#"{
"stats_polling_interval_s": 0
}"#;
let res = parse_patch_balloon(Some(&Body::new(body)), [].into_iter());
res.unwrap_err();
// PATCH that tries to update something else other than allowed fields.
let body = r#"{
"amount_mib": "dummy_id",
"stats_polling_interval_s": "dummy_host"
}"#;
let res = parse_patch_balloon(Some(&Body::new(body)), [].into_iter());
res.unwrap_err();
// PATCH with payload that is not a json.
let body = r#"{
"fields": "dummy_field"
}"#;
parse_patch_balloon(Some(&Body::new(body)), [].into_iter()).unwrap_err();
// PATCH on unrecognized path.
let body = r#"{
"fields": "dummy_field"
}"#;
parse_patch_balloon(Some(&Body::new(body)), ["config"].into_iter()).unwrap_err();
// Valid balloon size update.
let body = r#"{
"amount_mib": 1
}"#;
let expected_config = BalloonUpdateConfig { amount_mib: 1 };
assert_eq!(
vmm_action_from_request(
parse_patch_balloon(Some(&Body::new(body)), [].into_iter()).unwrap()
),
VmmAction::UpdateBalloon(expected_config)
);
// Valid statistics polling interval update.
let body = r#"{
"stats_polling_interval_s": 1
}"#;
let expected_config = BalloonUpdateStatsConfig {
stats_polling_interval_s: 1,
};
assert_eq!(
vmm_action_from_request(
parse_patch_balloon(Some(&Body::new(body)), ["statistics"].into_iter()).unwrap()
),
VmmAction::UpdateBalloonStatistics(expected_config)
);
// PATCH start hinting run valid data
let body = r#"{
"acknowledge_on_stop": true
}"#;
parse_patch_balloon(Some(&Body::new(body)), ["hinting", "start"].into_iter()).unwrap();
// PATCH start hinting run no body
parse_patch_balloon(Some(&Body::new("")), ["hinting", "start"].into_iter()).unwrap();
// PATCH start hinting run invalid data
let body = r#"{
"acknowledge_on_stop": "not valid"
}"#;
parse_patch_balloon(Some(&Body::new(body)), ["hinting", "start"].into_iter()).unwrap_err();
// PATCH start hinting run invalid data (same body, still rejected)
parse_patch_balloon(Some(&Body::new(body)), ["hinting", "start"].into_iter()).unwrap_err();
// PATCH stop hinting run
parse_patch_balloon(Some(&Body::new("")), ["hinting", "stop"].into_iter()).unwrap();
// PATCH stop hinting run
parse_patch_balloon(None, ["hinting", "stop"].into_iter()).unwrap();
// PATCH stop hinting invalid path
parse_patch_balloon(Some(&Body::new("")), ["hinting"].into_iter()).unwrap_err();
// PATCH stop hinting invalid path
parse_patch_balloon(Some(&Body::new("")), ["hinting", "other path"].into_iter())
.unwrap_err();
// PATCH no body non hinting
parse_patch_balloon(None, ["hinting"].into_iter()).unwrap_err();
}
#[test]
fn test_parse_put_balloon_request() {
parse_put_balloon(&Body::new("invalid_payload")).unwrap_err();
// PUT with invalid fields.
let body = r#"{
"amount_mib": "bar",
"is_read_only": false
}"#;
parse_put_balloon(&Body::new(body)).unwrap_err();
// PUT with valid input fields; `free_page_hinting`/`free_page_reporting` omitted.
let body = r#"{
"amount_mib": 1000,
"deflate_on_oom": true,
"stats_polling_interval_s": 0
}"#;
parse_put_balloon(&Body::new(body)).unwrap();
// PUT with valid input hinting
let body = r#"{
"amount_mib": 1000,
"deflate_on_oom": true,
"stats_polling_interval_s": 0,
"free_page_hinting": true
}"#;
parse_put_balloon(&Body::new(body)).unwrap();
// PUT with valid reporting
let body = r#"{
"amount_mib": 1000,
"deflate_on_oom": true,
"stats_polling_interval_s": 0,
"free_page_reporting": true
}"#;
parse_put_balloon(&Body::new(body)).unwrap();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/hotplug/memory.rs | src/firecracker/src/api_server/request/hotplug/memory.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use micro_http::Body;
use vmm::logger::{IncMetric, METRICS};
use vmm::rpc_interface::VmmAction;
use vmm::vmm_config::memory_hotplug::{MemoryHotplugConfig, MemoryHotplugSizeUpdate};
use crate::api_server::parsed_request::{ParsedRequest, RequestError};
/// Parses a `PUT /memory-hotplug` body into a `SetMemoryHotplugDevice` action.
pub(crate) fn parse_put_memory_hotplug(body: &Body) -> Result<ParsedRequest, RequestError> {
    METRICS.put_api_requests.hotplug_memory_count.inc();
    // Deserialize first so a malformed body bumps the failure metric before bailing out.
    let config: MemoryHotplugConfig = serde_json::from_slice(body.raw()).inspect_err(|_| {
        METRICS.put_api_requests.hotplug_memory_fails.inc();
    })?;
    Ok(ParsedRequest::new_sync(VmmAction::SetMemoryHotplugDevice(
        config,
    )))
}
/// Parses a `GET /memory-hotplug` request (no body) into a status query action.
pub(crate) fn parse_get_memory_hotplug() -> Result<ParsedRequest, RequestError> {
    METRICS.get_api_requests.hotplug_memory_count.inc();
    let action = VmmAction::GetMemoryHotplugStatus;
    Ok(ParsedRequest::new_sync(action))
}
/// Parses a `PATCH /memory-hotplug` body into an `UpdateMemoryHotplugSize` action.
pub(crate) fn parse_patch_memory_hotplug(body: &Body) -> Result<ParsedRequest, RequestError> {
    METRICS.patch_api_requests.hotplug_memory_count.inc();
    // Deserialize first so a malformed body bumps the failure metric before bailing out.
    let update: MemoryHotplugSizeUpdate = serde_json::from_slice(body.raw()).inspect_err(|_| {
        METRICS.patch_api_requests.hotplug_memory_fails.inc();
    })?;
    Ok(ParsedRequest::new_sync(VmmAction::UpdateMemoryHotplugSize(
        update,
    )))
}
#[cfg(test)]
mod tests {
    use vmm::devices::virtio::mem::{
        VIRTIO_MEM_DEFAULT_BLOCK_SIZE_MIB, VIRTIO_MEM_DEFAULT_SLOT_SIZE_MIB,
    };
    use vmm::vmm_config::memory_hotplug::MemoryHotplugSizeUpdate;

    use super::*;
    use crate::api_server::parsed_request::tests::vmm_action_from_request;

    /// Parses `json` as a PUT body and extracts the resulting `VmmAction`.
    fn put_action(json: &str) -> VmmAction {
        vmm_action_from_request(parse_put_memory_hotplug(&Body::new(json)).unwrap())
    }

    #[test]
    fn test_parse_put_memory_hotplug_request() {
        // Garbage payloads must be rejected.
        parse_put_memory_hotplug(&Body::new("invalid_payload")).unwrap_err();

        // PUT with invalid fields.
        let body = r#"{
            "total_size_mib": "bar"
        }"#;
        parse_put_memory_hotplug(&Body::new(body)).unwrap_err();

        // PUT with valid input fields with defaults.
        let body = r#"{
            "total_size_mib": 2048
        }"#;
        assert_eq!(
            put_action(body),
            VmmAction::SetMemoryHotplugDevice(MemoryHotplugConfig {
                total_size_mib: 2048,
                block_size_mib: VIRTIO_MEM_DEFAULT_BLOCK_SIZE_MIB,
                slot_size_mib: VIRTIO_MEM_DEFAULT_SLOT_SIZE_MIB,
            })
        );

        // PUT with valid input fields.
        let body = r#"{
            "total_size_mib": 2048,
            "block_size_mib": 64,
            "slot_size_mib": 64
        }"#;
        assert_eq!(
            put_action(body),
            VmmAction::SetMemoryHotplugDevice(MemoryHotplugConfig {
                total_size_mib: 2048,
                block_size_mib: 64,
                slot_size_mib: 64,
            })
        );
    }

    #[test]
    fn test_parse_parse_get_memory_hotplug_request() {
        assert_eq!(
            vmm_action_from_request(parse_get_memory_hotplug().unwrap()),
            VmmAction::GetMemoryHotplugStatus
        );
    }

    #[test]
    fn test_parse_patch_memory_hotplug_request() {
        // Garbage payloads must be rejected.
        parse_patch_memory_hotplug(&Body::new("invalid_payload")).unwrap_err();

        // PATCH with invalid fields.
        let body = r#"{
            "requested_size_mib": "bar"
        }"#;
        parse_patch_memory_hotplug(&Body::new(body)).unwrap_err();

        // PATCH with valid input fields.
        let body = r#"{
            "requested_size_mib": 2048
        }"#;
        assert_eq!(
            vmm_action_from_request(parse_patch_memory_hotplug(&Body::new(body)).unwrap()),
            VmmAction::UpdateMemoryHotplugSize(MemoryHotplugSizeUpdate {
                requested_size_mib: 2048,
            })
        );
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/api_server/request/hotplug/mod.rs | src/firecracker/src/api_server/request/hotplug/mod.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
pub mod memory;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/generated/prctl.rs | src/firecracker/src/generated/prctl.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// automatically generated by tools/bindgen.sh
#![allow(
non_camel_case_types,
non_upper_case_globals,
dead_code,
non_snake_case,
clippy::ptr_as_ptr,
clippy::undocumented_unsafe_blocks,
missing_debug_implementations,
clippy::tests_outside_test_module,
unsafe_op_in_unsafe_fn,
clippy::redundant_static_lifetimes
)]
// NOTE(review): this file is regenerated by tools/bindgen.sh; the grouping
// comments below are review annotations and will be lost on regeneration.

// Parent-death signal, dumpable flag, unaligned-access handling.
pub const PR_SET_PDEATHSIG: u32 = 1;
pub const PR_GET_PDEATHSIG: u32 = 2;
pub const PR_GET_DUMPABLE: u32 = 3;
pub const PR_SET_DUMPABLE: u32 = 4;
pub const PR_GET_UNALIGN: u32 = 5;
pub const PR_SET_UNALIGN: u32 = 6;
pub const PR_UNALIGN_NOPRINT: u32 = 1;
pub const PR_UNALIGN_SIGBUS: u32 = 2;
pub const PR_GET_KEEPCAPS: u32 = 7;
pub const PR_SET_KEEPCAPS: u32 = 8;
// Floating-point emulation and exception-mode control.
pub const PR_GET_FPEMU: u32 = 9;
pub const PR_SET_FPEMU: u32 = 10;
pub const PR_FPEMU_NOPRINT: u32 = 1;
pub const PR_FPEMU_SIGFPE: u32 = 2;
pub const PR_GET_FPEXC: u32 = 11;
pub const PR_SET_FPEXC: u32 = 12;
pub const PR_FP_EXC_SW_ENABLE: u32 = 128;
pub const PR_FP_EXC_DIV: u32 = 65536;
pub const PR_FP_EXC_OVF: u32 = 131072;
pub const PR_FP_EXC_UND: u32 = 262144;
pub const PR_FP_EXC_RES: u32 = 524288;
pub const PR_FP_EXC_INV: u32 = 1048576;
pub const PR_FP_EXC_DISABLED: u32 = 0;
pub const PR_FP_EXC_NONRECOV: u32 = 1;
pub const PR_FP_EXC_ASYNC: u32 = 2;
pub const PR_FP_EXC_PRECISE: u32 = 3;
// Process timing mode, comm name, endianness.
pub const PR_GET_TIMING: u32 = 13;
pub const PR_SET_TIMING: u32 = 14;
pub const PR_TIMING_STATISTICAL: u32 = 0;
pub const PR_TIMING_TIMESTAMP: u32 = 1;
pub const PR_SET_NAME: u32 = 15;
pub const PR_GET_NAME: u32 = 16;
pub const PR_GET_ENDIAN: u32 = 19;
pub const PR_SET_ENDIAN: u32 = 20;
pub const PR_ENDIAN_BIG: u32 = 0;
pub const PR_ENDIAN_LITTLE: u32 = 1;
pub const PR_ENDIAN_PPC_LITTLE: u32 = 2;
// Seccomp and capability bounding set.
pub const PR_GET_SECCOMP: u32 = 21;
pub const PR_SET_SECCOMP: u32 = 22;
pub const PR_CAPBSET_READ: u32 = 23;
pub const PR_CAPBSET_DROP: u32 = 24;
// TSC access, securebits, timer slack, perf events, machine-check kill policy.
pub const PR_GET_TSC: u32 = 25;
pub const PR_SET_TSC: u32 = 26;
pub const PR_TSC_ENABLE: u32 = 1;
pub const PR_TSC_SIGSEGV: u32 = 2;
pub const PR_GET_SECUREBITS: u32 = 27;
pub const PR_SET_SECUREBITS: u32 = 28;
pub const PR_SET_TIMERSLACK: u32 = 29;
pub const PR_GET_TIMERSLACK: u32 = 30;
pub const PR_TASK_PERF_EVENTS_DISABLE: u32 = 31;
pub const PR_TASK_PERF_EVENTS_ENABLE: u32 = 32;
pub const PR_MCE_KILL: u32 = 33;
pub const PR_MCE_KILL_CLEAR: u32 = 0;
pub const PR_MCE_KILL_SET: u32 = 1;
pub const PR_MCE_KILL_LATE: u32 = 0;
pub const PR_MCE_KILL_EARLY: u32 = 1;
pub const PR_MCE_KILL_DEFAULT: u32 = 2;
pub const PR_MCE_KILL_GET: u32 = 34;
// prctl(PR_SET_MM, ...) sub-operations for adjusting memory-map fields.
pub const PR_SET_MM: u32 = 35;
pub const PR_SET_MM_START_CODE: u32 = 1;
pub const PR_SET_MM_END_CODE: u32 = 2;
pub const PR_SET_MM_START_DATA: u32 = 3;
pub const PR_SET_MM_END_DATA: u32 = 4;
pub const PR_SET_MM_START_STACK: u32 = 5;
pub const PR_SET_MM_START_BRK: u32 = 6;
pub const PR_SET_MM_BRK: u32 = 7;
pub const PR_SET_MM_ARG_START: u32 = 8;
pub const PR_SET_MM_ARG_END: u32 = 9;
pub const PR_SET_MM_ENV_START: u32 = 10;
pub const PR_SET_MM_ENV_END: u32 = 11;
pub const PR_SET_MM_AUXV: u32 = 12;
pub const PR_SET_MM_EXE_FILE: u32 = 13;
pub const PR_SET_MM_MAP: u32 = 14;
pub const PR_SET_MM_MAP_SIZE: u32 = 15;
// Ptracer designation, child subreaper, no_new_privs, THP, MPX, FP mode.
pub const PR_SET_PTRACER: u32 = 1499557217;
pub const PR_SET_CHILD_SUBREAPER: u32 = 36;
pub const PR_GET_CHILD_SUBREAPER: u32 = 37;
pub const PR_SET_NO_NEW_PRIVS: u32 = 38;
pub const PR_GET_NO_NEW_PRIVS: u32 = 39;
pub const PR_GET_TID_ADDRESS: u32 = 40;
pub const PR_SET_THP_DISABLE: u32 = 41;
pub const PR_GET_THP_DISABLE: u32 = 42;
pub const PR_MPX_ENABLE_MANAGEMENT: u32 = 43;
pub const PR_MPX_DISABLE_MANAGEMENT: u32 = 44;
pub const PR_SET_FP_MODE: u32 = 45;
pub const PR_GET_FP_MODE: u32 = 46;
pub const PR_FP_MODE_FR: u32 = 1;
pub const PR_FP_MODE_FRE: u32 = 2;
// Ambient capabilities.
pub const PR_CAP_AMBIENT: u32 = 47;
pub const PR_CAP_AMBIENT_IS_SET: u32 = 1;
pub const PR_CAP_AMBIENT_RAISE: u32 = 2;
pub const PR_CAP_AMBIENT_LOWER: u32 = 3;
pub const PR_CAP_AMBIENT_CLEAR_ALL: u32 = 4;
// SVE vector length control.
pub const PR_SVE_SET_VL: u32 = 50;
pub const PR_SVE_SET_VL_ONEXEC: u32 = 262144;
pub const PR_SVE_GET_VL: u32 = 51;
pub const PR_SVE_VL_LEN_MASK: u32 = 65535;
pub const PR_SVE_VL_INHERIT: u32 = 131072;
// Speculative-execution mitigation control.
pub const PR_GET_SPECULATION_CTRL: u32 = 52;
// NOTE(review): typed `i32` while every sibling constant is `u32`; this is
// bindgen output — confirm against the generator before relying on the type.
pub const PR_SET_SPECULATION_CTRL: i32 = 53;
pub const PR_SPEC_STORE_BYPASS: u32 = 0;
pub const PR_SPEC_INDIRECT_BRANCH: u32 = 1;
pub const PR_SPEC_L1D_FLUSH: u32 = 2;
pub const PR_SPEC_NOT_AFFECTED: u32 = 0;
pub const PR_SPEC_PRCTL: u32 = 1;
pub const PR_SPEC_ENABLE: u32 = 2;
pub const PR_SPEC_DISABLE: u32 = 4;
pub const PR_SPEC_FORCE_DISABLE: u32 = 8;
pub const PR_SPEC_DISABLE_NOEXEC: u32 = 16;
// Pointer-authentication (PAC) key management.
pub const PR_PAC_RESET_KEYS: u32 = 54;
pub const PR_PAC_APIAKEY: u32 = 1;
pub const PR_PAC_APIBKEY: u32 = 2;
pub const PR_PAC_APDAKEY: u32 = 4;
pub const PR_PAC_APDBKEY: u32 = 8;
pub const PR_PAC_APGAKEY: u32 = 16;
// Tagged address ABI and MTE tag-check-fault control.
pub const PR_SET_TAGGED_ADDR_CTRL: u32 = 55;
pub const PR_GET_TAGGED_ADDR_CTRL: u32 = 56;
pub const PR_TAGGED_ADDR_ENABLE: u32 = 1;
pub const PR_MTE_TCF_NONE: u32 = 0;
pub const PR_MTE_TCF_SYNC: u32 = 2;
pub const PR_MTE_TCF_ASYNC: u32 = 4;
pub const PR_MTE_TCF_MASK: u32 = 6;
pub const PR_MTE_TAG_SHIFT: u32 = 3;
pub const PR_MTE_TAG_MASK: u32 = 524280;
pub const PR_MTE_TCF_SHIFT: u32 = 1;
pub const PR_PMLEN_SHIFT: u32 = 24;
pub const PR_PMLEN_MASK: u32 = 2130706432;
// IO_FLUSHER state and syscall user dispatch.
pub const PR_SET_IO_FLUSHER: u32 = 57;
pub const PR_GET_IO_FLUSHER: u32 = 58;
pub const PR_SET_SYSCALL_USER_DISPATCH: u32 = 59;
pub const PR_SYS_DISPATCH_OFF: u32 = 0;
pub const PR_SYS_DISPATCH_ON: u32 = 1;
pub const PR_PAC_SET_ENABLED_KEYS: u32 = 60;
pub const PR_PAC_GET_ENABLED_KEYS: u32 = 61;
// Core scheduling.
pub const PR_SCHED_CORE: u32 = 62;
pub const PR_SCHED_CORE_GET: u32 = 0;
pub const PR_SCHED_CORE_CREATE: u32 = 1;
pub const PR_SCHED_CORE_SHARE_TO: u32 = 2;
pub const PR_SCHED_CORE_SHARE_FROM: u32 = 3;
pub const PR_SCHED_CORE_MAX: u32 = 4;
pub const PR_SCHED_CORE_SCOPE_THREAD: u32 = 0;
pub const PR_SCHED_CORE_SCOPE_THREAD_GROUP: u32 = 1;
pub const PR_SCHED_CORE_SCOPE_PROCESS_GROUP: u32 = 2;
// SME vector length control.
pub const PR_SME_SET_VL: u32 = 63;
pub const PR_SME_SET_VL_ONEXEC: u32 = 262144;
pub const PR_SME_GET_VL: u32 = 64;
pub const PR_SME_VL_LEN_MASK: u32 = 65535;
pub const PR_SME_VL_INHERIT: u32 = 131072;
// Memory-deny-write-execute, anonymous VMA naming, auxv, memory merge.
pub const PR_SET_MDWE: u32 = 65;
pub const PR_MDWE_REFUSE_EXEC_GAIN: u32 = 1;
pub const PR_MDWE_NO_INHERIT: u32 = 2;
pub const PR_GET_MDWE: u32 = 66;
pub const PR_SET_VMA: u32 = 1398164801;
pub const PR_SET_VMA_ANON_NAME: u32 = 0;
pub const PR_GET_AUXV: u32 = 1096112214;
pub const PR_SET_MEMORY_MERGE: u32 = 67;
pub const PR_GET_MEMORY_MERGE: u32 = 68;
// RISC-V vector state and icache flush context control.
pub const PR_RISCV_V_SET_CONTROL: u32 = 69;
pub const PR_RISCV_V_GET_CONTROL: u32 = 70;
pub const PR_RISCV_V_VSTATE_CTRL_DEFAULT: u32 = 0;
pub const PR_RISCV_V_VSTATE_CTRL_OFF: u32 = 1;
pub const PR_RISCV_V_VSTATE_CTRL_ON: u32 = 2;
pub const PR_RISCV_V_VSTATE_CTRL_INHERIT: u32 = 16;
pub const PR_RISCV_V_VSTATE_CTRL_CUR_MASK: u32 = 3;
pub const PR_RISCV_V_VSTATE_CTRL_NEXT_MASK: u32 = 12;
pub const PR_RISCV_V_VSTATE_CTRL_MASK: u32 = 31;
pub const PR_RISCV_SET_ICACHE_FLUSH_CTX: u32 = 71;
pub const PR_RISCV_CTX_SW_FENCEI_ON: u32 = 0;
pub const PR_RISCV_CTX_SW_FENCEI_OFF: u32 = 1;
pub const PR_RISCV_SCOPE_PER_PROCESS: u32 = 0;
pub const PR_RISCV_SCOPE_PER_THREAD: u32 = 1;
// PowerPC DEXCR control.
pub const PR_PPC_GET_DEXCR: u32 = 72;
pub const PR_PPC_SET_DEXCR: u32 = 73;
pub const PR_PPC_DEXCR_SBHE: u32 = 0;
pub const PR_PPC_DEXCR_IBRTPD: u32 = 1;
pub const PR_PPC_DEXCR_SRAPD: u32 = 2;
pub const PR_PPC_DEXCR_NPHIE: u32 = 3;
pub const PR_PPC_DEXCR_CTRL_EDITABLE: u32 = 1;
pub const PR_PPC_DEXCR_CTRL_SET: u32 = 2;
pub const PR_PPC_DEXCR_CTRL_CLEAR: u32 = 4;
pub const PR_PPC_DEXCR_CTRL_SET_ONEXEC: u32 = 8;
pub const PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC: u32 = 16;
pub const PR_PPC_DEXCR_CTRL_MASK: u32 = 31;
// Shadow stack control.
pub const PR_GET_SHADOW_STACK_STATUS: u32 = 74;
pub const PR_SET_SHADOW_STACK_STATUS: u32 = 75;
pub const PR_SHADOW_STACK_ENABLE: u32 = 1;
pub const PR_SHADOW_STACK_WRITE: u32 = 2;
pub const PR_SHADOW_STACK_PUSH: u32 = 4;
pub const PR_LOCK_SHADOW_STACK_STATUS: u32 = 76;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/src/generated/mod.rs | src/firecracker/src/generated/mod.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
pub mod prctl;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/tests/verify_dependencies.rs | src/firecracker/tests/verify_dependencies.rs | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![allow(clippy::tests_outside_test_module)]
use std::collections::HashMap;
use std::fmt::Debug;
use std::path::Path;
use cargo_toml::{Dependency, DepsSet, Manifest};
use regex::Regex;
#[test]
fn test_no_comparison_requirements() {
    // Maps crate name -> (violating dependency -> version as written).
    let mut offenders = HashMap::new();
    let workspace_src = format!("{}/..", std::env::var("CARGO_MANIFEST_DIR").unwrap());
    for entry in std::fs::read_dir(workspace_src).unwrap() {
        let entry = entry.unwrap();
        // Only sibling crate directories carry a Cargo.toml we care about.
        if !entry.metadata().unwrap().is_dir() {
            continue;
        }
        let bad = violating_dependencies_of_cargo_toml(entry.path().join("Cargo.toml"));
        if !bad.is_empty() {
            offenders.insert(entry.file_name().into_string().unwrap(), bad);
        }
    }
    assert_eq!(
        offenders,
        HashMap::new(),
        "Dependencies should not be specified as comparison requirements. \
         They should use caret requirements. See: \
         https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html"
    );
}
/// Parses the specified Cargo.toml file and returns any dependencies specified
/// using comparison requirements.
///
/// Normal, dev and build dependencies are all checked. The return value maps
/// the name of each violating dependency to the version string as written.
fn violating_dependencies_of_cargo_toml<T: AsRef<Path> + Debug>(
    path: T,
) -> HashMap<String, String> {
    let manifest = Manifest::from_path(path).unwrap();
    let mut violating = HashMap::new();
    violating.extend(violating_dependencies_of_depsset(manifest.dependencies));
    violating.extend(violating_dependencies_of_depsset(manifest.dev_dependencies));
    violating.extend(violating_dependencies_of_depsset(
        manifest.build_dependencies,
    ));
    violating
}
/// Returns an iterator over all dependencies in the given DepsSet specified
/// using comparison requirements.
///
/// The iterator produces tuples of the form (violating dependency, specified version)
fn violating_dependencies_of_depsset(depsset: DepsSet) -> impl Iterator<Item = (String, String)> {
    // Accepts plain or exact requirements such as "1.2.3" / "=1.2.3"; anything
    // else (e.g. ">=1.2", "~1.2", wildcards) is reported as a violation.
    // Compiled once here instead of once per dependency inside the closure.
    let version_re = Regex::new(r"^=?\d*\.\d*\.\d*$").unwrap();
    depsset
        .into_iter()
        .filter_map(|(name, dependency)| {
            match dependency {
                // Dependencies specified as `libc = "0.2.117"`.
                Dependency::Simple(version) => Some((name, version)),
                // Detailed dependencies only carry a version if one was written;
                // e.g. `libc = { path = "../libc" }` has none and is skipped.
                Dependency::Detailed(dependency_detail) => {
                    dependency_detail.version.map(|version| (name, version))
                }
                _ => None,
            }
        })
        .filter(move |(_, version)| !version_re.is_match(version))
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/examples/uffd/on_demand_handler.rs | src/firecracker/examples/uffd/on_demand_handler.rs | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Provides functionality for a userspace page fault handler
//! which loads the whole region from the backing memory file
//! when a page fault occurs.
mod uffd_utils;
use std::fs::File;
use std::os::unix::net::UnixListener;
use uffd_utils::{Runtime, UffdHandler};
fn main() {
    let mut args = std::env::args();
    // nth(1) skips argv[0] (the binary name).
    let uffd_sock_path = args.nth(1).expect("No socket path given");
    let mem_file_path = args.next().expect("No memory file given");
    let file = File::open(mem_file_path).expect("Cannot open memfile");
    // Get Uffd from UDS. We'll use the uffd to handle PFs for Firecracker.
    let listener = UnixListener::bind(uffd_sock_path).expect("Cannot bind to socket path");
    let (stream, _) = listener.accept().expect("Cannot listen on UDS socket");
    let mut runtime = Runtime::new(stream, file);
    // Kill the connected Firecracker process if this handler panics.
    runtime.install_panic_hook();
    runtime.run(|uffd_handler: &mut UffdHandler| {
        // !DISCLAIMER!
        // When using UFFD together with the balloon device, this handler needs to deal with
        // `remove` and `pagefault` events. There are multiple things to keep in mind in
        // such setups:
        //
        // As long as any `remove` event is pending in the UFFD queue, all ioctls return EAGAIN
        // -----------------------------------------------------------------------------------
        //
        // This means we cannot process UFFD events simply one-by-one anymore - if a `remove` event
        // arrives, we need to pre-fetch all other events up to the `remove` event, to unblock the
        // UFFD, and then go back to the process the pre-fetched events.
        //
        // UFFD might receive events in not in their causal order
        // -----------------------------------------------------
        //
        // For example, the guest
        // kernel might first respond to a balloon inflation by freeing some memory, and
        // telling Firecracker about this. Firecracker will then madvise(MADV_DONTNEED) the
        // free memory range, which causes a `remove` event to be sent to UFFD. Then, the
        // guest kernel might immediately fault the page in again (for example because
        // default_on_oom was set). which causes a `pagefault` event to be sent to UFFD.
        //
        // However, the pagefault will be triggered from inside KVM on the vCPU thread, while the
        // balloon device is handled by Firecracker on its VMM thread. This means that potentially
        // this handler can receive the `pagefault` _before_ the `remove` event.
        //
        // This means that the simple "greedy" strategy of simply prefetching _all_ UFFD events
        // to make sure no `remove` event is blocking us can result in the handler acting on
        // the `pagefault` event before the `remove` message (despite the `remove` event being
        // in the causal past of the `pagefault` event), which means that we will fault in a page
        // from the snapshot file, while really we should be faulting in a zero page.
        //
        // In this example handler, we ignore this problem, to avoid
        // complexity (under the assumption that the guest kernel will zero a newly faulted in
        // page anyway). A production handler will most likely want to ensure that `remove`
        // events for a specific range are always handled before `pagefault` events.
        //
        // Lastly, we still need to deal with the race condition where a `remove` event arrives
        // in the UFFD queue after we got done reading all events, in which case we need to go
        // back to reading more events before we can continue processing `pagefault`s.
        let mut deferred_events = Vec::new();
        loop {
            // First, try events that we couldn't handle last round
            let mut events_to_handle = Vec::from_iter(deferred_events.drain(..));
            // Read all events from the userfaultfd.
            while let Some(event) = uffd_handler.read_event().expect("Failed to read uffd_msg") {
                events_to_handle.push(event);
            }
            for event in events_to_handle.drain(..) {
                // We expect to receive either a Page Fault or `remove`
                // event (if the balloon device is enabled).
                match event {
                    userfaultfd::Event::Pagefault { addr, .. } => {
                        // serve_pf returns false when the copy hit EAGAIN (a
                        // pending `remove` event); retry it on the next pass.
                        if !uffd_handler.serve_pf(addr.cast(), uffd_handler.page_size) {
                            deferred_events.push(event);
                        }
                    }
                    userfaultfd::Event::Remove { start, end } => {
                        uffd_handler.unregister_range(start, end)
                    }
                    _ => panic!("Unexpected event on userfaultfd"),
                }
            }
            // We assume that really only the above removed/pagefault interaction can result in
            // deferred events. In that scenario, the loop will always terminate (unless
            // newly arriving `remove` events end up indefinitely blocking it, but there's nothing
            // we can do about that, and it's a largely theoretical problem).
            if deferred_events.is_empty() {
                break;
            }
        }
    });
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/examples/uffd/malicious_handler.rs | src/firecracker/examples/uffd/malicious_handler.rs | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Provides functionality for a malicious page fault handler
//! which panics when a page fault occurs.
mod uffd_utils;
use std::fs::File;
use std::os::unix::net::UnixListener;
use uffd_utils::{Runtime, UffdHandler};
/// Entry point: accepts a single Firecracker connection, then deliberately
/// panics on the first page fault event it receives.
fn main() {
    let mut cli_args = std::env::args();
    let socket_path = cli_args.nth(1).expect("No socket path given");
    let memfile_path = cli_args.next().expect("No memory file given");
    let backing_file = File::open(memfile_path).expect("Cannot open memfile");

    // Get Uffd from UDS. We'll use the uffd to handle PFs for Firecracker.
    let listener = UnixListener::bind(socket_path).expect("Cannot bind to socket path");
    let (stream, _) = listener.accept().expect("Cannot listen on UDS socket");

    Runtime::new(stream, backing_file).run(|handler: &mut UffdHandler| {
        // Read an event from the userfaultfd.
        let event = handler
            .read_event()
            .expect("Failed to read uffd_msg")
            .expect("uffd_msg not ready");
        if matches!(event, userfaultfd::Event::Pagefault { .. }) {
            panic!("Fear me! I am the malicious page fault handler.")
        }
    });
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/examples/uffd/uffd_utils.rs | src/firecracker/examples/uffd/uffd_utils.rs | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![allow(
clippy::cast_possible_truncation,
clippy::cast_sign_loss,
clippy::undocumented_unsafe_blocks,
// Not everything is used by both binaries
dead_code
)]
use std::collections::HashMap;
use std::ffi::c_void;
use std::fs::File;
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd};
use std::os::unix::net::UnixStream;
use std::ptr;
use std::time::Duration;
use serde::{Deserialize, Serialize};
use userfaultfd::{Error, Event, Uffd};
use vmm_sys_util::sock_ctrl_msg::ScmSocket;
// This is the same with the one used in src/vmm.
/// This describes the mapping between Firecracker base virtual address and offset in the
/// buffer or file backend for a guest memory region. It is used to tell an external
/// process/thread where to populate the guest memory data for this range.
///
/// E.g. Guest memory contents for a region of `size` bytes can be found in the backend
/// at `offset` bytes from the beginning, and should be copied/populated into `base_host_address`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct GuestRegionUffdMapping {
    /// Base host virtual address where the guest memory contents for this region
    /// should be copied/populated.
    pub base_host_virt_addr: u64,
    /// Region size, in bytes (region sizes are summed and checked against the
    /// backing file size on handler construction).
    pub size: usize,
    /// Offset in the backend file/buffer where the region contents are.
    pub offset: u64,
    /// The configured page size for this memory region; asserted to be a
    /// power of two when the handler is built.
    pub page_size: usize,
}
impl GuestRegionUffdMapping {
    /// Returns `true` if `fault_page_addr` lies inside this region's half-open
    /// host address range `[base, base + size)`.
    fn contains(&self, fault_page_addr: u64) -> bool {
        let start = self.base_host_virt_addr;
        let end = start + self.size as u64;
        (start..end).contains(&fault_page_addr)
    }
}
/// State needed to serve page faults for one userfaultfd received from Firecracker.
#[derive(Debug)]
pub struct UffdHandler {
    // Guest memory region mappings received over the UDS from Firecracker.
    pub mem_regions: Vec<GuestRegionUffdMapping>,
    // Page size shared by all regions (taken from the first mapping and
    // asserted to be a power of two on construction).
    pub page_size: usize,
    // Host address of the mmap'd backing (snapshot memory) file contents,
    // used as the source for UFFD copies.
    backing_buffer: *const u8,
    // The userfaultfd received from Firecracker over the socket.
    uffd: Uffd,
}
impl UffdHandler {
    /// Performs a single `recv` on `stream`, returning the UTF-8 message body
    /// and, if one accompanied the message, the received file descriptor.
    fn try_get_mappings_and_file(
        stream: &UnixStream,
    ) -> Result<(String, Option<File>), std::io::Error> {
        let mut message_buf = vec![0u8; 1024];
        let (bytes_read, file) = stream.recv_with_fd(&mut message_buf[..])?;
        message_buf.resize(bytes_read, 0);
        // We do not expect to receive non-UTF-8 data from Firecracker, so this is probably
        // an error we can't recover from. Just immediately abort
        let body = String::from_utf8(message_buf.clone()).unwrap_or_else(|_| {
            panic!(
                "Received body is not a utf-8 valid string. Raw bytes received: {message_buf:#?}"
            )
        });
        Ok((body, file))
    }

    /// Receives the memory-mapping description plus the UFFD from Firecracker,
    /// retrying up to 5 times (100ms apart) before panicking.
    fn get_mappings_and_file(stream: &UnixStream) -> (String, File) {
        // Sometimes, reading from the stream succeeds but we don't receive any
        // UFFD descriptor. We don't really have a good understanding why this is
        // happening, but let's try to be a bit more robust and retry a few times
        // before we declare defeat.
        for _ in 1..=5 {
            match Self::try_get_mappings_and_file(stream) {
                Ok((body, Some(file))) => {
                    return (body, file);
                }
                Ok((body, None)) => {
                    println!("Didn't receive UFFD over socket. We received: '{body}'. Retrying...");
                }
                Err(err) => {
                    println!("Could not get UFFD and mapping from Firecracker: {err}. Retrying...");
                }
            }
            std::thread::sleep(Duration::from_millis(100));
        }
        panic!("Could not get UFFD and mappings after 5 retries");
    }

    /// Builds a handler from the mappings and UFFD received over `stream`.
    ///
    /// `backing_buffer`/`size` describe the host mapping of the backing memory
    /// file; the sum of all received region sizes must equal `size`.
    ///
    /// # Panics
    /// Panics if the message cannot be deserialized, contains no regions,
    /// the region sizes don't add up to `size`, or the page size is not a
    /// power of two.
    pub fn from_unix_stream(stream: &UnixStream, backing_buffer: *const u8, size: usize) -> Self {
        let (body, file) = Self::get_mappings_and_file(stream);
        let mappings =
            serde_json::from_str::<Vec<GuestRegionUffdMapping>>(&body).unwrap_or_else(|_| {
                panic!("Cannot deserialize memory mappings. Received body: {body}")
            });
        let memsize: usize = mappings.iter().map(|r| r.size).sum();
        // Page size is the same for all memory regions, so just grab the first one
        let first_mapping = mappings.first().unwrap_or_else(|| {
            panic!(
                "Cannot get the first mapping. Mappings size is {}. Received body: {body}",
                mappings.len()
            )
        });
        let page_size = first_mapping.page_size;
        // Make sure memory size matches backing data size.
        assert_eq!(memsize, size);
        assert!(page_size.is_power_of_two());
        // SAFETY: the fd was just received over the socket; we take sole ownership here.
        let uffd = unsafe { Uffd::from_raw_fd(file.into_raw_fd()) };
        Self {
            mem_regions: mappings,
            page_size,
            backing_buffer,
            uffd,
        }
    }

    /// Reads the next pending event from the userfaultfd, if any.
    pub fn read_event(&mut self) -> Result<Option<Event>, Error> {
        self.uffd.read_event()
    }

    /// Unregisters the page-aligned, non-empty range `[start, end)` from the
    /// userfaultfd (used when handling `remove` events).
    pub fn unregister_range(&mut self, start: *mut c_void, end: *mut c_void) {
        assert!(
            (start as usize).is_multiple_of(self.page_size)
                && (end as usize).is_multiple_of(self.page_size)
                && end > start
        );
        // SAFETY: start and end are valid and provided by UFFD
        let len = unsafe { end.offset_from_unsigned(start) };
        self.uffd
            .unregister(start, len)
            .expect("range should be valid");
    }

    /// Serves a page fault at `addr` by locating its containing region and
    /// populating `len` bytes from the backing file. Returns `false` when the
    /// copy must be retried later (see `populate_from_file`).
    ///
    /// # Panics
    /// Panics if `addr` falls outside every known guest region mapping.
    pub fn serve_pf(&mut self, addr: *mut u8, len: usize) -> bool {
        // Find the start of the page that the current faulting address belongs to.
        let dst = (addr as usize & !(self.page_size - 1)) as *mut libc::c_void;
        let fault_page_addr = dst as u64;
        for region in self.mem_regions.iter() {
            if region.contains(fault_page_addr) {
                return self.populate_from_file(region, fault_page_addr, len);
            }
        }
        panic!(
            "Could not find addr: {:?} within guest region mappings.",
            addr
        );
    }

    /// Copies `len` bytes for the fault at `dst` from the mmap'd backing file
    /// via the UFFD copy ioctl. Returns `false` when the copy hit EAGAIN and
    /// should be retried; treats EEXIST (already populated) as success.
    fn populate_from_file(&self, region: &GuestRegionUffdMapping, dst: u64, len: usize) -> bool {
        let offset = dst - region.base_host_virt_addr;
        let src = self.backing_buffer as u64 + region.offset + offset;
        unsafe {
            match self.uffd.copy(src as *const _, dst as *mut _, len, true) {
                // Make sure the UFFD copied some bytes.
                Ok(value) => assert!(value > 0),
                // Catch EAGAIN errors, which occur when a `remove` event lands in the UFFD
                // queue while we're processing `pagefault` events.
                // The weird cast is because the `bytes_copied` field is based on the
                // `uffdio_copy->copy` field, which is a signed 64 bit integer, and if something
                // goes wrong, it gets set to a -errno code. However, uffd-rs always casts this
                // value to an unsigned `usize`, which scrambled the errno.
                Err(Error::PartiallyCopied(bytes_copied))
                    if bytes_copied == 0 || bytes_copied == (-libc::EAGAIN) as usize =>
                {
                    return false;
                }
                Err(Error::CopyFailed(errno))
                    if std::io::Error::from(errno).raw_os_error().unwrap() == libc::EEXIST => {}
                Err(e) => {
                    panic!("Uffd copy failed: {e:?}");
                }
            }
        };
        true
    }
}
/// Event-loop state: polls the control socket for new UFFDs and dispatches
/// page-fault handling for every registered UFFD.
#[derive(Debug)]
pub struct Runtime {
    // Control socket connected to Firecracker (delivers mappings + UFFDs).
    stream: UnixStream,
    // The memory file backing the guest; kept open so the mmap stays valid.
    backing_file: File,
    // Host address of the read-only mapping of `backing_file`.
    backing_memory: *mut u8,
    // Length of the mapping, in bytes.
    backing_memory_size: usize,
    // Registered handlers, keyed by their UFFD's raw fd.
    uffds: HashMap<i32, UffdHandler>,
}
impl Runtime {
    /// Creates a runtime over the control `stream`, mmap'ing the whole backing
    /// file read-only (`MAP_PRIVATE | MAP_POPULATE`) as the copy source.
    ///
    /// # Panics
    /// Panics if the file metadata cannot be read or the mmap fails.
    pub fn new(stream: UnixStream, backing_file: File) -> Self {
        let file_meta = backing_file
            .metadata()
            .expect("can not get backing file metadata");
        let backing_memory_size = file_meta.len() as usize;
        // # Safety:
        // File size and fd are valid
        let ret = unsafe {
            libc::mmap(
                ptr::null_mut(),
                backing_memory_size,
                libc::PROT_READ,
                libc::MAP_PRIVATE | libc::MAP_POPULATE,
                backing_file.as_raw_fd(),
                0,
            )
        };
        if ret == libc::MAP_FAILED {
            panic!("mmap on backing file failed");
        }
        Self {
            stream,
            backing_file,
            backing_memory: ret.cast(),
            backing_memory_size,
            uffds: HashMap::default(),
        }
    }

    /// Queries `SO_PEERCRED` on the control socket to learn the peer
    /// (Firecracker) process credentials; panics on failure.
    fn peer_process_credentials(&self) -> libc::ucred {
        let mut creds: libc::ucred = libc::ucred {
            pid: 0,
            gid: 0,
            uid: 0,
        };
        let mut creds_size = size_of::<libc::ucred>() as u32;
        // getsockopt(SO_PEERCRED) fills `creds` with the peer's pid/uid/gid.
        let ret = unsafe {
            libc::getsockopt(
                self.stream.as_raw_fd(),
                libc::SOL_SOCKET,
                libc::SO_PEERCRED,
                (&raw mut creds).cast::<c_void>(),
                &raw mut creds_size,
            )
        };
        if ret != 0 {
            panic!("Failed to get peer process credentials");
        }
        creds
    }

    /// Installs a panic hook that SIGKILLs the peer Firecracker process (by
    /// the pid learned via `SO_PEERCRED`) before delegating to the previously
    /// installed hook.
    pub fn install_panic_hook(&self) {
        let peer_creds = self.peer_process_credentials();
        let default_panic_hook = std::panic::take_hook();
        std::panic::set_hook(Box::new(move |panic_info| {
            let r = unsafe { libc::kill(peer_creds.pid, libc::SIGKILL) };
            if r != 0 {
                eprintln!("Failed to kill Firecracker process from panic hook");
            }
            default_panic_hook(panic_info);
        }));
    }

    /// Polls the `UnixStream` and UFFD fds in a loop.
    /// When stream is polled, new uffd is retrieved.
    /// When uffd is polled, page fault is handled by
    /// calling `pf_event_dispatch` with corresponding
    /// uffd object passed in.
    pub fn run(&mut self, pf_event_dispatch: impl Fn(&mut UffdHandler)) {
        let mut pollfds = vec![];
        // Poll the stream for incoming uffds
        pollfds.push(libc::pollfd {
            fd: self.stream.as_raw_fd(),
            events: libc::POLLIN,
            revents: 0,
        });
        loop {
            let pollfd_ptr = pollfds.as_mut_ptr();
            let pollfd_size = pollfds.len() as u64;
            // # Safety:
            // Pollfds vector is valid
            // Timeout -1: block until at least one fd is readable.
            let mut nready = unsafe { libc::poll(pollfd_ptr, pollfd_size, -1) };
            if nready == -1 {
                panic!("Could not poll for events!")
            }
            for i in 0..pollfds.len() {
                // `nready` counts fds with events; stop scanning once all seen.
                if nready == 0 {
                    break;
                }
                if pollfds[i].revents & libc::POLLIN != 0 {
                    nready -= 1;
                    if pollfds[i].fd == self.stream.as_raw_fd() {
                        // Handle new uffd from stream
                        let handler = UffdHandler::from_unix_stream(
                            &self.stream,
                            self.backing_memory,
                            self.backing_memory_size,
                        );
                        pollfds.push(libc::pollfd {
                            fd: handler.uffd.as_raw_fd(),
                            events: libc::POLLIN,
                            revents: 0,
                        });
                        self.uffds.insert(handler.uffd.as_raw_fd(), handler);
                    } else {
                        // Handle one of uffd page faults
                        pf_event_dispatch(self.uffds.get_mut(&pollfds[i].fd).unwrap());
                    }
                }
            }
            // If connection is closed, we can skip the socket from being polled.
            pollfds.retain(|pollfd| pollfd.revents & (libc::POLLRDHUP | libc::POLLHUP) == 0);
        }
    }
}
#[cfg(test)]
mod tests {
    use std::mem::MaybeUninit;
    use std::os::unix::net::UnixListener;

    use vmm_sys_util::tempdir::TempDir;
    use vmm_sys_util::tempfile::TempFile;

    use super::*;

    // Test-only: allows moving the raw-pointer-holding `Runtime` into the
    // spawned thread below. NOTE(review): the main thread also reads through
    // `runtime_ptr` while that thread runs — a deliberate test shortcut.
    unsafe impl Send for Runtime {}

    #[test]
    fn test_runtime() {
        let tmp_dir = TempDir::new().unwrap();
        let dummy_socket_path = tmp_dir.as_path().join("dummy_socket");
        let dummy_socket_path_clone = dummy_socket_path.clone();

        let mut uninit_runtime = Box::new(MaybeUninit::<Runtime>::uninit());
        // We will use this pointer to bypass a bunch of Rust Safety
        // for the sake of convenience.
        let runtime_ptr = uninit_runtime.as_ptr().cast::<Runtime>();

        let runtime_thread = std::thread::spawn(move || {
            let tmp_file = TempFile::new().unwrap();
            tmp_file.as_file().set_len(0x1000).unwrap();
            let dummy_mem_path = tmp_file.as_path();
            let file = File::open(dummy_mem_path).expect("Cannot open memfile");
            let listener =
                UnixListener::bind(dummy_socket_path).expect("Cannot bind to socket path");
            let (stream, _) = listener.accept().expect("Cannot listen on UDS socket");
            // Update runtime with actual runtime
            let runtime = uninit_runtime.write(Runtime::new(stream, file));
            runtime.run(|_: &mut UffdHandler| {});
        });

        // wait for runtime thread to initialize itself
        std::thread::sleep(std::time::Duration::from_millis(100));

        let stream =
            UnixStream::connect(dummy_socket_path_clone).expect("Cannot connect to the socket");

        // A single region matching the 0x1000-byte memfile created above.
        let dummy_memory_region = vec![GuestRegionUffdMapping {
            base_host_virt_addr: 0,
            size: 0x1000,
            offset: 0,
            page_size: 4096,
        }];
        let dummy_memory_region_json = serde_json::to_string(&dummy_memory_region).unwrap();

        let dummy_file_1 = TempFile::new().unwrap();
        let dummy_fd_1 = dummy_file_1.as_file().as_raw_fd();
        stream
            .send_with_fd(dummy_memory_region_json.as_bytes(), dummy_fd_1)
            .unwrap();
        // wait for the runtime thread to process message
        std::thread::sleep(std::time::Duration::from_millis(100));
        unsafe {
            assert_eq!((*runtime_ptr).uffds.len(), 1);
        }

        let dummy_file_2 = TempFile::new().unwrap();
        let dummy_fd_2 = dummy_file_2.as_file().as_raw_fd();
        stream
            .send_with_fd(dummy_memory_region_json.as_bytes(), dummy_fd_2)
            .unwrap();
        // wait for the runtime thread to process message
        std::thread::sleep(std::time::Duration::from_millis(100));
        unsafe {
            assert_eq!((*runtime_ptr).uffds.len(), 2);
        }

        // there is no way to properly stop runtime, so
        // we send a message with an incorrect memory region
        // to cause runtime thread to panic
        let error_memory_region = vec![GuestRegionUffdMapping {
            base_host_virt_addr: 0,
            size: 0,
            offset: 0,
            page_size: 4096,
        }];
        let error_memory_region_json = serde_json::to_string(&error_memory_region).unwrap();
        stream
            .send_with_fd(error_memory_region_json.as_bytes(), dummy_fd_2)
            .unwrap();

        runtime_thread.join().unwrap_err();
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/examples/uffd/fault_all_handler.rs | src/firecracker/examples/uffd/fault_all_handler.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Provides functionality for a userspace page fault handler
//! which loads the whole region from the backing memory file
//! when a page fault occurs.
mod uffd_utils;
use std::fs::File;
use std::os::unix::net::UnixListener;
use uffd_utils::{Runtime, UffdHandler};
use utils::time::{ClockType, get_time_us};
fn main() {
let mut args = std::env::args();
let uffd_sock_path = args.nth(1).expect("No socket path given");
let mem_file_path = args.next().expect("No memory file given");
let file = File::open(mem_file_path).expect("Cannot open memfile");
// Get Uffd from UDS. We'll use the uffd to handle PFs for Firecracker.
let listener = UnixListener::bind(uffd_sock_path).expect("Cannot bind to socket path");
let (stream, _) = listener.accept().expect("Cannot listen on UDS socket");
let mut runtime = Runtime::new(stream, file);
runtime.install_panic_hook();
runtime.run(|uffd_handler: &mut UffdHandler| {
// Read an event from the userfaultfd.
let event = uffd_handler
.read_event()
.expect("Failed to read uffd_msg")
.expect("uffd_msg not ready");
match event {
userfaultfd::Event::Pagefault { .. } => {
let start = get_time_us(ClockType::Monotonic);
for region in uffd_handler.mem_regions.clone() {
uffd_handler.serve_pf(region.base_host_virt_addr as _, region.size);
}
let end = get_time_us(ClockType::Monotonic);
println!("Finished Faulting All: {}us", end - start);
}
_ => panic!("Unexpected event on userfaultfd"),
}
});
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/examples/seccomp/harmless.rs | src/firecracker/examples/seccomp/harmless.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
fn main() {
// SAFETY: This is just an example to demonstrate syscall filtering.
// The syscall is safe because we're only writing a static string to a file descriptor.
unsafe {
// Harmless print to standard output.
libc::syscall(libc::SYS_write, libc::STDOUT_FILENO, "Hello, world!\n", 14);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/examples/seccomp/panic.rs | src/firecracker/examples/seccomp/panic.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::env::args;
use std::fs::File;
use vmm::seccomp::{apply_filter, deserialize_binary};
fn main() {
let args: Vec<String> = args().collect();
let bpf_path = &args[1];
let filter_thread = &args[2];
let filter_file = File::open(bpf_path).unwrap();
let map = deserialize_binary(&filter_file).unwrap();
apply_filter(map.get(filter_thread).unwrap()).unwrap();
panic!("Expected panic.");
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/examples/seccomp/jailer.rs | src/firecracker/examples/seccomp/jailer.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::env::args;
use std::fs::File;
use std::os::unix::process::CommandExt;
use std::process::{Command, Stdio};
use vmm::seccomp::{apply_filter, deserialize_binary};
fn main() {
let args: Vec<String> = args().collect();
let exec_file = &args[1];
let bpf_path = &args[2];
let filter_file = File::open(bpf_path).unwrap();
let map = deserialize_binary(&filter_file).unwrap();
// Loads filters.
apply_filter(map.get("main").unwrap()).unwrap();
let _ = Command::new(exec_file)
.stdin(Stdio::inherit())
.stdout(Stdio::inherit())
.stderr(Stdio::inherit())
.exec();
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/firecracker/examples/seccomp/malicious.rs | src/firecracker/examples/seccomp/malicious.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
fn main() {
// SAFETY: This is just an example to demonstrate syscall filtering.
// The syscall is safe because we're only writing a static string to a file descriptor.
unsafe {
// In this example, the malicious component is outputting to standard input.
libc::syscall(libc::SYS_write, libc::STDIN_FILENO, "Hello, world!\n", 14);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/log-instrument-macros/src/lib.rs | src/log-instrument-macros/src/lib.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![warn(clippy::pedantic)]
extern crate proc_macro;
use quote::quote;
use syn::parse_quote;
/// Adds `log::trace!` events at the start and end of an attributed function.
///
/// # Panics
///
/// When applied to anything other than a function.
#[proc_macro_attribute]
pub fn instrument(
_attr: proc_macro::TokenStream,
item: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
let input = syn::parse_macro_input!(item as syn::Item);
let syn::Item::Fn(mut item_fn) = input else {
panic!("Instrument macro can only be on functions.")
};
let clippy_attr: syn::Attribute = parse_quote! {
#[allow(clippy::items_after_statements)]
};
item_fn.attrs.push(clippy_attr);
let item_fn_ident = item_fn.sig.ident.to_string();
let new_stmt: syn::Stmt =
parse_quote! { let __ = log_instrument::__Instrument::new(#item_fn_ident); };
item_fn.block.stmts.insert(0, new_stmt);
let out = quote! { #item_fn };
proc_macro::TokenStream::from(out)
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/utils/src/lib.rs | src/utils/src/lib.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
pub mod arg_parser;
pub mod time;
pub mod validators;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/utils/src/time.rs | src/utils/src/time.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fs::File;
use std::io::{ErrorKind, Read};
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::time::Duration;
use std::{fmt, ptr};
/// Constant to convert seconds to nanoseconds.
pub const NANOS_PER_SECOND: u64 = 1_000_000_000;
/// Constant to convert milliseconds to nanoseconds.
pub const NANOS_PER_MILLISECOND: u64 = 1_000_000;
/// Wrapper over `libc::clockid_t` to specify Linux Kernel clock source.
#[derive(Debug)]
pub enum ClockType {
/// Equivalent to `libc::CLOCK_MONOTONIC`.
Monotonic,
/// Equivalent to `libc::CLOCK_REALTIME`.
Real,
/// Equivalent to `libc::CLOCK_PROCESS_CPUTIME_ID`.
ProcessCpu,
/// Equivalent to `libc::CLOCK_THREAD_CPUTIME_ID`.
ThreadCpu,
}
impl From<ClockType> for libc::clockid_t {
fn from(clock_type: ClockType) -> Self {
match clock_type {
ClockType::Monotonic => libc::CLOCK_MONOTONIC,
ClockType::Real => libc::CLOCK_REALTIME,
ClockType::ProcessCpu => libc::CLOCK_PROCESS_CPUTIME_ID,
ClockType::ThreadCpu => libc::CLOCK_THREAD_CPUTIME_ID,
}
}
}
/// Structure representing the date in local time with nanosecond precision.
#[derive(Debug)]
pub struct LocalTime {
/// Seconds in current minute.
sec: i32,
/// Minutes in current hour.
min: i32,
/// Hours in current day, 24H format.
hour: i32,
/// Days in current month.
mday: i32,
/// Months in current year.
mon: i32,
/// Years passed since 1900 BC.
year: i32,
/// Nanoseconds in current second.
nsec: i64,
}
impl LocalTime {
/// Returns the [LocalTime](struct.LocalTime.html) structure for the calling moment.
pub fn now() -> LocalTime {
let mut timespec = libc::timespec {
tv_sec: 0,
tv_nsec: 0,
};
let mut tm: libc::tm = libc::tm {
tm_sec: 0,
tm_min: 0,
tm_hour: 0,
tm_mday: 0,
tm_mon: 0,
tm_year: 0,
tm_wday: 0,
tm_yday: 0,
tm_isdst: 0,
tm_gmtoff: 0,
tm_zone: std::ptr::null(),
};
// SAFETY: Safe because the parameters are valid.
unsafe {
libc::clock_gettime(libc::CLOCK_REALTIME, &mut timespec);
libc::localtime_r(×pec.tv_sec, &mut tm);
}
LocalTime {
sec: tm.tm_sec,
min: tm.tm_min,
hour: tm.tm_hour,
mday: tm.tm_mday,
mon: tm.tm_mon,
year: tm.tm_year,
nsec: timespec.tv_nsec,
}
}
}
impl fmt::Display for LocalTime {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}-{:02}-{:02}T{:02}:{:02}:{:02}.{:09}",
self.year + 1900,
self.mon + 1,
self.mday,
self.hour,
self.min,
self.sec,
self.nsec
)
}
}
/// Holds a micro-second resolution timestamp with both the real time and cpu time.
#[derive(Debug, Clone)]
pub struct TimestampUs {
/// Real time in microseconds.
pub time_us: u64,
/// Cpu time in microseconds.
pub cputime_us: u64,
}
impl Default for TimestampUs {
fn default() -> TimestampUs {
TimestampUs {
time_us: get_time_us(ClockType::Monotonic),
cputime_us: get_time_us(ClockType::ProcessCpu),
}
}
}
/// Returns a timestamp in nanoseconds from a monotonic clock.
///
/// Uses `_rdstc` on `x86_64` and [`get_time`](fn.get_time.html) on other architectures.
pub fn timestamp_cycles() -> u64 {
#[cfg(target_arch = "x86_64")]
// SAFETY: Safe because there's nothing that can go wrong with this call.
unsafe {
std::arch::x86_64::_rdtsc()
}
#[cfg(not(target_arch = "x86_64"))]
{
get_time_ns(ClockType::Monotonic)
}
}
/// Returns a timestamp in nanoseconds based on the provided clock type.
///
/// # Arguments
///
/// * `clock_type` - Identifier of the Linux Kernel clock on which to act.
pub fn get_time_ns(clock_type: ClockType) -> u64 {
let mut time_struct = libc::timespec {
tv_sec: 0,
tv_nsec: 0,
};
// SAFETY: Safe because the parameters are valid.
unsafe { libc::clock_gettime(clock_type.into(), &mut time_struct) };
u64::try_from(seconds_to_nanoseconds(time_struct.tv_sec).expect("Time conversion overflow"))
.unwrap()
+ u64::try_from(time_struct.tv_nsec).unwrap()
}
/// Returns a timestamp in microseconds based on the provided clock type.
///
/// # Arguments
///
/// * `clock_type` - Identifier of the Linux Kernel clock on which to act.
pub fn get_time_us(clock_type: ClockType) -> u64 {
get_time_ns(clock_type) / 1000
}
/// Returns a timestamp in milliseconds based on the provided clock type.
///
/// # Arguments
///
/// * `clock_type` - Identifier of the Linux Kernel clock on which to act.
pub fn get_time_ms(clock_type: ClockType) -> u64 {
get_time_ns(clock_type) / NANOS_PER_MILLISECOND
}
/// Converts a timestamp in seconds to an equivalent one in nanoseconds.
/// Returns `None` if the conversion overflows.
///
/// # Arguments
///
/// * `value` - Timestamp in seconds.
pub fn seconds_to_nanoseconds(value: i64) -> Option<i64> {
value.checked_mul(i64::try_from(NANOS_PER_SECOND).unwrap())
}
/// Wrapper for timerfd
#[derive(Debug)]
pub struct TimerFd(File);
#[allow(clippy::new_without_default)]
impl TimerFd {
/// Creates new MONOTONIC and NONBLOCK timerfd
pub fn new() -> Self {
// SAFETY: all arguments are valid constants
let fd = unsafe {
libc::timerfd_create(
libc::CLOCK_MONOTONIC,
libc::TFD_NONBLOCK | libc::TFD_CLOEXEC,
)
};
assert!(
0 <= fd,
"TimerFd creation failed: {:#}",
std::io::Error::last_os_error()
);
// SAFETY: we just created valid fd
TimerFd(unsafe { File::from_raw_fd(fd) })
}
/// Arm the timer to be triggered after `duration` and then
/// at optional `interval`
pub fn arm(&mut self, duration: Duration, interval: Option<Duration>) {
#[allow(clippy::cast_possible_wrap)]
let spec = libc::itimerspec {
it_value: libc::timespec {
tv_sec: duration.as_secs() as i64,
tv_nsec: duration.subsec_nanos() as i64,
},
it_interval: if let Some(interval) = interval {
libc::timespec {
tv_sec: interval.as_secs() as i64,
tv_nsec: interval.subsec_nanos() as i64,
}
} else {
libc::timespec {
tv_sec: 0,
tv_nsec: 0,
}
},
};
// SAFETY: Safe because this doesn't modify any memory and we check the return value.
let ret = unsafe { libc::timerfd_settime(self.as_raw_fd(), 0, &spec, ptr::null_mut()) };
assert!(
0 <= ret,
"TimerFd arm failed: {:#}",
std::io::Error::last_os_error()
);
}
/// Read the value from the timerfd. Since it is always created with NONBLOCK flag,
/// this function does not block.
pub fn read(&mut self) -> u64 {
let mut buf = [0u8; size_of::<u64>()];
match self.0.read(buf.as_mut_slice()) {
Ok(_) => u64::from_ne_bytes(buf),
Err(inner) if inner.kind() == ErrorKind::WouldBlock => 0,
Err(err) => panic!("TimerFd read failed: {err:#}"),
}
}
/// Tell if the timer is currently armed.
pub fn is_armed(&self) -> bool {
// SAFETY: Zero init of a PDO type.
let mut spec: libc::itimerspec = unsafe { std::mem::zeroed() };
// SAFETY: Safe because timerfd_gettime is trusted to only modify `spec`.
let ret = unsafe { libc::timerfd_gettime(self.as_raw_fd(), &mut spec) };
assert!(
0 <= ret,
"TimerFd arm failed: {:#}",
std::io::Error::last_os_error()
);
spec.it_value.tv_sec != 0 || spec.it_value.tv_nsec != 0
}
}
impl AsRawFd for TimerFd {
fn as_raw_fd(&self) -> RawFd {
self.0.as_raw_fd()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_get_time() {
for _ in 0..1000 {
assert!(get_time_ns(ClockType::Monotonic) <= get_time_ns(ClockType::Monotonic));
}
for _ in 0..1000 {
assert!(get_time_ns(ClockType::ProcessCpu) <= get_time_ns(ClockType::ProcessCpu));
}
for _ in 0..1000 {
assert!(get_time_ns(ClockType::ThreadCpu) <= get_time_ns(ClockType::ThreadCpu));
}
assert_ne!(get_time_ns(ClockType::Real), 0);
assert_ne!(get_time_us(ClockType::Real), 0);
assert!(get_time_ns(ClockType::Real) / 1000 <= get_time_us(ClockType::Real));
assert!(
get_time_ns(ClockType::Real) / NANOS_PER_MILLISECOND <= get_time_ms(ClockType::Real)
);
}
#[test]
fn test_local_time_display() {
let local_time = LocalTime {
sec: 30,
min: 15,
hour: 10,
mday: 4,
mon: 6,
year: 119,
nsec: 123_456_789,
};
assert_eq!(
String::from("2019-07-04T10:15:30.123456789"),
local_time.to_string()
);
let local_time = LocalTime {
sec: 5,
min: 5,
hour: 5,
mday: 23,
mon: 7,
year: 44,
nsec: 123,
};
assert_eq!(
String::from("1944-08-23T05:05:05.000000123"),
local_time.to_string()
);
let local_time = LocalTime::now();
assert!(local_time.mon >= 0 && local_time.mon <= 11);
}
#[test]
fn test_seconds_to_nanoseconds() {
assert_eq!(
u64::try_from(seconds_to_nanoseconds(100).unwrap()).unwrap(),
100 * NANOS_PER_SECOND
);
assert!(seconds_to_nanoseconds(9_223_372_037).is_none());
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/utils/src/arg_parser.rs | src/utils/src/arg_parser.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::collections::BTreeMap;
use std::{env, fmt, result};
pub type Result<T> = result::Result<T, UtilsArgParserError>;
const ARG_PREFIX: &str = "--";
const ARG_SEPARATOR: &str = "--";
const HELP_ARG: &str = "--help";
const SHORT_HELP_ARG: &str = "-h";
const VERSION_ARG: &str = "--version";
/// Errors associated with parsing and validating arguments.
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum UtilsArgParserError {
/// Argument '{1}' cannot be used together with argument '{0}'.
ForbiddenArgument(String, String),
/// Argument '{0}' required, but not found.
MissingArgument(String),
/// The argument '{0}' requires a value, but none was supplied.
MissingValue(String),
/// Found argument '{0}' which wasn't expected, or isn't valid in this context.
UnexpectedArgument(String),
/// The argument '{0}' was provided more than once.
DuplicateArgument(String),
}
/// Keep information about the argument parser.
#[derive(Debug, Clone, Default)]
pub struct ArgParser<'a> {
arguments: Arguments<'a>,
}
impl<'a> ArgParser<'a> {
/// Create a new ArgParser instance.
pub fn new() -> Self {
ArgParser::default()
}
/// Add an argument with its associated `Argument` in `arguments`.
pub fn arg(mut self, argument: Argument<'a>) -> Self {
self.arguments.insert_arg(argument);
self
}
/// Parse the command line arguments.
pub fn parse_from_cmdline(&mut self) -> Result<()> {
self.arguments.parse_from_cmdline()
}
/// Concatenate the `help` information of every possible argument
/// in a message that represents the correct command line usage
/// for the application.
pub fn formatted_help(&self) -> String {
let mut help_builder = vec![];
let required_arguments = self.format_arguments(true);
if !required_arguments.is_empty() {
help_builder.push("required arguments:".to_string());
help_builder.push(required_arguments);
}
let optional_arguments = self.format_arguments(false);
if !optional_arguments.is_empty() {
// Add line break if `required_arguments` is pushed.
if !help_builder.is_empty() {
help_builder.push("".to_string());
}
help_builder.push("optional arguments:".to_string());
help_builder.push(optional_arguments);
}
help_builder.join("\n")
}
/// Return a reference to `arguments` field.
pub fn arguments(&self) -> &Arguments<'_> {
&self.arguments
}
// Filter arguments by whether or not it is required.
// Align arguments by setting width to length of the longest argument.
fn format_arguments(&self, is_required: bool) -> String {
let filtered_arguments = self
.arguments
.args
.values()
.filter(|arg| is_required == arg.required)
.collect::<Vec<_>>();
let max_arg_width = filtered_arguments
.iter()
.map(|arg| arg.format_name().len())
.max()
.unwrap_or(0);
filtered_arguments
.into_iter()
.map(|arg| arg.format_help(max_arg_width))
.collect::<Vec<_>>()
.join("\n")
}
}
/// Stores the characteristics of the `name` command line argument.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Argument<'a> {
name: &'a str,
required: bool,
requires: Option<&'a str>,
forbids: Vec<&'a str>,
takes_value: bool,
allow_multiple: bool,
default_value: Option<Value>,
help: Option<&'a str>,
user_value: Option<Value>,
}
impl<'a> Argument<'a> {
/// Create a new `Argument` that keeps the necessary information for an argument.
pub fn new(name: &'a str) -> Argument<'a> {
Argument {
name,
required: false,
requires: None,
forbids: vec![],
takes_value: false,
allow_multiple: false,
default_value: None,
help: None,
user_value: None,
}
}
/// Set if the argument *must* be provided by user.
pub fn required(mut self, required: bool) -> Self {
self.required = required;
self
}
/// Add `other_arg` as a required parameter when `self` is specified.
pub fn requires(mut self, other_arg: &'a str) -> Self {
self.requires = Some(other_arg);
self
}
/// Add `other_arg` as a forbidden parameter when `self` is specified.
pub fn forbids(mut self, args: Vec<&'a str>) -> Self {
self.forbids = args;
self
}
/// If `takes_value` is true, then the user *must* provide a value for the
/// argument, otherwise that argument is a flag.
pub fn takes_value(mut self, takes_value: bool) -> Self {
self.takes_value = takes_value;
self
}
/// If `allow_multiple` is true, then the user can provide multiple values for the
/// argument (e.g --arg val1 --arg val2). It sets the `takes_value` option to true,
/// so the user must provides at least one value.
pub fn allow_multiple(mut self, allow_multiple: bool) -> Self {
if allow_multiple {
self.takes_value = true;
}
self.allow_multiple = allow_multiple;
self
}
/// Keep a default value which will be used if the user didn't provide a value for
/// the argument.
pub fn default_value(mut self, default_value: &'a str) -> Self {
self.default_value = Some(Value::Single(String::from(default_value)));
self
}
/// Set the information that will be displayed for the argument when user passes
/// `--help` flag.
pub fn help(mut self, help: &'a str) -> Self {
self.help = Some(help);
self
}
fn format_help(&self, arg_width: usize) -> String {
let mut help_builder = vec![];
let arg = self.format_name();
help_builder.push(format!("{:<arg_width$}", arg, arg_width = arg_width));
// Add three whitespaces between the argument and its help message for readability.
help_builder.push(" ".to_string());
match (self.help, &self.default_value) {
(Some(help), Some(default_value)) => {
help_builder.push(format!("{} [default: {}]", help, default_value))
}
(Some(help), None) => help_builder.push(help.to_string()),
(None, Some(default_value)) => {
help_builder.push(format!("[default: {}]", default_value))
}
(None, None) => (),
};
help_builder.concat()
}
fn format_name(&self) -> String {
if self.takes_value {
format!(" --{name} <{name}>", name = self.name)
} else {
format!(" --{}", self.name)
}
}
}
/// Represents the type of argument, and the values it takes.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Value {
Flag,
Single(String),
Multiple(Vec<String>),
}
impl Value {
fn as_single_value(&self) -> Option<&String> {
match self {
Value::Single(s) => Some(s),
_ => None,
}
}
fn as_flag(&self) -> bool {
matches!(self, Value::Flag)
}
fn as_multiple(&self) -> Option<&[String]> {
match self {
Value::Multiple(v) => Some(v),
_ => None,
}
}
}
impl fmt::Display for Value {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Value::Flag => write!(f, "true"),
Value::Single(s) => write!(f, "\"{}\"", s),
Value::Multiple(v) => write!(f, "{:?}", v),
}
}
}
/// Stores the arguments of the parser.
#[derive(Debug, Clone, Default)]
pub struct Arguments<'a> {
// A BTreeMap in which the key is an argument and the value is its associated `Argument`.
args: BTreeMap<&'a str, Argument<'a>>,
// The arguments specified after `--` (i.e. end of command options).
extra_args: Vec<String>,
}
impl<'a> Arguments<'a> {
/// Add an argument with its associated `Argument` in `args`.
fn insert_arg(&mut self, argument: Argument<'a>) {
self.args.insert(argument.name, argument);
}
/// Get the value for the argument specified by `arg_name`.
fn value_of(&self, arg_name: &'static str) -> Option<&Value> {
self.args.get(arg_name).and_then(|argument| {
argument
.user_value
.as_ref()
.or(argument.default_value.as_ref())
})
}
/// Return the value of an argument if the argument exists and has the type
/// String. Otherwise return None.
pub fn single_value(&self, arg_name: &'static str) -> Option<&String> {
self.value_of(arg_name)
.and_then(|arg_value| arg_value.as_single_value())
}
/// Return whether an `arg_name` argument of type flag exists.
pub fn flag_present(&self, arg_name: &'static str) -> bool {
match self.value_of(arg_name) {
Some(v) => v.as_flag(),
None => false,
}
}
/// Return the value of an argument if the argument exists and has the type
/// vector. Otherwise return None.
pub fn multiple_values(&self, arg_name: &'static str) -> Option<&[String]> {
self.value_of(arg_name)
.and_then(|arg_value| arg_value.as_multiple())
}
/// Get the extra arguments (all arguments after `--`).
pub fn extra_args(&self) -> Vec<String> {
self.extra_args.clone()
}
// Split `args` in two slices: one with the actual arguments of the process and the other with
// the extra arguments, meaning all parameters specified after `--`.
fn split_args(args: &[String]) -> (&[String], &[String]) {
if let Some(index) = args.iter().position(|arg| arg == ARG_SEPARATOR) {
return (&args[..index], &args[index + 1..]);
}
(args, &[])
}
/// Collect the command line arguments and the values provided for them.
pub fn parse_from_cmdline(&mut self) -> Result<()> {
let args: Vec<String> = env::args().collect();
self.parse(&args)
}
/// Clear split between the actual arguments of the process, the extra arguments if any
/// and the `--help` and `--version` arguments if present.
pub fn parse(&mut self, args: &[String]) -> Result<()> {
// Skipping the first element of `args` as it is the name of the binary.
let (args, extra_args) = Arguments::split_args(&args[1..]);
self.extra_args = extra_args.to_vec();
// If `--help` or `-h`is provided as a parameter, we artificially skip the parsing of other
// command line arguments by adding just the help argument to the parsed list and
// returning.
if args.contains(&HELP_ARG.to_string()) || args.contains(&SHORT_HELP_ARG.to_string()) {
let mut help_arg = Argument::new("help").help("Show the help message.");
help_arg.user_value = Some(Value::Flag);
self.insert_arg(help_arg);
return Ok(());
}
// If `--version` is provided as a parameter, we artificially skip the parsing of other
// command line arguments by adding just the version argument to the parsed list and
// returning.
if args.contains(&VERSION_ARG.to_string()) {
let mut version_arg = Argument::new("version");
version_arg.user_value = Some(Value::Flag);
self.insert_arg(version_arg);
return Ok(());
}
// Otherwise, we continue the parsing of the other arguments.
self.populate_args(args)
}
// Check if `required`, `requires` and `forbids` field rules are indeed followed by every
// argument.
fn validate_requirements(&self, args: &[String]) -> Result<()> {
for argument in self.args.values() {
// The arguments that are marked `required` must be provided by user.
if argument.required && argument.user_value.is_none() {
return Err(UtilsArgParserError::MissingArgument(
argument.name.to_string(),
));
}
if argument.user_value.is_some() {
// For the arguments that require a specific argument to be also present in the list
// of arguments provided by user, search for that argument.
if let Some(arg_name) = argument.requires
&& !args.contains(&(format!("--{}", arg_name)))
{
return Err(UtilsArgParserError::MissingArgument(arg_name.to_string()));
}
// Check the user-provided list for potential forbidden arguments.
for arg_name in argument.forbids.iter() {
if args.contains(&(format!("--{}", arg_name))) {
return Err(UtilsArgParserError::ForbiddenArgument(
argument.name.to_string(),
arg_name.to_string(),
));
}
}
}
}
Ok(())
}
// Does a general validation of `arg` command line argument.
fn validate_arg(&self, arg: &str) -> Result<()> {
if !arg.starts_with(ARG_PREFIX) {
return Err(UtilsArgParserError::UnexpectedArgument(arg.to_string()));
}
let arg_name = &arg[ARG_PREFIX.len()..];
// Check if the argument is an expected one and, if yes, check that it was not
// provided more than once (unless allow_multiple is set).
let argument = self
.args
.get(arg_name)
.ok_or_else(|| UtilsArgParserError::UnexpectedArgument(arg_name.to_string()))?;
if !argument.allow_multiple && argument.user_value.is_some() {
return Err(UtilsArgParserError::DuplicateArgument(arg_name.to_string()));
}
Ok(())
}
/// Validate the arguments provided by user and their values. Insert those
/// values in the `Argument` instances of the corresponding arguments.
fn populate_args(&mut self, args: &[String]) -> Result<()> {
let mut iter = args.iter();
while let Some(arg) = iter.next() {
self.validate_arg(arg)?;
// If the `arg` argument is indeed an expected one, set the value provided by user
// if it's a valid one.
let argument = self.args.get_mut(&arg[ARG_PREFIX.len()..]).ok_or_else(|| {
UtilsArgParserError::UnexpectedArgument(arg[ARG_PREFIX.len()..].to_string())
})?;
let arg_val = if argument.takes_value {
let val = iter
.next()
.filter(|v| !v.starts_with(ARG_PREFIX))
.ok_or_else(|| UtilsArgParserError::MissingValue(argument.name.to_string()))?
.clone();
if argument.allow_multiple {
match argument.user_value.take() {
Some(Value::Multiple(mut v)) => {
v.push(val);
Value::Multiple(v)
}
None => Value::Multiple(vec![val]),
_ => {
return Err(UtilsArgParserError::UnexpectedArgument(
argument.name.to_string(),
));
}
}
} else {
Value::Single(val)
}
} else {
Value::Flag
};
argument.user_value = Some(arg_val);
}
// Check the constraints for the `required`, `requires` and `forbids` fields of all
// arguments.
self.validate_requirements(args)?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::arg_parser::Value;
fn build_arg_parser() -> ArgParser<'static> {
ArgParser::new()
.arg(
Argument::new("exec-file")
.required(true)
.takes_value(true)
.help("'exec-file' info."),
)
.arg(
Argument::new("no-api")
.requires("config-file")
.takes_value(false)
.help("'no-api' info."),
)
.arg(
Argument::new("api-sock")
.takes_value(true)
.default_value("socket")
.help("'api-sock' info."),
)
.arg(
Argument::new("id")
.takes_value(true)
.default_value("instance")
.help("'id' info."),
)
.arg(
Argument::new("seccomp-filter")
.takes_value(true)
.help("'seccomp-filter' info.")
.forbids(vec!["no-seccomp"]),
)
.arg(
Argument::new("no-seccomp")
.help("'-no-seccomp' info.")
.forbids(vec!["seccomp-filter"]),
)
.arg(
Argument::new("config-file")
.takes_value(true)
.help("'config-file' info."),
)
.arg(
Argument::new("describe-snapshot")
.takes_value(true)
.help("'describe-snapshot' info."),
)
}
#[test]
fn test_arg_help() {
// Checks help format for an argument.
let width = 32;
let short_width = 16;
let mut argument = Argument::new("exec-file").takes_value(false);
assert_eq!(
argument.format_help(width),
" --exec-file "
);
assert_eq!(argument.format_help(short_width), " --exec-file ");
argument = Argument::new("exec-file").takes_value(true);
assert_eq!(
argument.format_help(width),
" --exec-file <exec-file> "
);
assert_eq!(
argument.format_help(short_width),
" --exec-file <exec-file> "
);
argument = Argument::new("exec-file")
.takes_value(true)
.help("'exec-file' info.");
assert_eq!(
argument.format_help(width),
" --exec-file <exec-file> 'exec-file' info."
);
assert_eq!(
argument.format_help(short_width),
" --exec-file <exec-file> 'exec-file' info."
);
argument = Argument::new("exec-file")
.takes_value(true)
.default_value("./exec-file");
assert_eq!(
argument.format_help(width),
" --exec-file <exec-file> [default: \"./exec-file\"]"
);
assert_eq!(
argument.format_help(short_width),
" --exec-file <exec-file> [default: \"./exec-file\"]"
);
argument = Argument::new("exec-file")
.takes_value(true)
.default_value("./exec-file")
.help("'exec-file' info.");
assert_eq!(
argument.format_help(width),
" --exec-file <exec-file> 'exec-file' info. [default: \"./exec-file\"]"
);
assert_eq!(
argument.format_help(short_width),
" --exec-file <exec-file> 'exec-file' info. [default: \"./exec-file\"]"
);
}
#[test]
fn test_arg_parser_help() {
// Checks help information when user passes `--help` flag.
let mut arg_parser = ArgParser::new()
.arg(
Argument::new("exec-file")
.required(true)
.takes_value(true)
.help("'exec-file' info."),
)
.arg(
Argument::new("api-sock")
.takes_value(true)
.help("'api-sock' info."),
);
assert_eq!(
arg_parser.formatted_help(),
"required arguments:\n --exec-file <exec-file> 'exec-file' info.\n\noptional \
arguments:\n --api-sock <api-sock> 'api-sock' info."
);
arg_parser = ArgParser::new()
.arg(Argument::new("id").takes_value(true).help("'id' info."))
.arg(
Argument::new("seccomp-filter")
.takes_value(true)
.help("'seccomp-filter' info."),
)
.arg(
Argument::new("config-file")
.takes_value(true)
.help("'config-file' info."),
);
assert_eq!(
arg_parser.formatted_help(),
"optional arguments:\n --config-file <config-file> 'config-file' info.\n \
--id <id> 'id' info.\n --seccomp-filter <seccomp-filter> \
'seccomp-filter' info."
);
}
#[test]
fn test_value() {
// Test `as_string()` and `as_flag()` functions behaviour.
let mut value = Value::Flag;
assert!(Value::as_single_value(&value).is_none());
value = Value::Single("arg".to_string());
assert_eq!(Value::as_single_value(&value).unwrap(), "arg");
value = Value::Single("arg".to_string());
assert!(!Value::as_flag(&value));
value = Value::Flag;
assert!(Value::as_flag(&value));
}
#[test]
fn test_parse() {
let arg_parser = build_arg_parser();
// Test different scenarios for the command line arguments provided by user.
let mut arguments = arg_parser.arguments().clone();
let args = vec!["binary-name", "--exec-file", "foo", "--help"]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
arguments.parse(&args).unwrap();
assert!(arguments.args.contains_key("help"));
arguments = arg_parser.arguments().clone();
let args = vec!["binary-name", "--exec-file", "foo", "-h"]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
arguments.parse(&args).unwrap();
assert!(arguments.args.contains_key("help"));
arguments = arg_parser.arguments().clone();
let args = vec!["binary-name", "--exec-file", "foo", "--version"]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
arguments.parse(&args).unwrap();
assert!(arguments.args.contains_key("version"));
arguments = arg_parser.arguments().clone();
let args = vec!["binary-name", "--exec-file", "foo", "--describe-snapshot"]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
assert_eq!(
arguments.parse(&args),
Err(UtilsArgParserError::MissingValue(
"describe-snapshot".to_string()
))
);
arguments = arg_parser.arguments().clone();
let args = vec![
"binary-name",
"--exec-file",
"foo",
"--describe-snapshot",
"--",
]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
assert_eq!(
arguments.parse(&args),
Err(UtilsArgParserError::MissingValue(
"describe-snapshot".to_string()
))
);
arguments = arg_parser.arguments().clone();
let args = vec![
"binary-name",
"--exec-file",
"foo",
"--api-sock",
"--id",
"bar",
]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
assert_eq!(
arguments.parse(&args),
Err(UtilsArgParserError::MissingValue("api-sock".to_string()))
);
arguments = arg_parser.arguments().clone();
let args = vec![
"binary-name",
"--exec-file",
"foo",
"--api-sock",
"bar",
"--api-sock",
"foobar",
]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
assert_eq!(
arguments.parse(&args),
Err(UtilsArgParserError::DuplicateArgument(
"api-sock".to_string()
))
);
arguments = arg_parser.arguments().clone();
let args = vec!["binary-name", "--api-sock", "foo"]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
assert_eq!(
arguments.parse(&args),
Err(UtilsArgParserError::MissingArgument(
"exec-file".to_string()
))
);
arguments = arg_parser.arguments().clone();
let args = vec![
"binary-name",
"--exec-file",
"foo",
"--api-sock",
"bar",
"--invalid-arg",
]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
assert_eq!(
arguments.parse(&args),
Err(UtilsArgParserError::UnexpectedArgument(
"invalid-arg".to_string()
))
);
arguments = arg_parser.arguments().clone();
let args = vec![
"binary-name",
"--exec-file",
"foo",
"--api-sock",
"bar",
"--id",
"foobar",
"--no-api",
]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
assert_eq!(
arguments.parse(&args),
Err(UtilsArgParserError::MissingArgument(
"config-file".to_string()
))
);
arguments = arg_parser.arguments().clone();
let args = vec![
"binary-name",
"--exec-file",
"foo",
"--api-sock",
"bar",
"--id",
]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
assert_eq!(
arguments.parse(&args),
Err(UtilsArgParserError::MissingValue("id".to_string()))
);
arguments = arg_parser.arguments().clone();
let args = vec![
"binary-name",
"--exec-file",
"foo",
"--config-file",
"bar",
"--no-api",
"foobar",
]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
assert_eq!(
arguments.parse(&args),
Err(UtilsArgParserError::UnexpectedArgument(
"foobar".to_string()
))
);
arguments = arg_parser.arguments().clone();
let args = vec![
"binary-name",
"--exec-file",
"foo",
"--api-sock",
"bar",
"--id",
"foobar",
"--seccomp-filter",
"0",
"--no-seccomp",
]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
assert_eq!(
arguments.parse(&args),
Err(UtilsArgParserError::ForbiddenArgument(
"no-seccomp".to_string(),
"seccomp-filter".to_string(),
))
);
arguments = arg_parser.arguments().clone();
let args = vec![
"binary-name",
"--exec-file",
"foo",
"--api-sock",
"bar",
"--id",
"foobar",
"--no-seccomp",
"--seccomp-filter",
"0",
]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
assert_eq!(
arguments.parse(&args),
Err(UtilsArgParserError::ForbiddenArgument(
"no-seccomp".to_string(),
"seccomp-filter".to_string(),
))
);
arguments = arg_parser.arguments().clone();
let args = vec![
"binary-name",
"--exec-file",
"foo",
"--api-sock",
"bar",
"foobar",
]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
assert_eq!(
arguments.parse(&args),
Err(UtilsArgParserError::UnexpectedArgument(
"foobar".to_string()
))
);
arguments = arg_parser.arguments().clone();
let args = vec!["binary-name", "foo"]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
assert_eq!(
arguments.parse(&args),
Err(UtilsArgParserError::UnexpectedArgument("foo".to_string()))
);
arguments = arg_parser.arguments().clone();
let args = vec![
"binary-name",
"--exec-file",
"foo",
"--api-sock",
"bar",
"--id",
"foobar",
"--seccomp-filter",
"0",
"--",
"--extra-flag",
]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
arguments.parse(&args).unwrap();
assert!(arguments.extra_args.contains(&"--extra-flag".to_string()));
}
#[test]
fn test_split() {
let mut args = vec!["--exec-file", "foo", "--", "--extra-arg-1", "--extra-arg-2"]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
let (left, right) = Arguments::split_args(&args);
assert_eq!(left.to_vec(), vec!["--exec-file", "foo"]);
assert_eq!(right.to_vec(), vec!["--extra-arg-1", "--extra-arg-2"]);
args = vec!["--exec-file", "foo", "--"]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
let (left, right) = Arguments::split_args(&args);
assert_eq!(left.to_vec(), vec!["--exec-file", "foo"]);
assert!(right.is_empty());
args = vec!["--exec-file", "foo"]
.into_iter()
.map(String::from)
.collect::<Vec<String>>();
let (left, right) = Arguments::split_args(&args);
assert_eq!(left.to_vec(), vec!["--exec-file", "foo"]);
assert!(right.is_empty());
}
#[test]
fn test_error_display() {
assert_eq!(
format!(
"{}",
UtilsArgParserError::ForbiddenArgument("foo".to_string(), "bar".to_string())
),
"Argument 'bar' cannot be used together with argument 'foo'."
);
assert_eq!(
format!(
"{}",
UtilsArgParserError::MissingArgument("foo".to_string())
),
"Argument 'foo' required, but not found."
);
assert_eq!(
format!("{}", UtilsArgParserError::MissingValue("foo".to_string())),
"The argument 'foo' requires a value, but none was supplied."
);
assert_eq!(
format!(
"{}",
UtilsArgParserError::UnexpectedArgument("foo".to_string())
),
"Found argument 'foo' which wasn't expected, or isn't valid in this context."
);
assert_eq!(
format!(
"{}",
UtilsArgParserError::DuplicateArgument("foo".to_string())
),
"The argument 'foo' was provided more than once."
);
}
#[test]
fn test_value_display() {
assert_eq!(format!("{}", Value::Flag), "true");
assert_eq!(format!("{}", Value::Single("foo".to_string())), "\"foo\"");
}
#[test]
fn test_allow_multiple() {
let arg_parser = ArgParser::new()
.arg(
Argument::new("no-multiple")
.takes_value(true)
.help("argument that takes just one value."),
)
.arg(
Argument::new("multiple")
.allow_multiple(true)
.help("argument that allows duplication."),
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/utils/src/validators.rs | src/utils/src/validators.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Misc data format validations, shared by multiple Firecracker components.
// Inclusive bounds on the number of characters in an instance ID.
const MAX_INSTANCE_ID_LEN: usize = 64;
const MIN_INSTANCE_ID_LEN: usize = 1;
// NOTE: the `///` variant docs double as the `Display` messages (via
// `displaydoc`), so their wording is part of the user-visible output.
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum ValidatorError {
    /// Invalid char ({0}) at position {1}
    InvalidChar(char, usize), // (char, position)
    /// Invalid len ({0}); the length must be between {1} and {2}
    InvalidLen(usize, usize, usize), // (length, min, max)
}
/// Checks that the instance id only contains alphanumeric chars and hyphens
/// and that the size is between 1 and 64 characters.
pub fn validate_instance_id(input: &str) -> Result<(), ValidatorError> {
    let len = input.len();
    if !(MIN_INSTANCE_ID_LEN..=MAX_INSTANCE_ID_LEN).contains(&len) {
        return Err(ValidatorError::InvalidLen(
            len,
            MIN_INSTANCE_ID_LEN,
            MAX_INSTANCE_ID_LEN,
        ));
    }
    // Report the first offending character together with its position.
    match input
        .chars()
        .enumerate()
        .find(|(_, c)| *c != '-' && !c.is_alphanumeric())
    {
        Some((pos, c)) => Err(ValidatorError::InvalidChar(c, pos)),
        None => Ok(()),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_validate_instance_id() {
        // Empty IDs are rejected with a length error (checked via `Display`).
        assert_eq!(
            format!("{}", validate_instance_id("").unwrap_err()),
            "Invalid len (0); the length must be between 1 and 64"
        );
        // Alphanumerics and hyphens are accepted.
        validate_instance_id("12-3aa").unwrap();
        // Any other character is rejected, reporting the char and its position.
        assert_eq!(
            format!("{}", validate_instance_id("12_3aa").unwrap_err()),
            "Invalid char (_) at position 2"
        );
        assert_eq!(
            validate_instance_id("12:3aa").unwrap_err(),
            ValidatorError::InvalidChar(':', 2)
        );
        // IDs longer than the maximum are rejected with a length error.
        assert_eq!(
            validate_instance_id(str::repeat("a", MAX_INSTANCE_ID_LEN + 1).as_str()).unwrap_err(),
            ValidatorError::InvalidLen(
                MAX_INSTANCE_ID_LEN + 1,
                MIN_INSTANCE_ID_LEN,
                MAX_INSTANCE_ID_LEN
            )
        );
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/persist.rs | src/vmm/src/persist.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines state structures for saving/restoring a Firecracker microVM.
use std::fmt::Debug;
use std::fs::{File, OpenOptions};
use std::io::{self, Write};
use std::mem::forget;
use std::os::unix::io::AsRawFd;
use std::os::unix::net::UnixStream;
use std::path::Path;
use std::sync::{Arc, Mutex};
use semver::Version;
use serde::{Deserialize, Serialize};
use userfaultfd::{FeatureFlags, Uffd, UffdBuilder};
use vmm_sys_util::sock_ctrl_msg::ScmSocket;
#[cfg(target_arch = "aarch64")]
use crate::arch::aarch64::vcpu::get_manufacturer_id_from_host;
use crate::builder::{self, BuildMicrovmFromSnapshotError};
use crate::cpu_config::templates::StaticCpuTemplate;
#[cfg(target_arch = "x86_64")]
use crate::cpu_config::x86_64::cpuid::CpuidTrait;
#[cfg(target_arch = "x86_64")]
use crate::cpu_config::x86_64::cpuid::common::get_vendor_id_from_host;
use crate::device_manager::{DevicePersistError, DevicesState};
use crate::logger::{info, warn};
use crate::resources::VmResources;
use crate::seccomp::BpfThreadMap;
use crate::snapshot::Snapshot;
use crate::utils::u64_to_usize;
use crate::vmm_config::boot_source::BootSourceConfig;
use crate::vmm_config::instance_info::InstanceInfo;
use crate::vmm_config::machine_config::{HugePageConfig, MachineConfigError, MachineConfigUpdate};
use crate::vmm_config::snapshot::{CreateSnapshotParams, LoadSnapshotParams, MemBackendType};
use crate::vstate::kvm::KvmState;
use crate::vstate::memory::{
self, GuestMemoryState, GuestRegionMmap, GuestRegionType, MemoryError,
};
use crate::vstate::vcpu::{VcpuSendEventError, VcpuState};
use crate::vstate::vm::{VmError, VmState};
use crate::{EventManager, Vmm, vstate};
/// Holds information related to the VM that is not part of VmState.
///
/// Serialized into the snapshot as part of [`MicrovmState`].
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Eq, Serialize)]
pub struct VmInfo {
    /// Guest memory size.
    pub mem_size_mib: u64,
    /// smt information
    pub smt: bool,
    /// CPU template type
    pub cpu_template: StaticCpuTemplate,
    /// Boot source information.
    pub boot_source: BootSourceConfig,
    /// Huge page configuration
    pub huge_pages: HugePageConfig,
}
impl From<&VmResources> for VmInfo {
    // Capture the snapshot-relevant subset of the VM resources; only the
    // boot source config needs an owned clone.
    fn from(value: &VmResources) -> Self {
        Self {
            mem_size_mib: value.machine_config.mem_size_mib as u64,
            smt: value.machine_config.smt,
            cpu_template: StaticCpuTemplate::from(&value.machine_config.cpu_template),
            boot_source: value.boot_source.config.clone(),
            huge_pages: value.machine_config.huge_pages,
        }
    }
}
/// Contains the necessary state for saving/restoring a microVM.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct MicrovmState {
    /// Miscellaneous VM info.
    pub vm_info: VmInfo,
    /// KVM state.
    pub kvm_state: KvmState,
    /// VM KVM state.
    pub vm_state: VmState,
    /// Vcpu states.
    pub vcpu_states: Vec<VcpuState>,
    /// Device states.
    pub device_states: DevicesState,
}
/// This describes the mapping between Firecracker base virtual address and
/// offset in the buffer or file backend for a guest memory region. It is used
/// to tell an external process/thread where to populate the guest memory data
/// for this range.
///
/// E.g. Guest memory contents for a region of `size` bytes can be found in the
/// backend at `offset` bytes from the beginning, and should be copied/populated
/// into `base_host_address`.
///
/// These mappings are serialized to JSON and sent to the page-fault handler
/// process as part of the UFFD handshake (see `send_uffd_handshake`).
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct GuestRegionUffdMapping {
    /// Base host virtual address where the guest memory contents for this
    /// region should be copied/populated.
    pub base_host_virt_addr: u64,
    /// Region size.
    pub size: usize,
    /// Offset in the backend file/buffer where the region contents are.
    pub offset: u64,
    /// The configured page size for this memory region.
    pub page_size: usize,
    /// The configured page size **in bytes** for this memory region. The name is
    /// wrong but cannot be changed due to being API, so this field is deprecated,
    /// to be removed in 2.0.
    #[deprecated]
    pub page_size_kib: usize,
}
/// Errors related to saving and restoring Microvm state.
// NOTE: the `///` variant docs double as the `Display` messages via
// `displaydoc`, so their wording is user-visible.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum MicrovmStateError {
    /// Operation not allowed: {0}
    NotAllowed(String),
    /// Cannot restore devices: {0}
    RestoreDevices(#[from] DevicePersistError),
    /// Cannot save Vcpu state: {0}
    SaveVcpuState(vstate::vcpu::VcpuError),
    /// Cannot save Vm state: {0}
    SaveVmState(vstate::vm::ArchVmError),
    /// Cannot signal Vcpu: {0}
    SignalVcpu(VcpuSendEventError),
    /// Vcpu is in unexpected state.
    UnexpectedVcpuResponse,
}
/// Errors associated with creating a snapshot.
// NOTE: the `///` variant docs double as the `Display` messages via
// `displaydoc`, so their wording is user-visible.
#[rustfmt::skip]
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum CreateSnapshotError {
    /// Cannot get dirty bitmap: {0}
    DirtyBitmap(#[from] VmError),
    /// Cannot write memory file: {0}
    Memory(#[from] MemoryError),
    /// Cannot perform {0} on the memory backing file: {1}
    MemoryBackingFile(&'static str, io::Error),
    /// Cannot save the microVM state: {0}
    MicrovmState(MicrovmStateError),
    /// Cannot serialize the microVM state: {0}
    SerializeMicrovmState(#[from] crate::snapshot::SnapshotError),
    /// Cannot perform {0} on the snapshot backing file: {1}
    SnapshotBackingFile(&'static str, io::Error),
}
/// Snapshot version
pub const SNAPSHOT_VERSION: Version = Version::new(8, 0, 0);
/// Creates a Microvm snapshot.
///
/// Writes the serialized microVM state to `params.snapshot_path` and the
/// guest memory contents to `params.mem_file_path`.
pub fn create_snapshot(
    vmm: &mut Vmm,
    vm_info: &VmInfo,
    params: &CreateSnapshotParams,
) -> Result<(), CreateSnapshotError> {
    let microvm_state = vmm
        .save_state(vm_info)
        .map_err(CreateSnapshotError::MicrovmState)?;
    snapshot_state_to_file(&microvm_state, &params.snapshot_path)?;
    vmm.vm
        .snapshot_memory_to_file(&params.mem_file_path, params.snapshot_type)?;
    // We need to mark queues as dirty again for all activated devices. The reason we
    // do it here is that we don't mark pages as dirty during runtime
    // for queue objects.
    vmm.device_manager
        .mark_virtio_queue_memory_dirty(vmm.vm.guest_memory());
    Ok(())
}
fn snapshot_state_to_file(
microvm_state: &MicrovmState,
snapshot_path: &Path,
) -> Result<(), CreateSnapshotError> {
use self::CreateSnapshotError::*;
let mut snapshot_file = OpenOptions::new()
.create(true)
.write(true)
.truncate(true)
.open(snapshot_path)
.map_err(|err| SnapshotBackingFile("open", err))?;
let snapshot = Snapshot::new(microvm_state);
snapshot.save(&mut snapshot_file)?;
snapshot_file
.flush()
.map_err(|err| SnapshotBackingFile("flush", err))?;
snapshot_file
.sync_all()
.map_err(|err| SnapshotBackingFile("sync_all", err))
}
/// Validates that snapshot CPU vendor matches the host CPU vendor.
///
/// This check is advisory only: a mismatch, or a failure to read either
/// vendor ID, is logged but never returned as an error.
#[cfg(target_arch = "x86_64")]
pub fn validate_cpu_vendor(microvm_state: &MicrovmState) {
    let host_vendor_id = get_vendor_id_from_host();
    // The snapshot's vendor ID is taken from the first vCPU's CPUID.
    let snapshot_vendor_id = microvm_state.vcpu_states[0].cpuid.vendor_id();
    match (host_vendor_id, snapshot_vendor_id) {
        (Ok(host_id), Some(snapshot_id)) => {
            info!("Host CPU vendor ID: {host_id:?}");
            info!("Snapshot CPU vendor ID: {snapshot_id:?}");
            if host_id != snapshot_id {
                warn!("Host CPU vendor ID differs from the snapshotted one",);
            }
        }
        (Ok(host_id), None) => {
            info!("Host CPU vendor ID: {host_id:?}");
            warn!("Snapshot CPU vendor ID: couldn't get from the snapshot");
        }
        (Err(_), Some(snapshot_id)) => {
            warn!("Host CPU vendor ID: couldn't get from the host");
            info!("Snapshot CPU vendor ID: {snapshot_id:?}");
        }
        (Err(_), None) => {
            warn!("Host CPU vendor ID: couldn't get from the host");
            warn!("Snapshot CPU vendor ID: couldn't get from the snapshot");
        }
    }
}
/// Validate that Snapshot Manufacturer ID matches
/// the one from the Host
///
/// The manufacturer ID for the Snapshot is taken from the first VCPU state.
/// This check is advisory only: a mismatch, or a failure to read either ID,
/// is logged but never returned as an error.
#[cfg(target_arch = "aarch64")]
pub fn validate_cpu_manufacturer_id(microvm_state: &MicrovmState) {
    let host_cpu_id = get_manufacturer_id_from_host();
    // NOTE: `manifacturer_id` is a (misspelled) method name defined elsewhere.
    let snapshot_cpu_id = microvm_state.vcpu_states[0].regs.manifacturer_id();
    match (host_cpu_id, snapshot_cpu_id) {
        (Some(host_id), Some(snapshot_id)) => {
            info!("Host CPU manufacturer ID: {host_id:?}");
            info!("Snapshot CPU manufacturer ID: {snapshot_id:?}");
            if host_id != snapshot_id {
                warn!("Host CPU manufacturer ID differs from the snapshotted one",);
            }
        }
        (Some(host_id), None) => {
            info!("Host CPU manufacturer ID: {host_id:?}");
            warn!("Snapshot CPU manufacturer ID: couldn't get from the snapshot");
        }
        (None, Some(snapshot_id)) => {
            warn!("Host CPU manufacturer ID: couldn't get from the host");
            info!("Snapshot CPU manufacturer ID: {snapshot_id:?}");
        }
        (None, None) => {
            warn!("Host CPU manufacturer ID: couldn't get from the host");
            warn!("Snapshot CPU manufacturer ID: couldn't get from the snapshot");
        }
    }
}
/// Error type for [`snapshot_state_sanity_check`].
// NOTE: the `///` variant docs double as the `Display` messages via
// `displaydoc`.
#[derive(Debug, thiserror::Error, displaydoc::Display, PartialEq, Eq)]
pub enum SnapShotStateSanityCheckError {
    /// No memory region defined.
    NoMemory,
    /// No DRAM memory region defined.
    NoDramMemory,
    /// DRAM memory has more than a single slot.
    DramMemoryTooManySlots,
    /// DRAM memory is unplugged.
    DramMemoryUnplugged,
}
/// Performs sanity checks against the state file and returns specific errors.
pub fn snapshot_state_sanity_check(
microvm_state: &MicrovmState,
) -> Result<(), SnapShotStateSanityCheckError> {
// Check that the snapshot contains at least 1 mem region, that at least one is Dram,
// and that Dram region contains a single plugged slot.
// Upper bound check will be done when creating guest memory by comparing against
// KVM max supported value kvm_context.max_memslots().
let regions = µvm_state.vm_state.memory.regions;
if regions.is_empty() {
return Err(SnapShotStateSanityCheckError::NoMemory);
}
if !regions
.iter()
.any(|r| r.region_type == GuestRegionType::Dram)
{
return Err(SnapShotStateSanityCheckError::NoDramMemory);
}
for dram_region in regions
.iter()
.filter(|r| r.region_type == GuestRegionType::Dram)
{
if dram_region.plugged.len() != 1 {
return Err(SnapShotStateSanityCheckError::DramMemoryTooManySlots);
}
if !dram_region.plugged[0] {
return Err(SnapShotStateSanityCheckError::DramMemoryUnplugged);
}
}
#[cfg(target_arch = "x86_64")]
validate_cpu_vendor(microvm_state);
#[cfg(target_arch = "aarch64")]
validate_cpu_manufacturer_id(microvm_state);
Ok(())
}
/// Error type for [`restore_from_snapshot`].
// NOTE: the `///` variant docs double as the `Display` messages via
// `displaydoc`.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum RestoreFromSnapshotError {
    /// Failed to get snapshot state from file: {0}
    File(#[from] SnapshotStateFromFileError),
    /// Invalid snapshot state: {0}
    Invalid(#[from] SnapShotStateSanityCheckError),
    /// Failed to load guest memory: {0}
    GuestMemory(#[from] RestoreFromSnapshotGuestMemoryError),
    /// Failed to build microVM from snapshot: {0}
    Build(#[from] BuildMicrovmFromSnapshotError),
}
/// Sub-Error type for [`restore_from_snapshot`] to contain either [`GuestMemoryFromFileError`] or
/// [`GuestMemoryFromUffdError`] within [`RestoreFromSnapshotError`].
// NOTE: the `///` variant docs double as the `Display` messages via
// `displaydoc`.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum RestoreFromSnapshotGuestMemoryError {
    /// Error creating guest memory from file: {0}
    File(#[from] GuestMemoryFromFileError),
    /// Error creating guest memory from uffd: {0}
    Uffd(#[from] GuestMemoryFromUffdError),
}
/// Loads a Microvm snapshot producing a 'paused' Microvm.
///
/// Reads the microVM state from `params.snapshot_path`, applies any network
/// device overrides, restores guest memory from either a memory file or a
/// UFFD backend, and builds the (paused) microVM.
pub fn restore_from_snapshot(
    instance_info: &InstanceInfo,
    event_manager: &mut EventManager,
    seccomp_filters: &BpfThreadMap,
    params: &LoadSnapshotParams,
    vm_resources: &mut VmResources,
) -> Result<Arc<Mutex<Vmm>>, RestoreFromSnapshotError> {
    let mut microvm_state = snapshot_state_from_file(&params.snapshot_path)?;
    // Rewrite the host TAP device name of any overridden network interface,
    // searching both MMIO and PCI net devices by interface id.
    for entry in &params.network_overrides {
        microvm_state
            .device_states
            .mmio_state
            .net_devices
            .iter_mut()
            .map(|device| &mut device.device_state)
            .chain(
                microvm_state
                    .device_states
                    .pci_state
                    .net_devices
                    .iter_mut()
                    .map(|device| &mut device.device_state),
            )
            .find(|x| x.id == entry.iface_id)
            .map(|device_state| device_state.tap_if_name.clone_from(&entry.host_dev_name))
            .ok_or(SnapshotStateFromFileError::UnknownNetworkDevice)?;
    }
    let track_dirty_pages = params.track_dirty_pages;
    // Rebuild the machine config from the snapshot's VM info; the vCPU count
    // comes from the number of saved vCPU states.
    let vcpu_count = microvm_state
        .vcpu_states
        .len()
        .try_into()
        .map_err(|_| MachineConfigError::InvalidVcpuCount)
        .map_err(BuildMicrovmFromSnapshotError::VmUpdateConfig)?;
    vm_resources
        .update_machine_config(&MachineConfigUpdate {
            vcpu_count: Some(vcpu_count),
            mem_size_mib: Some(u64_to_usize(microvm_state.vm_info.mem_size_mib)),
            smt: Some(microvm_state.vm_info.smt),
            cpu_template: Some(microvm_state.vm_info.cpu_template),
            track_dirty_pages: Some(track_dirty_pages),
            huge_pages: Some(microvm_state.vm_info.huge_pages),
            #[cfg(feature = "gdb")]
            gdb_socket_path: None,
        })
        .map_err(BuildMicrovmFromSnapshotError::VmUpdateConfig)?;
    // Some sanity checks before building the microvm.
    snapshot_state_sanity_check(&microvm_state)?;
    let mem_backend_path = &params.mem_backend.backend_path;
    let mem_state = &microvm_state.vm_state.memory;
    let (guest_memory, uffd) = match params.mem_backend.backend_type {
        MemBackendType::File => {
            // Hugetlbfs-backed snapshots cannot be restored by mapping the
            // memory file; they must go through a UFFD backend.
            if vm_resources.machine_config.huge_pages.is_hugetlbfs() {
                return Err(RestoreFromSnapshotGuestMemoryError::File(
                    GuestMemoryFromFileError::HugetlbfsSnapshot,
                )
                .into());
            }
            (
                guest_memory_from_file(mem_backend_path, mem_state, track_dirty_pages)
                    .map_err(RestoreFromSnapshotGuestMemoryError::File)?,
                None,
            )
        }
        MemBackendType::Uffd => guest_memory_from_uffd(
            mem_backend_path,
            mem_state,
            track_dirty_pages,
            vm_resources.machine_config.huge_pages,
        )
        .map_err(RestoreFromSnapshotGuestMemoryError::Uffd)?,
    };
    builder::build_microvm_from_snapshot(
        instance_info,
        event_manager,
        microvm_state,
        guest_memory,
        uffd,
        seccomp_filters,
        vm_resources,
    )
    .map_err(RestoreFromSnapshotError::Build)
}
/// Error type for [`snapshot_state_from_file`]
// NOTE: the `///` variant docs double as the `Display` messages via
// `displaydoc`.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum SnapshotStateFromFileError {
    /// Failed to open snapshot file: {0}
    Open(#[from] std::io::Error),
    /// Failed to load snapshot state from file: {0}
    Load(#[from] crate::snapshot::SnapshotError),
    /// Unknown Network Device.
    UnknownNetworkDevice,
}
/// Deserializes a [`MicrovmState`] from the snapshot file at `snapshot_path`.
fn snapshot_state_from_file(
    snapshot_path: &Path,
) -> Result<MicrovmState, SnapshotStateFromFileError> {
    // Both the open and load errors convert into this fn's error via `From`.
    let snapshot = Snapshot::load(&mut File::open(snapshot_path)?)?;
    Ok(snapshot.data)
}
/// Error type for [`guest_memory_from_file`].
// NOTE: the `///` variant docs double as the `Display` messages via
// `displaydoc`.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum GuestMemoryFromFileError {
    /// Failed to load guest memory: {0}
    File(#[from] std::io::Error),
    /// Failed to restore guest memory: {0}
    Restore(#[from] MemoryError),
    /// Cannot restore hugetlbfs backed snapshot by mapping the memory file. Please use uffd.
    HugetlbfsSnapshot,
}
/// Restores guest memory regions by mapping the snapshot memory file at
/// `mem_file_path` according to `mem_state`.
fn guest_memory_from_file(
    mem_file_path: &Path,
    mem_state: &GuestMemoryState,
    track_dirty_pages: bool,
) -> Result<Vec<GuestRegionMmap>, GuestMemoryFromFileError> {
    let backing_file = File::open(mem_file_path)?;
    Ok(memory::snapshot_file(
        backing_file,
        mem_state.regions(),
        track_dirty_pages,
    )?)
}
/// Error type for [`guest_memory_from_uffd`]
// NOTE: the `///` variant docs double as the `Display` messages via
// `displaydoc`; the `Create` and `Send` messages were previously
// ungrammatical ("Failed to UFFD object", "Failed to sends").
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum GuestMemoryFromUffdError {
    /// Failed to restore guest memory: {0}
    Restore(#[from] MemoryError),
    /// Failed to create UFFD object: {0}
    Create(userfaultfd::Error),
    /// Failed to register memory address range with the userfaultfd object: {0}
    Register(userfaultfd::Error),
    /// Failed to connect to UDS Unix stream: {0}
    Connect(#[from] std::io::Error),
    /// Failed to send file descriptor: {0}
    Send(#[from] vmm_sys_util::errno::Error),
}
/// Creates anonymous guest memory served by a userfaultfd and hands the UFFD
/// over to the page-fault handler listening on the Unix socket at
/// `mem_uds_path`. Returns the memory regions plus the UFFD handle.
fn guest_memory_from_uffd(
    mem_uds_path: &Path,
    mem_state: &GuestMemoryState,
    track_dirty_pages: bool,
    huge_pages: HugePageConfig,
) -> Result<(Vec<GuestRegionMmap>, Option<Uffd>), GuestMemoryFromUffdError> {
    let (guest_memory, backend_mappings) =
        create_guest_memory(mem_state, track_dirty_pages, huge_pages)?;
    let mut uffd_builder = UffdBuilder::new();
    // We only make use of this if balloon devices are present, but we can enable it unconditionally
    // because the only place the kernel checks this is in a hook from madvise, e.g. it doesn't
    // actively change the behavior of UFFD, only passively. Without balloon devices
    // we never call madvise anyway, so no need to put this into a conditional.
    uffd_builder.require_features(FeatureFlags::EVENT_REMOVE);
    let uffd = uffd_builder
        .close_on_exec(true)
        .non_blocking(true)
        .user_mode_only(false)
        .create()
        .map_err(GuestMemoryFromUffdError::Create)?;
    // Register every guest memory region so that page faults in any of them
    // are routed to this UFFD.
    for mem_region in guest_memory.iter() {
        uffd.register(mem_region.as_ptr().cast(), mem_region.size() as _)
            .map_err(GuestMemoryFromUffdError::Register)?;
    }
    send_uffd_handshake(mem_uds_path, &backend_mappings, &uffd)?;
    Ok((guest_memory, Some(uffd)))
}
/// Allocates anonymous guest memory for `mem_state` and computes, for each
/// region, the mapping a UFFD page-fault handler needs in order to populate it.
fn create_guest_memory(
    mem_state: &GuestMemoryState,
    track_dirty_pages: bool,
    huge_pages: HugePageConfig,
) -> Result<(Vec<GuestRegionMmap>, Vec<GuestRegionUffdMapping>), GuestMemoryFromUffdError> {
    let guest_memory = memory::anonymous(mem_state.regions(), track_dirty_pages, huge_pages)?;
    // Regions are laid out back-to-back in the backend, so each region's
    // backend offset is the running sum of the preceding region sizes.
    let mut offset = 0u64;
    let backend_mappings = guest_memory
        .iter()
        .map(|mem_region| {
            // `page_size_kib` is deprecated but still part of the API; it
            // carries the same byte value as `page_size`.
            #[allow(deprecated)]
            let mapping = GuestRegionUffdMapping {
                base_host_virt_addr: mem_region.as_ptr() as u64,
                size: mem_region.size(),
                offset,
                page_size: huge_pages.page_size(),
                page_size_kib: huge_pages.page_size(),
            };
            offset += mem_region.size() as u64;
            mapping
        })
        .collect();
    Ok((guest_memory, backend_mappings))
}
/// Sends the UFFD handshake to the page-fault handler process listening on
/// the Unix socket at `mem_uds_path`: the JSON-encoded guest region mappings
/// as payload, with the UFFD file descriptor attached via SCM_RIGHTS.
fn send_uffd_handshake(
    mem_uds_path: &Path,
    backend_mappings: &[GuestRegionUffdMapping],
    uffd: &impl AsRawFd,
) -> Result<(), GuestMemoryFromUffdError> {
    // This is safe to unwrap() because we control the contents of the vector
    // (i.e GuestRegionUffdMapping entries).
    let backend_mappings = serde_json::to_string(backend_mappings).unwrap();
    let socket = UnixStream::connect(mem_uds_path)?;
    socket.send_with_fd(
        backend_mappings.as_bytes(),
        // In the happy case we can close the fd since the other process has it open and is
        // using it to serve us pages.
        //
        // The problem is that if other process crashes/exits, firecracker guest memory
        // will simply revert to anon-mem behavior which would lead to silent errors and
        // undefined behavior.
        //
        // To tackle this scenario, the page fault handler can notify Firecracker of any
        // crashes/exits. There is no need for Firecracker to explicitly send its process ID.
        // The external process can obtain Firecracker's PID by calling `getsockopt` with
        // `libc::SO_PEERCRED` option like so:
        //
        // let mut val = libc::ucred { pid: 0, gid: 0, uid: 0 };
        // let mut ucred_size: u32 = mem::size_of::<libc::ucred>() as u32;
        // libc::getsockopt(
        //     socket.as_raw_fd(),
        //     libc::SOL_SOCKET,
        //     libc::SO_PEERCRED,
        //     &mut val as *mut _ as *mut _,
        //     &mut ucred_size as *mut libc::socklen_t,
        // );
        //
        // Per this linux man page: https://man7.org/linux/man-pages/man7/unix.7.html,
        // `SO_PEERCRED` returns the credentials (PID, UID and GID) of the peer process
        // connected to this socket. The returned credentials are those that were in effect
        // at the time of the `connect` call.
        //
        // Moreover, Firecracker holds a copy of the UFFD fd as well, so that even if the
        // page fault handler process does not tear down Firecracker when necessary, the
        // uffd will still be alive but with no one to serve faults, leading to guest freeze.
        uffd.as_raw_fd(),
    )?;
    // We prevent Rust from closing the socket file descriptor to avoid a potential race condition
    // between the mappings message and the connection shutdown. If the latter arrives at the UFFD
    // handler first, the handler never sees the mappings.
    forget(socket);
    Ok(())
}
#[cfg(test)]
mod tests {
use std::os::unix::net::UnixListener;
use vmm_sys_util::tempfile::TempFile;
use super::*;
use crate::Vmm;
#[cfg(target_arch = "x86_64")]
use crate::builder::tests::insert_vmclock_device;
#[cfg(target_arch = "x86_64")]
use crate::builder::tests::insert_vmgenid_device;
use crate::builder::tests::{
CustomBlockConfig, default_kernel_cmdline, default_vmm, insert_balloon_device,
insert_block_devices, insert_net_device, insert_vsock_device,
};
#[cfg(target_arch = "aarch64")]
use crate::construct_kvm_mpidrs;
use crate::devices::virtio::block::CacheType;
use crate::snapshot::Persist;
use crate::vmm_config::balloon::BalloonDeviceConfig;
use crate::vmm_config::net::NetworkInterfaceConfig;
use crate::vmm_config::vsock::tests::default_config;
use crate::vstate::memory::{GuestMemoryRegionState, GuestRegionType};
    /// Builds a default test VMM with one balloon, block, net and vsock
    /// device attached (plus vmgenid/vmclock devices on x86_64).
    fn default_vmm_with_devices() -> Vmm {
        let mut event_manager = EventManager::new().expect("Cannot create EventManager");
        let mut vmm = default_vmm();
        let mut cmdline = default_kernel_cmdline();
        // Add a balloon device.
        let balloon_config = BalloonDeviceConfig {
            amount_mib: 0,
            deflate_on_oom: false,
            stats_polling_interval_s: 0,
            free_page_hinting: false,
            free_page_reporting: false,
        };
        insert_balloon_device(&mut vmm, &mut cmdline, &mut event_manager, balloon_config);
        // Add a block device.
        let drive_id = String::from("root");
        let block_configs = vec![CustomBlockConfig::new(
            drive_id,
            true,
            None,
            true,
            CacheType::Unsafe,
        )];
        insert_block_devices(&mut vmm, &mut cmdline, &mut event_manager, block_configs);
        // Add net device.
        let network_interface = NetworkInterfaceConfig {
            iface_id: String::from("netif"),
            host_dev_name: String::from("hostname"),
            guest_mac: None,
            rx_rate_limiter: None,
            tx_rate_limiter: None,
        };
        insert_net_device(
            &mut vmm,
            &mut cmdline,
            &mut event_manager,
            network_interface,
        );
        // Add vsock device. The socket file is removed so the device can
        // bind the path itself.
        let mut tmp_sock_file = TempFile::new().unwrap();
        tmp_sock_file.remove().unwrap();
        let vsock_config = default_config(&tmp_sock_file);
        insert_vsock_device(&mut vmm, &mut cmdline, &mut event_manager, vsock_config);
        #[cfg(target_arch = "x86_64")]
        insert_vmgenid_device(&mut vmm);
        #[cfg(target_arch = "x86_64")]
        insert_vmclock_device(&mut vmm);
        vmm
    }
#[test]
fn test_microvm_state_snapshot() {
let vmm = default_vmm_with_devices();
let states = vmm.device_manager.save();
// Only checking that all devices are saved, actual device state
// is tested by that device's tests.
assert_eq!(states.mmio_state.block_devices.len(), 1);
assert_eq!(states.mmio_state.net_devices.len(), 1);
assert!(states.mmio_state.vsock_device.is_some());
assert!(states.mmio_state.balloon_device.is_some());
let vcpu_states = vec![VcpuState::default()];
#[cfg(target_arch = "aarch64")]
let mpidrs = construct_kvm_mpidrs(&vcpu_states);
let microvm_state = MicrovmState {
device_states: states,
vcpu_states,
kvm_state: Default::default(),
vm_info: VmInfo {
mem_size_mib: 1u64,
..Default::default()
},
#[cfg(target_arch = "aarch64")]
vm_state: vmm.vm.save_state(&mpidrs).unwrap(),
#[cfg(target_arch = "x86_64")]
vm_state: vmm.vm.save_state().unwrap(),
};
let mut buf = vec![0; 10000];
Snapshot::new(µvm_state)
.save(&mut buf.as_mut_slice())
.unwrap();
let restored_microvm_state: MicrovmState = Snapshot::load_without_crc_check(buf.as_slice())
.unwrap()
.data;
assert_eq!(restored_microvm_state.vm_info, microvm_state.vm_info);
assert_eq!(
restored_microvm_state.device_states.mmio_state,
microvm_state.device_states.mmio_state
)
}
#[test]
fn test_create_guest_memory() {
let mem_state = GuestMemoryState {
regions: vec![GuestMemoryRegionState {
base_address: 0,
size: 0x20000,
region_type: GuestRegionType::Dram,
plugged: vec![true],
}],
};
let (_, uffd_regions) =
create_guest_memory(&mem_state, false, HugePageConfig::None).unwrap();
assert_eq!(uffd_regions.len(), 1);
assert_eq!(uffd_regions[0].size, 0x20000);
assert_eq!(uffd_regions[0].offset, 0);
assert_eq!(uffd_regions[0].page_size, HugePageConfig::None.page_size());
}
#[test]
fn test_send_uffd_handshake() {
#[allow(deprecated)]
let uffd_regions = vec![
GuestRegionUffdMapping {
base_host_virt_addr: 0,
size: 0x100000,
offset: 0,
page_size: HugePageConfig::None.page_size(),
page_size_kib: HugePageConfig::None.page_size(),
},
GuestRegionUffdMapping {
base_host_virt_addr: 0x100000,
size: 0x200000,
offset: 0,
page_size: HugePageConfig::Hugetlbfs2M.page_size(),
page_size_kib: HugePageConfig::Hugetlbfs2M.page_size(),
},
];
let uds_path = TempFile::new().unwrap();
let uds_path = uds_path.as_path();
std::fs::remove_file(uds_path).unwrap();
let listener = UnixListener::bind(uds_path).expect("Cannot bind to socket path");
send_uffd_handshake(uds_path, &uffd_regions, &std::io::stdin()).unwrap();
let (stream, _) = listener.accept().expect("Cannot listen on UDS socket");
let mut message_buf = vec![0u8; 1024];
let (bytes_read, _) = stream
.recv_with_fd(&mut message_buf[..])
.expect("Cannot recv_with_fd");
message_buf.resize(bytes_read, 0);
let deserialized: Vec<GuestRegionUffdMapping> =
serde_json::from_slice(&message_buf).unwrap();
assert_eq!(uffd_regions, deserialized);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/builder.rs | src/vmm/src/builder.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Enables pre-boot setup, instantiation and booting of a Firecracker VMM.
use std::fmt::Debug;
use std::io;
#[cfg(feature = "gdb")]
use std::sync::mpsc;
use std::sync::{Arc, Mutex};
use event_manager::SubscriberOps;
use linux_loader::cmdline::Cmdline as LoaderKernelCmdline;
use userfaultfd::Uffd;
use utils::time::TimestampUs;
use vm_allocator::AllocPolicy;
use vm_memory::GuestAddress;
#[cfg(target_arch = "aarch64")]
use crate::Vcpu;
use crate::arch::{ConfigurationError, configure_system_for_boot, load_kernel};
#[cfg(target_arch = "aarch64")]
use crate::construct_kvm_mpidrs;
use crate::cpu_config::templates::{GetCpuTemplate, GetCpuTemplateError, GuestConfigError};
#[cfg(target_arch = "x86_64")]
use crate::device_manager;
use crate::device_manager::pci_mngr::PciManagerError;
use crate::device_manager::{
AttachDeviceError, DeviceManager, DeviceManagerCreateError, DevicePersistError,
DeviceRestoreArgs,
};
use crate::devices::virtio::balloon::Balloon;
use crate::devices::virtio::block::device::Block;
use crate::devices::virtio::mem::{VIRTIO_MEM_DEFAULT_SLOT_SIZE_MIB, VirtioMem};
use crate::devices::virtio::net::Net;
use crate::devices::virtio::pmem::device::Pmem;
use crate::devices::virtio::rng::Entropy;
use crate::devices::virtio::vsock::{Vsock, VsockUnixBackend};
#[cfg(feature = "gdb")]
use crate::gdb;
use crate::initrd::{InitrdConfig, InitrdError};
use crate::logger::debug;
use crate::persist::{MicrovmState, MicrovmStateError};
use crate::resources::VmResources;
use crate::seccomp::BpfThreadMap;
use crate::snapshot::Persist;
use crate::utils::mib_to_bytes;
use crate::vmm_config::instance_info::InstanceInfo;
use crate::vmm_config::machine_config::MachineConfigError;
use crate::vmm_config::memory_hotplug::MemoryHotplugConfig;
use crate::vstate::kvm::{Kvm, KvmError};
use crate::vstate::memory::GuestRegionMmap;
#[cfg(target_arch = "aarch64")]
use crate::vstate::resources::ResourceAllocator;
use crate::vstate::vcpu::VcpuError;
use crate::vstate::vm::{Vm, VmError};
use crate::{EventManager, Vmm, VmmError};
/// Errors associated with starting the instance.
// NOTE: `displaydoc::Display` derives `Display` from the `///` doc comment on
// each variant, so the doc strings below are runtime error messages (with
// `{0}` formatting the payload) — edit them with care.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum StartMicrovmError {
    /// Unable to attach block device to Vmm: {0}
    AttachBlockDevice(io::Error),
    /// Could not attach device: {0}
    AttachDevice(#[from] AttachDeviceError),
    /// System configuration error: {0}
    ConfigureSystem(#[from] ConfigurationError),
    /// Failed to create device manager: {0}
    CreateDeviceManager(#[from] DeviceManagerCreateError),
    /// Failed to create guest config: {0}
    CreateGuestConfig(#[from] GuestConfigError),
    /// Cannot create network device: {0}
    CreateNetDevice(crate::devices::virtio::net::NetError),
    /// Cannot create pmem device: {0}
    CreatePmemDevice(#[from] crate::devices::virtio::pmem::device::PmemError),
    /// Cannot create RateLimiter: {0}
    CreateRateLimiter(io::Error),
    /// Error creating legacy device: {0}
    #[cfg(target_arch = "x86_64")]
    CreateLegacyDevice(device_manager::legacy::LegacyDeviceError),
    /// Error enabling PCIe support: {0}
    EnablePciDevices(#[from] PciManagerError),
    /// Error enabling pvtime on vcpu: {0}
    #[cfg(target_arch = "aarch64")]
    EnablePVTime(crate::arch::VcpuArchError),
    /// Invalid Memory Configuration: {0}
    GuestMemory(crate::vstate::memory::MemoryError),
    /// Error with initrd initialization: {0}.
    Initrd(#[from] InitrdError),
    /// Internal error while starting microVM: {0}
    Internal(#[from] VmmError),
    /// Failed to get CPU template: {0}
    GetCpuTemplate(#[from] GetCpuTemplateError),
    /// Invalid kernel command line: {0}
    KernelCmdline(String),
    /// Kvm error: {0}
    Kvm(#[from] KvmError),
    /// Cannot load command line string: {0}
    LoadCommandline(linux_loader::loader::Error),
    /// Cannot start microvm without kernel configuration.
    MissingKernelConfig,
    /// Cannot start microvm without guest mem_size config.
    MissingMemSizeConfig,
    /// No seccomp filter for thread category: {0}
    MissingSeccompFilters(String),
    /// The net device configuration is missing the tap device.
    NetDeviceNotConfigured,
    /// Cannot open the block device backing file: {0}
    OpenBlockDevice(io::Error),
    /// Cannot restore microvm state: {0}
    RestoreMicrovmState(MicrovmStateError),
    /// Cannot set vm resources: {0}
    SetVmResources(MachineConfigError),
    /// Cannot create the entropy device: {0}
    CreateEntropyDevice(crate::devices::virtio::rng::EntropyError),
    /// Failed to allocate guest resource: {0}
    AllocateResources(#[from] vm_allocator::Error),
    /// Error starting GDB debug session: {0}
    #[cfg(feature = "gdb")]
    GdbServer(gdb::target::GdbTargetError),
    /// Error cloning Vcpu fds
    #[cfg(feature = "gdb")]
    VcpuFdCloneError(#[from] crate::vstate::vcpu::CopyKvmFdError),
    /// Error with the Vm object: {0}
    Vm(#[from] VmError),
}
/// Allows `?` to promote `linux_loader::cmdline::Error`s directly into
/// [`StartMicrovmError::KernelCmdline`].
impl std::convert::From<linux_loader::cmdline::Error> for StartMicrovmError {
    fn from(err: linux_loader::cmdline::Error) -> StartMicrovmError {
        // Carry only the rendered message; callers never match on the source.
        let rendered = err.to_string();
        StartMicrovmError::KernelCmdline(rendered)
    }
}
/// Builds and starts a microVM based on the current Firecracker VmResources configuration.
///
/// The built microVM and all the created vCPUs start off in the paused state.
/// To boot the microVM and run those vCPUs, `Vmm::resume_vm()` needs to be
/// called.
pub fn build_microvm_for_boot(
    instance_info: &InstanceInfo,
    vm_resources: &super::resources::VmResources,
    event_manager: &mut EventManager,
    seccomp_filters: &BpfThreadMap,
) -> Result<Arc<Mutex<Vmm>>, StartMicrovmError> {
    // Timestamp for measuring microVM boot duration.
    let request_ts = TimestampUs::default();

    // A kernel must have been configured before we can boot anything.
    let boot_config = vm_resources
        .boot_source
        .builder
        .as_ref()
        .ok_or(StartMicrovmError::MissingKernelConfig)?;

    let guest_memory = vm_resources
        .allocate_guest_memory()
        .map_err(StartMicrovmError::GuestMemory)?;

    // Clone the command-line so that a failed boot doesn't pollute the original.
    #[allow(unused_mut)]
    let mut boot_cmdline = boot_config.cmdline.clone();

    let cpu_template = vm_resources
        .machine_config
        .cpu_template
        .get_cpu_template()?;

    let kvm = Kvm::new(cpu_template.kvm_capabilities.clone())?;
    // Set up Kvm Vm and register memory regions.
    // Build custom CPU config if a custom template is provided.
    let mut vm = Vm::new(&kvm)?;
    let (mut vcpus, vcpus_exit_evt) = vm.create_vcpus(vm_resources.machine_config.vcpu_count)?;
    vm.register_dram_memory_regions(guest_memory)?;

    // Allocate memory as soon as possible to make hotpluggable memory available to all consumers,
    // before they clone the GuestMemoryMmap object
    let virtio_mem_addr = if let Some(memory_hotplug) = &vm_resources.memory_hotplug {
        let addr = allocate_virtio_mem_address(&vm, memory_hotplug.total_size_mib)?;
        let hotplug_memory_region = vm_resources
            .allocate_memory_region(addr, mib_to_bytes(memory_hotplug.total_size_mib))
            .map_err(StartMicrovmError::GuestMemory)?;
        vm.register_hotpluggable_memory_region(
            hotplug_memory_region,
            mib_to_bytes(memory_hotplug.slot_size_mib),
        )?;
        Some(addr)
    } else {
        None
    };

    let mut device_manager = DeviceManager::new(
        event_manager,
        &vcpus_exit_evt,
        &vm,
        vm_resources.serial_out_path.as_ref(),
    )?;

    let vm = Arc::new(vm);

    let entry_point = load_kernel(&boot_config.kernel_file, vm.guest_memory())?;
    let initrd = InitrdConfig::from_config(boot_config, vm.guest_memory())?;

    if vm_resources.pci_enabled {
        device_manager.enable_pci(&vm)?;
    } else {
        // Reflect the absence of PCI support on the guest command line.
        boot_cmdline.insert("pci", "off")?;
    }

    // The boot timer device needs to be the first device attached in order
    // to maintain the same MMIO address referenced in the documentation
    // and tests.
    if vm_resources.boot_timer {
        device_manager.attach_boot_timer_device(&vm, request_ts)?;
    }

    if let Some(balloon) = vm_resources.balloon.get() {
        attach_balloon_device(
            &mut device_manager,
            &vm,
            &mut boot_cmdline,
            balloon,
            event_manager,
        )?;
    }

    attach_block_devices(
        &mut device_manager,
        &vm,
        &mut boot_cmdline,
        vm_resources.block.devices.iter(),
        event_manager,
    )?;
    attach_net_devices(
        &mut device_manager,
        &vm,
        &mut boot_cmdline,
        vm_resources.net_builder.iter(),
        event_manager,
    )?;
    attach_pmem_devices(
        &mut device_manager,
        &vm,
        &mut boot_cmdline,
        vm_resources.pmem.devices.iter(),
        event_manager,
    )?;

    if let Some(unix_vsock) = vm_resources.vsock.get() {
        attach_unixsock_vsock_device(
            &mut device_manager,
            &vm,
            &mut boot_cmdline,
            unix_vsock,
            event_manager,
        )?;
    }

    if let Some(entropy) = vm_resources.entropy.get() {
        attach_entropy_device(
            &mut device_manager,
            &vm,
            &mut boot_cmdline,
            entropy,
            event_manager,
        )?;
    }

    // Attach virtio-mem device if configured
    if let Some(memory_hotplug) = &vm_resources.memory_hotplug {
        attach_virtio_mem_device(
            &mut device_manager,
            &vm,
            &mut boot_cmdline,
            memory_hotplug,
            event_manager,
            // Guaranteed: the hotplug branch above ran and set the address.
            virtio_mem_addr.expect("address should be allocated"),
        )?;
    }

    #[cfg(target_arch = "aarch64")]
    device_manager.attach_legacy_devices_aarch64(
        &vm,
        event_manager,
        &mut boot_cmdline,
        vm_resources.serial_out_path.as_ref(),
    )?;

    device_manager.attach_vmgenid_device(&vm)?;

    #[cfg(target_arch = "x86_64")]
    device_manager.attach_vmclock_device(&vm)?;

    #[cfg(target_arch = "aarch64")]
    if vcpus[0].kvm_vcpu.supports_pvtime() {
        setup_pvtime(&mut vm.resource_allocator(), &mut vcpus)?;
    } else {
        log::warn!("Vcpus do not support pvtime, steal time will not be reported to guest");
    }

    configure_system_for_boot(
        &kvm,
        &vm,
        &mut device_manager,
        vcpus.as_mut(),
        &vm_resources.machine_config,
        &cpu_template,
        entry_point,
        &initrd,
        boot_cmdline,
    )?;

    let vmm = Vmm {
        instance_info: instance_info.clone(),
        shutdown_exit_code: None,
        kvm,
        vm,
        uffd: None,
        vcpus_handles: Vec::new(),
        vcpus_exit_evt,
        device_manager,
    };
    let vmm = Arc::new(Mutex::new(vmm));

    #[cfg(feature = "gdb")]
    let (gdb_tx, gdb_rx) = mpsc::channel();
    #[cfg(feature = "gdb")]
    vcpus
        .iter_mut()
        .for_each(|vcpu| vcpu.attach_debug_info(gdb_tx.clone()));

    // Move vcpus to their own threads and start their state machine in the 'Paused' state.
    vmm.lock()
        .unwrap()
        .start_vcpus(
            vcpus,
            seccomp_filters
                .get("vcpu")
                .ok_or_else(|| StartMicrovmError::MissingSeccompFilters("vcpu".to_string()))?
                .clone(),
        )
        .map_err(VmmError::VcpuStart)?;

    #[cfg(feature = "gdb")]
    if let Some(gdb_socket_path) = &vm_resources.machine_config.gdb_socket_path {
        gdb::gdb_thread(vmm.clone(), gdb_rx, entry_point.entry_addr, gdb_socket_path)
            .map_err(StartMicrovmError::GdbServer)?;
    } else {
        debug!("No GDB socket provided not starting gdb server.");
    }

    // Load seccomp filters for the VMM thread.
    // Execution panics if filters cannot be loaded, use --no-seccomp if skipping filters
    // altogether is the desired behaviour.
    // Keep this as the last step before resuming vcpus.
    crate::seccomp::apply_filter(
        seccomp_filters
            .get("vmm")
            .ok_or_else(|| StartMicrovmError::MissingSeccompFilters("vmm".to_string()))?,
    )
    .map_err(VmmError::SeccompFilters)?;

    event_manager.add_subscriber(vmm.clone());

    Ok(vmm)
}
/// Builds and boots a microVM based on the current Firecracker VmResources configuration.
///
/// This is the default build recipe; other microVM flavors can be produced by
/// composing the standalone functions in this module instead of calling it.
///
/// An `Arc` reference of the built `Vmm` is also plugged in the `EventManager`, while another
/// is returned.
pub fn build_and_boot_microvm(
    instance_info: &InstanceInfo,
    vm_resources: &super::resources::VmResources,
    event_manager: &mut EventManager,
    seccomp_filters: &BpfThreadMap,
) -> Result<Arc<Mutex<Vmm>>, StartMicrovmError> {
    debug!("event_start: build microvm for boot");
    let microvm =
        build_microvm_for_boot(instance_info, vm_resources, event_manager, seccomp_filters)?;
    debug!("event_end: build microvm for boot");

    // The vcpus start off in the `Paused` state, let them run.
    debug!("event_start: boot microvm");
    microvm.lock().unwrap().resume_vm()?;
    debug!("event_end: boot microvm");

    Ok(microvm)
}
/// Error type for [`build_microvm_from_snapshot`].
// NOTE: `displaydoc::Display` derives `Display` from the `///` doc comment on
// each variant, so the doc strings below are runtime error messages.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum BuildMicrovmFromSnapshotError {
    /// Failed to create microVM and vCPUs: {0}
    CreateMicrovmAndVcpus(#[from] StartMicrovmError),
    /// Could not access KVM: {0}
    KvmAccess(#[from] vmm_sys_util::errno::Error),
    /// Error configuring the TSC, frequency not present in the given snapshot.
    TscFrequencyNotPresent,
    #[cfg(target_arch = "x86_64")]
    /// Could not get TSC to check if TSC scaling was required with the snapshot: {0}
    GetTsc(#[from] crate::arch::GetTscError),
    #[cfg(target_arch = "x86_64")]
    /// Could not set TSC scaling within the snapshot: {0}
    SetTsc(#[from] crate::arch::SetTscError),
    /// Failed to restore microVM state: {0}
    RestoreState(#[from] crate::vstate::vm::ArchVmError),
    /// Failed to update microVM configuration: {0}
    VmUpdateConfig(#[from] MachineConfigError),
    /// Failed to restore MMIO device: {0}
    RestoreMmioDevice(#[from] MicrovmStateError),
    /// Failed to emulate MMIO serial: {0}
    EmulateSerialInit(#[from] crate::EmulateSerialInitError),
    /// Failed to start vCPUs as no vCPU seccomp filter found.
    MissingVcpuSeccompFilters,
    /// Failed to start vCPUs: {0}
    StartVcpus(#[from] crate::StartVcpusError),
    /// Failed to restore vCPUs: {0}
    RestoreVcpus(#[from] VcpuError),
    // Fixed "secccomp" -> "seccomp" typo in the two messages below; these doc
    // comments are the user-facing Display output of the variants.
    /// Failed to apply VMM seccomp filter as none found.
    MissingVmmSeccompFilters,
    /// Failed to apply VMM seccomp filter: {0}
    SeccompFiltersInternal(#[from] crate::seccomp::InstallationError),
    /// Failed to restore devices: {0}
    RestoreDevices(#[from] DevicePersistError),
}
/// Builds and starts a microVM based on the provided MicrovmState.
///
/// An `Arc` reference of the built `Vmm` is also plugged in the `EventManager`, while another
/// is returned.
#[allow(clippy::too_many_arguments)]
pub fn build_microvm_from_snapshot(
    instance_info: &InstanceInfo,
    event_manager: &mut EventManager,
    microvm_state: MicrovmState,
    guest_memory: Vec<GuestRegionMmap>,
    uffd: Option<Uffd>,
    seccomp_filters: &BpfThreadMap,
    vm_resources: &mut VmResources,
) -> Result<Arc<Mutex<Vmm>>, BuildMicrovmFromSnapshotError> {
    // Build Vmm.
    debug!("event_start: build microvm from snapshot");
    // Recreate KVM with the capability modifiers captured in the snapshot.
    let kvm = Kvm::new(microvm_state.kvm_state.kvm_cap_modifiers.clone())
        .map_err(StartMicrovmError::Kvm)?;
    // Set up Kvm Vm and register memory regions.
    // Build custom CPU config if a custom template is provided.
    let mut vm = Vm::new(&kvm).map_err(StartMicrovmError::Vm)?;
    let (mut vcpus, vcpus_exit_evt) = vm
        .create_vcpus(vm_resources.machine_config.vcpu_count)
        .map_err(StartMicrovmError::Vm)?;

    vm.restore_memory_regions(guest_memory, &microvm_state.vm_state.memory)
        .map_err(StartMicrovmError::Vm)?;

    #[cfg(target_arch = "x86_64")]
    {
        // Scale TSC to match, extract the TSC freq from the state if specified
        if let Some(state_tsc) = microvm_state.vcpu_states[0].tsc_khz {
            // Scale the TSC frequency for all VCPUs. If a TSC frequency is not specified in the
            // snapshot, by default it uses the host frequency.
            if vcpus[0].kvm_vcpu.is_tsc_scaling_required(state_tsc)? {
                for vcpu in &vcpus {
                    vcpu.kvm_vcpu.set_tsc_khz(state_tsc)?;
                }
            }
        }
    }

    // Restore vcpus kvm state.
    for (vcpu, state) in vcpus.iter_mut().zip(microvm_state.vcpu_states.iter()) {
        vcpu.kvm_vcpu
            .restore_state(state)
            .map_err(VcpuError::VcpuResponse)
            .map_err(BuildMicrovmFromSnapshotError::RestoreVcpus)?;
    }

    #[cfg(target_arch = "aarch64")]
    {
        let mpidrs = construct_kvm_mpidrs(&microvm_state.vcpu_states);
        // Restore kvm vm state.
        vm.restore_state(&mpidrs, &microvm_state.vm_state)?;
    }

    // Restore kvm vm state.
    #[cfg(target_arch = "x86_64")]
    vm.restore_state(&microvm_state.vm_state)?;

    // Restore the boot source config paths.
    vm_resources.boot_source.config = microvm_state.vm_info.boot_source;

    let vm = Arc::new(vm);

    // Restore devices states.
    // Restoring VMGenID injects an interrupt in the guest to notify it about the new generation
    // ID. As a result, we need to restore DeviceManager after restoring the KVM state, otherwise
    // the injected interrupt will be overwritten.
    let device_ctor_args = DeviceRestoreArgs {
        mem: vm.guest_memory(),
        vm: &vm,
        event_manager,
        vm_resources,
        instance_id: &instance_info.id,
        vcpus_exit_evt: &vcpus_exit_evt,
    };
    #[allow(unused_mut)]
    let mut device_manager =
        DeviceManager::restore(device_ctor_args, &microvm_state.device_states)?;

    let mut vmm = Vmm {
        instance_info: instance_info.clone(),
        shutdown_exit_code: None,
        kvm,
        vm,
        uffd,
        vcpus_handles: Vec::new(),
        vcpus_exit_evt,
        device_manager,
    };

    // Move vcpus to their own threads and start their state machine in the 'Paused' state.
    vmm.start_vcpus(
        vcpus,
        seccomp_filters
            .get("vcpu")
            .ok_or(BuildMicrovmFromSnapshotError::MissingVcpuSeccompFilters)?
            .clone(),
    )?;

    let vmm = Arc::new(Mutex::new(vmm));

    event_manager.add_subscriber(vmm.clone());

    // Load seccomp filters for the VMM thread.
    // Keep this as the last step of the building process.
    crate::seccomp::apply_filter(
        seccomp_filters
            .get("vmm")
            .ok_or(BuildMicrovmFromSnapshotError::MissingVmmSeccompFilters)?,
    )?;

    debug!("event_end: build microvm from snapshot");
    Ok(vmm)
}
/// 64 bytes due to alignment requirement in 3.1 of https://www.kernel.org/doc/html/v5.8/virt/kvm/devices/vcpu.html#attribute-kvm-arm-vcpu-pvtime-ipa
// Doubles as both the per-vCPU steal-time record size and the alignment of the
// pvtime region (see `allocate_pvtime_region`).
#[cfg(target_arch = "aarch64")]
const STEALTIME_STRUCT_MEM_SIZE: u64 = 64;
/// Reserves guest system memory for the per-vCPU steal-time records.
#[cfg(target_arch = "aarch64")]
fn allocate_pvtime_region(
    resource_allocator: &mut ResourceAllocator,
    vcpu_count: usize,
    policy: vm_allocator::AllocPolicy,
) -> Result<GuestAddress, StartMicrovmError> {
    // One fixed-size record per vCPU, aligned to the record size.
    let region_len = vcpu_count as u64 * STEALTIME_STRUCT_MEM_SIZE;
    match resource_allocator.allocate_system_memory(region_len, STEALTIME_STRUCT_MEM_SIZE, policy)
    {
        Ok(base) => Ok(GuestAddress(base)),
        Err(err) => Err(StartMicrovmError::AllocateResources(err)),
    }
}
/// Enables pvtime (steal-time reporting) on every vCPU.
#[cfg(target_arch = "aarch64")]
fn setup_pvtime(
    resource_allocator: &mut ResourceAllocator,
    vcpus: &mut [Vcpu],
) -> Result<(), StartMicrovmError> {
    // Reserve one steal-time slot per vCPU in guest system memory.
    let base = allocate_pvtime_region(
        resource_allocator,
        vcpus.len(),
        vm_allocator::AllocPolicy::LastMatch,
    )?;

    // Hand every vCPU the guest-physical address of its own slot.
    for (idx, vcpu) in vcpus.iter_mut().enumerate() {
        let slot = GuestAddress(base.0 + STEALTIME_STRUCT_MEM_SIZE * idx as u64);
        vcpu.kvm_vcpu
            .enable_pvtime(slot)
            .map_err(StartMicrovmError::EnablePVTime)?;
    }
    Ok(())
}
/// Registers the entropy device with the event manager and attaches it as a
/// virtio device.
fn attach_entropy_device(
    device_manager: &mut DeviceManager,
    vm: &Arc<Vm>,
    cmdline: &mut LoaderKernelCmdline,
    entropy_device: &Arc<Mutex<Entropy>>,
    event_manager: &mut EventManager,
) -> Result<(), AttachDeviceError> {
    // Grab the id up front; the device mutex must be released before attach.
    let device_id = {
        let locked = entropy_device.lock().expect("Poisoned lock");
        locked.id().to_string()
    };

    event_manager.add_subscriber(entropy_device.clone());
    device_manager.attach_virtio_device(vm, device_id, entropy_device.clone(), cmdline, false)
}
/// Picks a guest-physical base address for the virtio-mem hotplug region from
/// the range past 64-bit MMIO memory.
fn allocate_virtio_mem_address(
    vm: &Vm,
    total_size_mib: usize,
) -> Result<GuestAddress, StartMicrovmError> {
    let region_bytes = mib_to_bytes(total_size_mib) as u64;
    // Align the region to the default virtio-mem slot size.
    let alignment = mib_to_bytes(VIRTIO_MEM_DEFAULT_SLOT_SIZE_MIB) as u64;

    let range = vm.resource_allocator().past_mmio64_memory.allocate(
        region_bytes,
        alignment,
        AllocPolicy::FirstMatch,
    )?;
    Ok(GuestAddress(range.start()))
}
/// Creates the virtio-mem device at `addr` from the hotplug `config` and
/// attaches it to the VM.
fn attach_virtio_mem_device(
    device_manager: &mut DeviceManager,
    vm: &Arc<Vm>,
    cmdline: &mut LoaderKernelCmdline,
    config: &MemoryHotplugConfig,
    event_manager: &mut EventManager,
    addr: GuestAddress,
) -> Result<(), StartMicrovmError> {
    let device = VirtioMem::new(
        Arc::clone(vm),
        addr,
        config.total_size_mib,
        config.block_size_mib,
        config.slot_size_mib,
    )
    .map_err(|err| StartMicrovmError::Internal(VmmError::VirtioMem(err)))?;
    let device = Arc::new(Mutex::new(device));

    let device_id = device.lock().expect("Poisoned lock").id().to_string();
    event_manager.add_subscriber(Arc::clone(&device));
    device_manager.attach_virtio_device(vm, device_id, Arc::clone(&device), cmdline, false)?;
    Ok(())
}
/// Attaches every block device in `blocks`, updating the kernel command line
/// for the root device.
fn attach_block_devices<'a, I: Iterator<Item = &'a Arc<Mutex<Block>>> + Debug>(
    device_manager: &mut DeviceManager,
    vm: &Arc<Vm>,
    cmdline: &mut LoaderKernelCmdline,
    blocks: I,
    event_manager: &mut EventManager,
) -> Result<(), StartMicrovmError> {
    for block in blocks {
        let (device_id, vhost_user) = {
            let locked = block.lock().expect("Poisoned lock");
            if locked.root_device() {
                // Point the guest at its root filesystem: by PARTUUID when one
                // is configured, otherwise via the conventional /dev/vda node.
                if let Some(partuuid) = locked.partuuid() {
                    cmdline.insert_str(format!("root=PARTUUID={}", partuuid))?;
                } else {
                    cmdline.insert_str("root=/dev/vda")?;
                }
                let mode = if locked.read_only() { "ro" } else { "rw" };
                cmdline.insert_str(mode)?;
            }
            (locked.id().to_string(), locked.is_vhost_user())
        };
        // The device mutex mustn't be locked here otherwise it will deadlock.
        event_manager.add_subscriber(block.clone());
        device_manager.attach_virtio_device(vm, device_id, block.clone(), cmdline, vhost_user)?;
    }
    Ok(())
}
/// Attaches every network device in `net_devices` to the VM.
fn attach_net_devices<'a, I: Iterator<Item = &'a Arc<Mutex<Net>>> + Debug>(
    device_manager: &mut DeviceManager,
    vm: &Arc<Vm>,
    cmdline: &mut LoaderKernelCmdline,
    net_devices: I,
    event_manager: &mut EventManager,
) -> Result<(), StartMicrovmError> {
    for device in net_devices {
        let device_id = {
            let locked = device.lock().expect("Poisoned lock");
            locked.id().clone()
        };
        event_manager.add_subscriber(device.clone());
        // The device mutex mustn't be locked here otherwise it will deadlock.
        device_manager.attach_virtio_device(vm, device_id, device.clone(), cmdline, false)?;
    }
    Ok(())
}
/// Attaches every pmem device, wiring up its backing memory region and — for
/// the root device — the kernel command line.
fn attach_pmem_devices<'a, I: Iterator<Item = &'a Arc<Mutex<Pmem>>> + Debug>(
    device_manager: &mut DeviceManager,
    vm: &Arc<Vm>,
    cmdline: &mut LoaderKernelCmdline,
    pmem_devices: I,
    event_manager: &mut EventManager,
) -> Result<(), StartMicrovmError> {
    for (index, device) in pmem_devices.enumerate() {
        let device_id = {
            let mut locked = device.lock().expect("Poisoned lock");
            if locked.config.root_device {
                // Devices enumerate in attach order: /dev/pmem<index>.
                cmdline.insert_str(format!("root=/dev/pmem{index}"))?;
                let mode = if locked.config.read_only { "ro" } else { "rw" };
                cmdline.insert_str(mode)?;
            }
            locked.alloc_region(vm.as_ref());
            locked.set_mem_region(vm.as_ref())?;
            locked.config.id.to_string()
        };
        event_manager.add_subscriber(device.clone());
        device_manager.attach_virtio_device(vm, device_id, device.clone(), cmdline, false)?;
    }
    Ok(())
}
/// Registers the Unix-socket-backed vsock device with the event manager and
/// attaches it as a virtio device.
fn attach_unixsock_vsock_device(
    device_manager: &mut DeviceManager,
    vm: &Arc<Vm>,
    cmdline: &mut LoaderKernelCmdline,
    unix_vsock: &Arc<Mutex<Vsock<VsockUnixBackend>>>,
    event_manager: &mut EventManager,
) -> Result<(), AttachDeviceError> {
    let device_id = unix_vsock.lock().expect("Poisoned lock").id().to_string();
    event_manager.add_subscriber(unix_vsock.clone());
    // The device mutex mustn't be locked here otherwise it will deadlock.
    device_manager.attach_virtio_device(vm, device_id, unix_vsock.clone(), cmdline, false)
}
/// Registers the balloon device with the event manager and attaches it as a
/// virtio device.
fn attach_balloon_device(
    device_manager: &mut DeviceManager,
    vm: &Arc<Vm>,
    cmdline: &mut LoaderKernelCmdline,
    balloon: &Arc<Mutex<Balloon>>,
    event_manager: &mut EventManager,
) -> Result<(), AttachDeviceError> {
    let device_id = balloon.lock().expect("Poisoned lock").id().to_string();
    event_manager.add_subscriber(balloon.clone());
    // The device mutex mustn't be locked here otherwise it will deadlock.
    device_manager.attach_virtio_device(vm, device_id, balloon.clone(), cmdline, false)
}
#[cfg(test)]
pub(crate) mod tests {
use linux_loader::cmdline::Cmdline;
use vmm_sys_util::tempfile::TempFile;
use super::*;
use crate::device_manager::tests::default_device_manager;
use crate::devices::virtio::block::CacheType;
use crate::devices::virtio::generated::virtio_ids;
use crate::devices::virtio::rng::device::ENTROPY_DEV_ID;
use crate::devices::virtio::vsock::VSOCK_DEV_ID;
use crate::mmds::data_store::{Mmds, MmdsVersion};
use crate::mmds::ns::MmdsNetworkStack;
use crate::utils::mib_to_bytes;
use crate::vmm_config::balloon::{BALLOON_DEV_ID, BalloonBuilder, BalloonDeviceConfig};
use crate::vmm_config::boot_source::DEFAULT_KERNEL_CMDLINE;
use crate::vmm_config::drive::{BlockBuilder, BlockDeviceConfig};
use crate::vmm_config::entropy::{EntropyDeviceBuilder, EntropyDeviceConfig};
use crate::vmm_config::net::{NetBuilder, NetworkInterfaceConfig};
use crate::vmm_config::pmem::{PmemBuilder, PmemConfig};
use crate::vmm_config::vsock::tests::default_config;
use crate::vmm_config::vsock::{VsockBuilder, VsockDeviceConfig};
use crate::vstate::vm::tests::setup_vm_with_memory;
/// Per-device parameters used by `insert_block_devices` to build one test
/// block device; fields map 1:1 onto `BlockDeviceConfig`.
#[derive(Debug)]
pub(crate) struct CustomBlockConfig {
    drive_id: String,
    is_root_device: bool,
    partuuid: Option<String>,
    is_read_only: bool,
    cache_type: CacheType,
}

impl CustomBlockConfig {
    /// Plain field-by-field constructor.
    pub(crate) fn new(
        drive_id: String,
        is_root_device: bool,
        partuuid: Option<String>,
        is_read_only: bool,
        cache_type: CacheType,
    ) -> Self {
        CustomBlockConfig {
            drive_id,
            is_root_device,
            partuuid,
            is_read_only,
            cache_type,
        }
    }
}
/// Renders `cmdline` to a `String` and checks whether it contains `slug`.
fn cmdline_contains(cmdline: &Cmdline, slug: &str) -> bool {
    // The unwraps cannot fail: the command line was built exclusively through
    // `Cmdline::insert*`, which rejects empty lines and embedded NULs up
    // front, and the content originated from valid-UTF8 `String`s.
    let rendered = cmdline.as_cstring().unwrap().into_string().unwrap();
    rendered.contains(slug)
}
/// The kernel command line every builder test starts from.
pub(crate) fn default_kernel_cmdline() -> Cmdline {
    let parsed = linux_loader::cmdline::Cmdline::try_from(
        DEFAULT_KERNEL_CMDLINE,
        crate::arch::CMDLINE_MAX_SIZE,
    );
    parsed.unwrap()
}
/// Builds a minimal [`Vmm`] test fixture: 128 MiB of memory, default device
/// manager, no UFFD, and no running vCPUs.
pub(crate) fn default_vmm() -> Vmm {
    let (kvm, mut vm) = setup_vm_with_memory(mib_to_bytes(128));
    // Only the exit event is kept; the created vCPU itself is dropped.
    let (_, vcpus_exit_evt) = vm.create_vcpus(1).unwrap();

    Vmm {
        instance_info: InstanceInfo::default(),
        shutdown_exit_code: None,
        kvm,
        vm: Arc::new(vm),
        uffd: None,
        vcpus_handles: Vec::new(),
        vcpus_exit_evt,
        device_manager: default_device_manager(),
    }
}
/// Builds the block devices described by `custom_block_cfgs`, attaches them to
/// `vmm`, and returns the backing [`TempFile`]s (callers must keep them alive
/// for the duration of the test).
pub(crate) fn insert_block_devices(
    vmm: &mut Vmm,
    cmdline: &mut Cmdline,
    event_manager: &mut EventManager,
    custom_block_cfgs: Vec<CustomBlockConfig>,
) -> Vec<TempFile> {
    let mut block_dev_configs = BlockBuilder::new();
    let mut block_files = Vec::new();
    for custom_block_cfg in custom_block_cfgs {
        // Back every device with a fresh temporary file.
        block_files.push(TempFile::new().unwrap());

        let block_device_config = BlockDeviceConfig {
            drive_id: String::from(&custom_block_cfg.drive_id),
            partuuid: custom_block_cfg.partuuid,
            is_root_device: custom_block_cfg.is_root_device,
            cache_type: custom_block_cfg.cache_type,
            is_read_only: Some(custom_block_cfg.is_read_only),
            path_on_host: Some(
                block_files
                    .last()
                    .unwrap()
                    .as_path()
                    .to_str()
                    .unwrap()
                    .to_string(),
            ),
            rate_limiter: None,
            file_engine_type: None,
            socket: None,
        };
        block_dev_configs
            .insert(block_device_config, false)
            .unwrap();
    }

    attach_block_devices(
        &mut vmm.device_manager,
        &vmm.vm,
        cmdline,
        block_dev_configs.devices.iter(),
        event_manager,
    )
    .unwrap();
    block_files
}
/// Builds a net device from `net_config` and attaches it to `vmm`.
pub(crate) fn insert_net_device(
    vmm: &mut Vmm,
    cmdline: &mut Cmdline,
    event_manager: &mut EventManager,
    net_config: NetworkInterfaceConfig,
) {
    let mut net_builder = NetBuilder::new();
    net_builder.build(net_config).unwrap();

    attach_net_devices(
        &mut vmm.device_manager,
        &vmm.vm,
        cmdline,
        net_builder.iter(),
        event_manager,
    )
    .unwrap();
}
/// Like `insert_net_device`, but additionally wires an MMDS network stack at
/// the default IPv4 address (with the requested `mmds_version`) into the
/// freshly built net device before attaching it.
pub(crate) fn insert_net_device_with_mmds(
    vmm: &mut Vmm,
    cmdline: &mut Cmdline,
    event_manager: &mut EventManager,
    net_config: NetworkInterfaceConfig,
    mmds_version: MmdsVersion,
) {
    let mut net_builder = NetBuilder::new();
    net_builder.build(net_config).unwrap();
    let net = net_builder.iter().next().unwrap();
    let mut mmds = Mmds::default();
    mmds.set_version(mmds_version);
    net.lock().unwrap().configure_mmds_network_stack(
        MmdsNetworkStack::default_ipv4_addr(),
        Arc::new(Mutex::new(mmds)),
    );

    attach_net_devices(
        &mut vmm.device_manager,
        &vmm.vm,
        cmdline,
        net_builder.iter(),
        event_manager,
    )
    .unwrap();
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/lib.rs | src/vmm/src/lib.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! Virtual Machine Monitor that leverages the Linux Kernel-based Virtual Machine (KVM),
//! and other virtualization features to run a single lightweight micro-virtual
//! machine (microVM).
#![warn(missing_docs)]
#![warn(clippy::undocumented_unsafe_blocks)]
#![allow(clippy::blanket_clippy_restriction_lints)]
/// Implements platform specific functionality.
/// Supported platforms: x86_64 and aarch64.
pub mod arch;
/// High-level interface over Linux io_uring.
///
/// Aims to provide an easy-to-use interface, while making some Firecracker-specific simplifying
/// assumptions. The crate does not currently aim at supporting all io_uring features and use
/// cases. For example, it only works with pre-registered fds and read/write/fsync requests.
///
/// Requires at least kernel version 5.10.51.
/// For more information on io_uring, refer to the man pages.
/// [This pdf](https://kernel.dk/io_uring.pdf) is also very useful, though outdated at times.
pub mod io_uring;
/// # Rate Limiter
///
/// Provides a rate limiter written in Rust useful for IO operations that need to
/// be throttled.
///
/// ## Behavior
///
/// The rate limiter starts off as 'unblocked' with two token buckets configured
/// with the values passed in the `RateLimiter::new()` constructor.
/// All subsequent accounting is done independently for each token bucket based
/// on the `TokenType` used. If any of the buckets runs out of budget, the limiter
/// goes in the 'blocked' state. At this point an internal timer is set up which
/// will later 'wake up' the user in order to retry sending data. The 'wake up'
/// notification will be dispatched as an event on the FD provided by the `AsRawFD`
/// trait implementation.
///
/// The contract is that the user shall also call the `event_handler()` method on
/// receipt of such an event.
///
/// The token buckets are replenished when a called `consume()` doesn't find enough
/// tokens in the bucket. The amount of tokens replenished is automatically calculated
/// to respect the `complete_refill_time` configuration parameter provided by the user.
/// The token buckets will never replenish above their respective `size`.
///
/// Each token bucket can start off with a `one_time_burst` initial extra capacity
/// on top of their `size`. This initial extra credit does not replenish and
/// can be used for an initial burst of data.
///
/// The granularity for 'wake up' events when the rate limiter is blocked is
/// currently hardcoded to `100 milliseconds`.
///
/// ## Limitations
///
/// This rate limiter implementation relies on the *Linux kernel's timerfd* so its
/// usage is limited to Linux systems.
///
/// Another particularity of this implementation is that it is not self-driving.
/// It is meant to be used in an external event loop and thus implements the `AsRawFd`
/// trait and provides an *event-handler* as part of its API. This *event-handler*
/// needs to be called by the user on every event on the rate limiter's `AsRawFd` FD.
pub mod rate_limiter;
/// Module for handling ACPI tables.
/// Currently, we only use ACPI on x86 microVMs.
#[cfg(target_arch = "x86_64")]
pub mod acpi;
/// Handles setup and initialization a `Vmm` object.
pub mod builder;
/// Types for guest configuration.
pub mod cpu_config;
pub(crate) mod device_manager;
/// Emulates virtual and hardware devices.
#[allow(missing_docs)]
pub mod devices;
/// minimalist HTTP/TCP/IPv4 stack named DUMBO
pub mod dumbo;
/// Support for GDB debugging the guest
#[cfg(feature = "gdb")]
pub mod gdb;
/// Logger
pub mod logger;
/// microVM Metadata Service MMDS
pub mod mmds;
/// PCI specific emulation code.
pub mod pci;
/// Save/restore utilities.
pub mod persist;
/// Resource store for configured microVM resources.
pub mod resources;
/// microVM RPC API adapters.
pub mod rpc_interface;
/// Seccomp filter utilities.
pub mod seccomp;
/// Signal handling utilities.
pub mod signal_handler;
/// Serialization and deserialization facilities
pub mod snapshot;
/// Utility functions for integration and benchmark testing
pub mod test_utils;
/// Utility functions and struct
pub mod utils;
/// Wrappers over structures used to configure the VMM.
pub mod vmm_config;
/// Module with virtual state structs.
pub mod vstate;
/// Module with initrd.
pub mod initrd;
use std::collections::HashMap;
use std::io;
use std::os::unix::io::AsRawFd;
use std::sync::mpsc::RecvTimeoutError;
use std::sync::{Arc, Barrier, Mutex};
use std::time::Duration;
use device_manager::DeviceManager;
use event_manager::{EventManager as BaseEventManager, EventOps, Events, MutEventSubscriber};
use seccomp::BpfProgram;
use snapshot::Persist;
use userfaultfd::Uffd;
use vmm_sys_util::epoll::EventSet;
use vmm_sys_util::eventfd::EventFd;
use vmm_sys_util::terminal::Terminal;
use vstate::kvm::Kvm;
use vstate::vcpu::{self, StartThreadedError, VcpuSendEventError};
use crate::cpu_config::templates::CpuConfiguration;
use crate::devices::virtio::balloon::device::{HintingStatus, StartHintingCmd};
use crate::devices::virtio::balloon::{
BALLOON_DEV_ID, Balloon, BalloonConfig, BalloonError, BalloonStats,
};
use crate::devices::virtio::block::BlockError;
use crate::devices::virtio::block::device::Block;
use crate::devices::virtio::mem::{VIRTIO_MEM_DEV_ID, VirtioMem, VirtioMemError, VirtioMemStatus};
use crate::devices::virtio::net::Net;
use crate::logger::{METRICS, MetricsError, error, info, warn};
use crate::persist::{MicrovmState, MicrovmStateError, VmInfo};
use crate::rate_limiter::BucketUpdate;
use crate::vmm_config::instance_info::{InstanceInfo, VmState};
use crate::vstate::memory::{GuestMemory, GuestMemoryMmap, GuestMemoryRegion};
use crate::vstate::vcpu::VcpuState;
pub use crate::vstate::vcpu::{Vcpu, VcpuConfig, VcpuEvent, VcpuHandle, VcpuResponse};
pub use crate::vstate::vm::Vm;
/// Shorthand type for the EventManager flavour used by Firecracker.
pub type EventManager = BaseEventManager<Arc<Mutex<dyn MutEventSubscriber>>>;
// Since the exit code names e.g. `SIGBUS` are most appropriate yet trigger a test error with the
// clippy lint `upper_case_acronyms` we have disabled this lint for this enum.
/// Vmm exit-code type.
///
/// These variants are used as process exit codes, so every value must stay in
/// the 0..=255 range. The numeric values are intentionally not in declaration
/// order; they are part of the external contract with supervisors and must
/// not be renumbered.
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FcExitCode {
    /// Success exit code.
    Ok = 0,
    /// Generic error exit code.
    GenericError = 1,
    /// Generic exit code error; not possible to occur if the program logic is sound.
    UnexpectedError = 2,
    /// Firecracker was shut down after intercepting a restricted system call.
    BadSyscall = 148,
    /// Firecracker was shut down after intercepting `SIGBUS`.
    SIGBUS = 149,
    /// Firecracker was shut down after intercepting `SIGSEGV`.
    SIGSEGV = 150,
    /// Firecracker was shut down after intercepting `SIGXFSZ`.
    SIGXFSZ = 151,
    /// Firecracker was shut down after intercepting `SIGXCPU`.
    SIGXCPU = 154,
    /// Firecracker was shut down after intercepting `SIGPIPE`.
    SIGPIPE = 155,
    /// Firecracker was shut down after intercepting `SIGHUP`.
    SIGHUP = 156,
    /// Firecracker was shut down after intercepting `SIGILL`.
    SIGILL = 157,
    /// Bad configuration for microvm's resources, when using a single json.
    BadConfiguration = 152,
    /// Command line arguments parsing error.
    ArgParsing = 153,
}
/// Timeout used in recv_timeout, when waiting for a vcpu response on
/// Pause/Resume/Save/Restore. A high enough limit that should not be reached during normal usage,
/// used to detect a potential vcpu deadlock.
pub const RECV_TIMEOUT_SEC: Duration = Duration::from_secs(30);
/// Default byte limit of accepted http requests on API and MMDS servers.
/// (51200 bytes = 50 KiB.)
pub const HTTP_MAX_PAYLOAD_SIZE: usize = 51200;
/// Errors associated with the VMM internal logic. These errors cannot be generated by direct user
/// input, but can result from bad configuration of the host (for example if Firecracker doesn't
/// have permissions to open the KVM fd).
// Each variant's doc comment doubles as its `Display` message via `displaydoc`;
// changing the text changes user-visible error output.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum VmmError {
    #[cfg(target_arch = "aarch64")]
    /// Invalid command line error.
    Cmdline,
    /// Device manager error: {0}
    DeviceManager(#[from] device_manager::DeviceManagerCreateError),
    /// MMIO Device manager error: {0}
    MmioDeviceManager(device_manager::mmio::MmioError),
    /// Error getting the KVM dirty bitmap. {0}
    DirtyBitmap(kvm_ioctls::Error),
    /// I8042 error: {0}
    I8042Error(devices::legacy::I8042DeviceError),
    #[cfg(target_arch = "x86_64")]
    /// Cannot add devices to the legacy I/O Bus. {0}
    LegacyIOBus(device_manager::legacy::LegacyDeviceError),
    /// Metrics error: {0}
    Metrics(MetricsError),
    /// Cannot add a device to the MMIO Bus. {0}
    RegisterMMIODevice(device_manager::mmio::MmioError),
    /// Cannot install seccomp filters: {0}
    SeccompFilters(seccomp::InstallationError),
    /// Error writing to the serial console: {0}
    Serial(io::Error),
    /// Error creating the vcpu: {0}
    VcpuCreate(vstate::vcpu::VcpuError),
    /// Cannot send event to vCPU. {0}
    VcpuEvent(vstate::vcpu::VcpuError),
    /// Cannot create a vCPU handle. {0}
    VcpuHandle(vstate::vcpu::VcpuError),
    /// Failed to start vCPUs
    VcpuStart(StartVcpusError),
    /// Failed to pause the vCPUs.
    VcpuPause,
    /// Failed to exit the vCPUs.
    VcpuExit,
    /// Failed to resume the vCPUs.
    VcpuResume,
    /// Failed to message the vCPUs.
    VcpuMessage,
    /// Cannot spawn Vcpu thread: {0}
    VcpuSpawn(io::Error),
    /// Vm error: {0}
    Vm(#[from] vstate::vm::VmError),
    /// Kvm error: {0}
    Kvm(#[from] vstate::kvm::KvmError),
    /// Failed perform action on device: {0}
    FindDeviceError(#[from] device_manager::FindDeviceError),
    /// Block: {0}
    Block(#[from] BlockError),
    /// Balloon: {0}
    Balloon(#[from] BalloonError),
    /// Failed to create memory hotplug device: {0}
    VirtioMem(#[from] VirtioMemError),
}
/// Shorthand type for KVM dirty page bitmap.
// Keyed by memory slot id; each `Vec<u64>` holds packed per-page dirty bits
// (assumption based on usage with `VmmError::DirtyBitmap` — confirm against
// the KVM dirty-log API callers).
pub type DirtyBitmap = HashMap<u32, Vec<u64>>;
/// Returns the size of guest memory, in MiB.
// Sums the byte lengths of all guest memory regions, then converts bytes to
// MiB with a right shift by 20 (2^20 bytes per MiB); any remainder truncates.
pub(crate) fn mem_size_mib(guest_memory: &GuestMemoryMmap) -> u64 {
    guest_memory.iter().map(|region| region.len()).sum::<u64>() >> 20
}
// Error type for [`Vmm::emulate_serial_init`].
/// Emulate serial init error: {0}
// Thin newtype over the underlying I/O error; `#[from]` lets call sites use `?`.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub struct EmulateSerialInitError(#[from] std::io::Error);
/// Error type for [`Vmm::start_vcpus`].
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum StartVcpusError {
    /// VMM observer init error: {0}
    VmmObserverInit(#[from] vmm_sys_util::errno::Error),
    /// Vcpu handle error: {0}
    VcpuHandle(#[from] StartThreadedError),
}
/// Error type for [`Vmm::dump_cpu_config()`]
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum DumpCpuConfigError {
    /// Failed to send event to vcpu thread: {0}
    SendEvent(#[from] VcpuSendEventError),
    /// Got unexpected response from vcpu thread.
    UnexpectedResponse,
    /// Failed to dump CPU config: {0}
    DumpCpuConfig(#[from] vcpu::VcpuError),
    /// Operation not allowed: {0}
    NotAllowed(String),
}
/// Contains the state and associated methods required for the Firecracker VMM.
#[derive(Debug)]
pub struct Vmm {
    /// The [`InstanceInfo`] state of this [`Vmm`].
    pub instance_info: InstanceInfo,
    // `Some(code)` once teardown has started; the main event loop exits when set.
    shutdown_exit_code: Option<FcExitCode>,
    // Guest VM core resources.
    kvm: Kvm,
    /// VM object
    pub vm: Arc<Vm>,
    // Save UFFD in order to keep it open in the Firecracker process, as well.
    #[allow(unused)]
    uffd: Option<Uffd>,
    /// Handles to the vcpu threads with vcpu_fds inside them.
    pub vcpus_handles: Vec<VcpuHandle>,
    // Used by Vcpus and devices to initiate teardown; Vmm should never write here.
    vcpus_exit_evt: EventFd,
    // Device manager
    device_manager: DeviceManager,
}
impl Vmm {
    /// Gets Vmm version.
    // Returns an owned `String` so callers are independent of `self`'s lifetime.
    pub fn version(&self) -> String {
        self.instance_info.vmm_version.clone()
    }
    /// Gets Vmm instance info.
    pub fn instance_info(&self) -> InstanceInfo {
        self.instance_info.clone()
    }
    /// Provides the Vmm shutdown exit code if there is one.
    // `None` means teardown has not been initiated yet.
    pub fn shutdown_exit_code(&self) -> Option<FcExitCode> {
        self.shutdown_exit_code
    }
    /// Starts the microVM vcpus.
    ///
    /// Spawns one thread per vCPU, puts the terminal into raw/non-blocking
    /// mode, and leaves the instance in the `Paused` state (a separate
    /// `resume_vm` call starts guest execution).
    ///
    /// # Errors
    ///
    /// When:
    /// - [`vmm::VmmEventsObserver::on_vmm_boot`] errors.
    /// - [`vmm::vstate::vcpu::Vcpu::start_threaded`] errors.
    pub fn start_vcpus(
        &mut self,
        mut vcpus: Vec<Vcpu>,
        vcpu_seccomp_filter: Arc<BpfProgram>,
    ) -> Result<(), StartVcpusError> {
        let vcpu_count = vcpus.len();
        // `+ 1`: this thread also waits on the barrier below, alongside every
        // vcpu thread.
        let barrier = Arc::new(Barrier::new(vcpu_count + 1));
        let stdin = std::io::stdin().lock();
        // Set raw mode for stdin.
        stdin.set_raw_mode().inspect_err(|&err| {
            warn!("Cannot set raw mode for the terminal. {:?}", err);
        })?;
        // Set non blocking stdin.
        stdin.set_non_block(true).inspect_err(|&err| {
            warn!("Cannot set non block for the terminal. {:?}", err);
        })?;
        self.vcpus_handles.reserve(vcpu_count);
        for mut vcpu in vcpus.drain(..) {
            // Hook each vcpu up to the MMIO (and on x86_64, PIO) bus before
            // its thread starts running.
            vcpu.set_mmio_bus(self.vm.common.mmio_bus.clone());
            #[cfg(target_arch = "x86_64")]
            vcpu.kvm_vcpu.set_pio_bus(self.vm.pio_bus.clone());
            self.vcpus_handles.push(vcpu.start_threaded(
                &self.vm,
                vcpu_seccomp_filter.clone(),
                barrier.clone(),
            )?);
        }
        self.instance_info.state = VmState::Paused;
        // Wait for vCPUs to initialize their TLS before moving forward.
        barrier.wait();
        Ok(())
    }
    /// Sends a resume command to the vCPUs.
    pub fn resume_vm(&mut self) -> Result<(), VmmError> {
        // Kick virtio devices before resuming the vCPUs.
        self.device_manager.kick_virtio_devices();
        // Send the events.
        self.vcpus_handles
            .iter_mut()
            .try_for_each(|handle| handle.send_event(VcpuEvent::Resume))
            .map_err(|_| VmmError::VcpuMessage)?;
        // Check the responses.
        // Any reply other than `Resumed` — including a timeout — fails the call.
        if self
            .vcpus_handles
            .iter()
            .map(|handle| handle.response_receiver().recv_timeout(RECV_TIMEOUT_SEC))
            .any(|response| !matches!(response, Ok(VcpuResponse::Resumed)))
        {
            return Err(VmmError::VcpuMessage);
        }
        self.instance_info.state = VmState::Running;
        Ok(())
    }
    /// Sends a pause command to the vCPUs.
    // Mirrors `resume_vm`, but expects `Paused` replies and does not kick devices.
    pub fn pause_vm(&mut self) -> Result<(), VmmError> {
        // Send the events.
        self.vcpus_handles
            .iter_mut()
            .try_for_each(|handle| handle.send_event(VcpuEvent::Pause))
            .map_err(|_| VmmError::VcpuMessage)?;
        // Check the responses.
        if self
            .vcpus_handles
            .iter()
            .map(|handle| handle.response_receiver().recv_timeout(RECV_TIMEOUT_SEC))
            .any(|response| !matches!(response, Ok(VcpuResponse::Paused)))
        {
            return Err(VmmError::VcpuMessage);
        }
        self.instance_info.state = VmState::Paused;
        Ok(())
    }
    /// Injects CTRL+ALT+DEL keystroke combo in the i8042 device.
    // x86_64-only: aarch64 microVMs have no i8042 controller.
    #[cfg(target_arch = "x86_64")]
    pub fn send_ctrl_alt_del(&mut self) -> Result<(), VmmError> {
        self.device_manager
            .legacy_devices
            .i8042
            .lock()
            .expect("i8042 lock was poisoned")
            .trigger_ctrl_alt_del()
            .map_err(VmmError::I8042Error)
    }
    /// Saves the state of a paused Microvm.
    ///
    /// Collects vCPU, KVM, VM and device state into a single [`MicrovmState`].
    pub fn save_state(&mut self, vm_info: &VmInfo) -> Result<MicrovmState, MicrovmStateError> {
        use self::MicrovmStateError::SaveVmState;
        let vcpu_states = self.save_vcpu_states()?;
        let kvm_state = self.kvm.save_state();
        let vm_state = {
            #[cfg(target_arch = "x86_64")]
            {
                self.vm.save_state().map_err(SaveVmState)?
            }
            #[cfg(target_arch = "aarch64")]
            {
                // aarch64 needs the per-vcpu MPIDR values (see
                // `construct_kvm_mpidrs`) to save VM state.
                let mpidrs = construct_kvm_mpidrs(&vcpu_states);
                self.vm.save_state(&mpidrs).map_err(SaveVmState)?
            }
        };
        let device_states = self.device_manager.save();
        Ok(MicrovmState {
            vm_info: vm_info.clone(),
            kvm_state,
            vm_state,
            vcpu_states,
            device_states,
        })
    }
    // Asks every vCPU thread to serialize its state and collects the replies
    // in vCPU order.
    fn save_vcpu_states(&mut self) -> Result<Vec<VcpuState>, MicrovmStateError> {
        for handle in self.vcpus_handles.iter_mut() {
            handle
                .send_event(VcpuEvent::SaveState)
                .map_err(MicrovmStateError::SignalVcpu)?;
        }
        // A timeout from any receiver maps to `UnexpectedVcpuResponse`.
        let vcpu_responses = self
            .vcpus_handles
            .iter()
            // `Iterator::collect` can transform a `Vec<Result>` into a `Result<Vec>`.
            .map(|handle| handle.response_receiver().recv_timeout(RECV_TIMEOUT_SEC))
            .collect::<Result<Vec<VcpuResponse>, RecvTimeoutError>>()
            .map_err(|_| MicrovmStateError::UnexpectedVcpuResponse)?;
        let vcpu_states = vcpu_responses
            .into_iter()
            .map(|response| match response {
                VcpuResponse::SavedState(state) => Ok(*state),
                VcpuResponse::Error(err) => Err(MicrovmStateError::SaveVcpuState(err)),
                VcpuResponse::NotAllowed(reason) => Err(MicrovmStateError::NotAllowed(reason)),
                _ => Err(MicrovmStateError::UnexpectedVcpuResponse),
            })
            .collect::<Result<Vec<VcpuState>, MicrovmStateError>>()?;
        Ok(vcpu_states)
    }
    /// Dumps CPU configuration.
    ///
    /// Queries every vCPU thread for its current CPU configuration and
    /// returns the answers in vCPU order.
    pub fn dump_cpu_config(&mut self) -> Result<Vec<CpuConfiguration>, DumpCpuConfigError> {
        for handle in self.vcpus_handles.iter_mut() {
            handle
                .send_event(VcpuEvent::DumpCpuConfig)
                .map_err(DumpCpuConfigError::SendEvent)?;
        }
        // A timeout from any receiver maps to `UnexpectedResponse`.
        let vcpu_responses = self
            .vcpus_handles
            .iter()
            .map(|handle| handle.response_receiver().recv_timeout(RECV_TIMEOUT_SEC))
            .collect::<Result<Vec<VcpuResponse>, RecvTimeoutError>>()
            .map_err(|_| DumpCpuConfigError::UnexpectedResponse)?;
        let cpu_configs = vcpu_responses
            .into_iter()
            .map(|response| match response {
                VcpuResponse::DumpedCpuConfig(cpu_config) => Ok(*cpu_config),
                VcpuResponse::Error(err) => Err(DumpCpuConfigError::DumpCpuConfig(err)),
                VcpuResponse::NotAllowed(reason) => Err(DumpCpuConfigError::NotAllowed(reason)),
                _ => Err(DumpCpuConfigError::UnexpectedResponse),
            })
            .collect::<Result<Vec<CpuConfiguration>, DumpCpuConfigError>>()?;
        Ok(cpu_configs)
    }
    /// Updates the path of the host file backing the emulated block device with id `drive_id`.
    /// We update the disk image on the device and its virtio configuration.
    pub fn update_block_device_path(
        &mut self,
        drive_id: &str,
        path_on_host: String,
    ) -> Result<(), VmmError> {
        // First `?` covers device lookup, second `?` covers the device-level
        // operation (same pattern in the wrappers below).
        self.device_manager
            .with_virtio_device(drive_id, |block: &mut Block| {
                block.update_disk_image(path_on_host)
            })??;
        Ok(())
    }
    /// Updates the rate limiter parameters for block device with `drive_id` id.
    pub fn update_block_rate_limiter(
        &mut self,
        drive_id: &str,
        rl_bytes: BucketUpdate,
        rl_ops: BucketUpdate,
    ) -> Result<(), VmmError> {
        self.device_manager
            .with_virtio_device(drive_id, |block: &mut Block| {
                block.update_rate_limiter(rl_bytes, rl_ops)
            })??;
        Ok(())
    }
    /// Updates the configuration of the vhost-user block device with `drive_id` id.
    pub fn update_vhost_user_block_config(&mut self, drive_id: &str) -> Result<(), VmmError> {
        self.device_manager
            .with_virtio_device(drive_id, |block: &mut Block| block.update_config())??;
        Ok(())
    }
    /// Updates the rate limiter parameters for net device with `net_id` id.
    pub fn update_net_rate_limiters(
        &mut self,
        net_id: &str,
        rx_bytes: BucketUpdate,
        rx_ops: BucketUpdate,
        tx_bytes: BucketUpdate,
        tx_ops: BucketUpdate,
    ) -> Result<(), VmmError> {
        // Single `?`: only the device lookup can fail here.
        self.device_manager
            .with_virtio_device(net_id, |net: &mut Net| {
                net.patch_rate_limiters(rx_bytes, rx_ops, tx_bytes, tx_ops)
            })?;
        Ok(())
    }
    /// Returns the configuration of the balloon device, if present.
    pub fn balloon_config(&self) -> Result<BalloonConfig, VmmError> {
        let config = self
            .device_manager
            .with_virtio_device(BALLOON_DEV_ID, |dev: &mut Balloon| dev.config())?;
        Ok(config)
    }
    /// Returns the latest balloon statistics if they are enabled.
    pub fn latest_balloon_stats(&self) -> Result<BalloonStats, VmmError> {
        let stats = self
            .device_manager
            .with_virtio_device(BALLOON_DEV_ID, |dev: &mut Balloon| dev.latest_stats())??;
        Ok(stats)
    }
    /// Updates configuration for the balloon device target size.
    pub fn update_balloon_config(&mut self, amount_mib: u32) -> Result<(), VmmError> {
        self.device_manager
            .with_virtio_device(BALLOON_DEV_ID, |dev: &mut Balloon| {
                dev.update_size(amount_mib)
            })??;
        Ok(())
    }
    /// Updates configuration for the balloon device as described in `balloon_stats_update`.
    pub fn update_balloon_stats_config(
        &mut self,
        stats_polling_interval_s: u16,
    ) -> Result<(), VmmError> {
        self.device_manager
            .with_virtio_device(BALLOON_DEV_ID, |dev: &mut Balloon| {
                dev.update_stats_polling_interval(stats_polling_interval_s)
            })??;
        Ok(())
    }
    /// Returns the current state of the memory hotplug device.
    pub fn memory_hotplug_status(&self) -> Result<VirtioMemStatus, VmmError> {
        self.device_manager
            .with_virtio_device(VIRTIO_MEM_DEV_ID, |dev: &mut VirtioMem| dev.status())
            .map_err(VmmError::FindDeviceError)
    }
    /// Updates the requested size of the memory hotplug device.
    pub fn update_memory_hotplug_size(&self, requested_size_mib: usize) -> Result<(), VmmError> {
        self.device_manager
            .with_virtio_device(VIRTIO_MEM_DEV_ID, |dev: &mut VirtioMem| {
                dev.update_requested_size(requested_size_mib)
            })
            .map_err(VmmError::FindDeviceError)??;
        Ok(())
    }
    /// Starts the balloon free page hinting run
    pub fn start_balloon_hinting(&mut self, cmd: StartHintingCmd) -> Result<(), VmmError> {
        self.device_manager
            .with_virtio_device(BALLOON_DEV_ID, |dev: &mut Balloon| dev.start_hinting(cmd))??;
        Ok(())
    }
    /// Retrieves the status of the balloon hinting run
    pub fn get_balloon_hinting_status(&mut self) -> Result<HintingStatus, VmmError> {
        let status = self
            .device_manager
            .with_virtio_device(BALLOON_DEV_ID, |dev: &mut Balloon| dev.get_hinting_status())??;
        Ok(status)
    }
    /// Stops the balloon free page hinting run
    pub fn stop_balloon_hinting(&mut self) -> Result<(), VmmError> {
        self.device_manager
            .with_virtio_device(BALLOON_DEV_ID, |dev: &mut Balloon| dev.stop_hinting())??;
        Ok(())
    }
    /// Signals Vmm to stop and exit.
    pub fn stop(&mut self, exit_code: FcExitCode) {
        // To avoid cycles, all teardown paths take the following route:
        // +------------------------+----------------------------+------------------------+
        // | Vmm | Action | Vcpu |
        // +------------------------+----------------------------+------------------------+
        // 1 | | | vcpu.exit(exit_code) |
        // 2 | | | vcpu.exit_evt.write(1) |
        // 3 | | <--- EventFd::exit_evt --- | |
        // 4 | vmm.stop() | | |
        // 5 | | --- VcpuEvent::Finish ---> | |
        // 6 | | | StateMachine::finish() |
        // 7 | VcpuHandle::join() | | |
        // 8 | vmm.shutdown_exit_code becomes Some(exit_code) breaking the main event loop |
        // +------------------------+----------------------------+------------------------+
        // Vcpu initiated teardown starts from `fn Vcpu::exit()` (step 1).
        // Vmm initiated teardown starts from `pub fn Vmm::stop()` (step 4).
        // Once `vmm.shutdown_exit_code` becomes `Some(exit_code)`, it is the upper layer's
        // responsibility to break main event loop and propagate the exit code value.
        info!("Vmm is stopping.");
        // We send a "Finish" event. If a VCPU has already exited, this is the only
        // message it will accept... but running and paused will take it as well.
        // It breaks out of the state machine loop so that the thread can be joined.
        for (idx, handle) in self.vcpus_handles.iter_mut().enumerate() {
            if let Err(err) = handle.send_event(VcpuEvent::Finish) {
                // Log and keep going: the remaining vcpus still need their event.
                error!("Failed to send VcpuEvent::Finish to vCPU {}: {}", idx, err);
            }
        }
        // The actual thread::join() that runs to release the thread's resource is done in
        // the VcpuHandle's Drop trait. We can trigger that to happen now by clearing the
        // list of handles. Do it here instead of Vmm::Drop to avoid dependency cycles.
        // (Vmm's Drop will also check if this list is empty).
        self.vcpus_handles.clear();
        // Break the main event loop, propagating the Vmm exit-code.
        self.shutdown_exit_code = Some(exit_code);
    }
/// Gets a reference to kvm-ioctls Vm
#[cfg(feature = "gdb")]
pub fn vm(&self) -> &Vm {
&self.vm
}
}
/// Process the content of the MPIDR_EL1 register in order to be able to pass it to KVM
///
/// The kernel expects to find the four affinity levels of the MPIDR in the first 32 bits of the
/// VGIC register attribute:
/// https://elixir.free-electrons.com/linux/v4.14.203/source/virt/kvm/arm/vgic/vgic-kvm-device.c#L445.
///
/// The format of the MPIDR_EL1 register is:
/// | 39 .... 32 | 31 .... 24 | 23 .... 16 | 15 .... 8 | 7 .... 0 |
/// | Aff3 | Other | Aff2 | Aff1 | Aff0 |
///
/// The KVM mpidr format is:
/// | 63 .... 56 | 55 .... 48 | 47 .... 40 | 39 .... 32 |
/// | Aff3 | Aff2 | Aff1 | Aff0 |
/// As specified in the linux kernel: Documentation/virt/kvm/devices/arm-vgic-v3.rst
#[cfg(target_arch = "aarch64")]
fn construct_kvm_mpidrs(vcpu_states: &[VcpuState]) -> Vec<u64> {
    vcpu_states
        .iter()
        .map(|state| {
            // Keep Aff2..Aff0 (bits 23:0) in place and move Aff3 from bits
            // 39:32 down to 31:24, dropping the "Other" byte in between.
            let cpu_affid = ((state.mpidr & 0xFF_0000_0000) >> 8) | (state.mpidr & 0xFF_FFFF);
            // Place the packed 32-bit affinity value in the upper word, as
            // the KVM layout above requires.
            cpu_affid << 32
        })
        .collect()
}
impl Drop for Vmm {
    // Best-effort teardown: stop the vcpus, restore the terminal, flush metrics.
    fn drop(&mut self) {
        // There are two cases when `drop()` is called:
        // 1) before the Vmm has been mutexed and subscribed to the event manager, or
        // 2) after the Vmm has been registered as a subscriber to the event manager.
        //
        // The first scenario is bound to happen if an error is raised during
        // Vmm creation (for example, during snapshot load), before the Vmm has
        // been subscribed to the event manager. If that happens, the `drop()`
        // function is called right before propagating the error. In order to
        // be able to gracefully exit Firecracker with the correct fault
        // message, we need to prepare the Vmm contents for the tear down
        // (join the vcpu threads). Explicitly calling `stop()` allows the
        // Vmm to be successfully dropped and firecracker to propagate the
        // error.
        //
        // In the second case, before dropping the Vmm object, the event
        // manager calls `stop()`, which sends a `Finish` event to the vcpus
        // and joins the vcpu threads. The Vmm is dropped after everything is
        // ready to be teared down. The line below is a no-op, because the Vmm
        // has already been stopped by the event manager at this point.
        self.stop(self.shutdown_exit_code.unwrap_or(FcExitCode::Ok));
        // Undo the raw terminal mode that `start_vcpus` set on stdin.
        if let Err(err) = std::io::stdin().lock().set_canon_mode() {
            warn!("Cannot set canonical mode for the terminal. {:?}", err);
        }
        // Write the metrics before exiting.
        if let Err(err) = METRICS.write() {
            error!("Failed to write metrics while stopping: {}", err);
        }
        // `stop()` clears the handle list; a non-empty list here means some
        // vcpu thread could not be joined.
        if !self.vcpus_handles.is_empty() {
            error!("Failed to tear down Vmm: the vcpu threads have not finished execution.");
        }
    }
}
impl MutEventSubscriber for Vmm {
    /// Handle a read event (EPOLLIN).
    ///
    /// The only event this handler owns is the vcpu exit eventfd; anything
    /// else is logged as spurious.
    fn process(&mut self, event: Events, _: &mut EventOps) {
        let source = event.fd();
        let event_set = event.event_set();
        if source == self.vcpus_exit_evt.as_raw_fd() && event_set == EventSet::IN {
            // Exit event handling should never do anything more than call 'self.stop()'.
            let _ = self.vcpus_exit_evt.read();
            // Labeled block: the first non-Ok exit status found wins.
            let exit_code = 'exit_code: {
                // Query each vcpu for their exit_code.
                for handle in &self.vcpus_handles {
                    // Drain all vcpu responses that are pending from this vcpu until we find an
                    // exit status.
                    for response in handle.response_receiver().try_iter() {
                        if let VcpuResponse::Exited(status) = response {
                            // It could be that some vcpus exited successfully while others
                            // errored out. Thus make sure that error exits from one vcpu always
                            // takes precedence over "ok" exits
                            if status != FcExitCode::Ok {
                                break 'exit_code status;
                            }
                        }
                    }
                }
                // No CPUs exited with error status code, report "Ok"
                FcExitCode::Ok
            };
            self.stop(exit_code);
        } else {
            error!("Spurious EventManager event for handler: Vmm");
        }
    }
    // Register interest in read events on the vcpu exit eventfd.
    fn init(&mut self, ops: &mut EventOps) {
        if let Err(err) = ops.add(Events::new(&self.vcpus_exit_evt, EventSet::IN)) {
            error!("Failed to register vmm exit event: {}", err);
        }
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/initrd.rs | src/vmm/src/initrd.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fs::File;
use std::os::unix::fs::MetadataExt;
use vm_memory::{GuestAddress, GuestMemory, ReadVolatile, VolatileMemoryError};
use crate::arch::initrd_load_addr;
use crate::utils::u64_to_usize;
use crate::vmm_config::boot_source::BootConfig;
use crate::vstate::memory::GuestMemoryMmap;
/// Errors associated with initrd loading.
// Each variant's doc comment doubles as its `Display` message via `displaydoc`.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum InitrdError {
    /// Failed to compute the initrd address.
    Address,
    /// Cannot load initrd due to an invalid memory configuration.
    Load,
    // Fixed: the previous message "Cannot image metadata" was missing its verb.
    /// Cannot read initrd image metadata: {0}
    Metadata(std::io::Error),
    /// Cannot copy initrd file fd: {0}
    CloneFd(std::io::Error),
    /// Cannot load initrd due to an invalid image: {0}
    Read(VolatileMemoryError),
}
/// Type for passing information about the initrd in the guest memory.
#[derive(Debug)]
pub struct InitrdConfig {
    /// Load address of initrd in guest memory
    pub address: GuestAddress,
    /// Size of initrd in guest memory
    // In bytes; equals the size of the backing file at load time.
    pub size: usize,
}
impl InitrdConfig {
    /// Load initrd into guest memory based on the boot config.
    ///
    /// Returns `Ok(None)` when the boot config carries no initrd file.
    pub fn from_config(
        boot_cfg: &BootConfig,
        vm_memory: &GuestMemoryMmap,
    ) -> Result<Option<Self>, InitrdError> {
        match &boot_cfg.initrd_file {
            Some(file) => {
                // Duplicate the fd so the original `BootConfig` keeps its handle.
                let file = file.try_clone().map_err(InitrdError::CloneFd)?;
                Self::from_file(vm_memory, file).map(Some)
            }
            None => Ok(None),
        }
    }
    /// Loads the initrd from a file into guest memory.
    ///
    /// # Errors
    /// - [`InitrdError::Metadata`] if the file size cannot be queried.
    /// - [`InitrdError::Address`] if no suitable load address exists.
    /// - [`InitrdError::Load`] / [`InitrdError::Read`] if the copy into guest
    ///   memory fails.
    pub fn from_file(vm_memory: &GuestMemoryMmap, mut file: File) -> Result<Self, InitrdError> {
        let size = u64_to_usize(file.metadata().map_err(InitrdError::Metadata)?.size());
        // Pick an arch-specific load address; `None` means the guest memory
        // layout cannot accommodate an initrd of this size.
        let raw_addr = initrd_load_addr(vm_memory, size).ok_or(InitrdError::Address)?;
        let address = GuestAddress(raw_addr);
        let mut slice = vm_memory
            .get_slice(address, size)
            .map_err(|_| InitrdError::Load)?;
        file.read_exact_volatile(&mut slice)
            .map_err(InitrdError::Read)?;
        Ok(InitrdConfig { address, size })
    }
}
#[cfg(test)]
mod tests {
    use std::io::{Seek, SeekFrom, Write};
    use vmm_sys_util::tempfile::TempFile;
    use super::*;
    use crate::arch::GUEST_PAGE_SIZE;
    use crate::test_utils::{single_region_mem, single_region_mem_at};
    // Builds a 1 MB buffer filled with 0xAA to act as a fake initrd image.
    fn make_test_bin() -> Vec<u8> {
        let mut fake_bin = Vec::new();
        fake_bin.resize(1_000_000, 0xAA);
        fake_bin
    }
    #[test]
    // Test that loading the initrd is successful on different archs.
    fn test_load_initrd() {
        let image = make_test_bin();
        // Memory large enough to hold the image plus arch overhead.
        let mem_size: usize = image.len() * 2 + GUEST_PAGE_SIZE;
        let tempfile = TempFile::new().unwrap();
        let mut tempfile = tempfile.into_file();
        tempfile.write_all(&image).unwrap();
        #[cfg(target_arch = "x86_64")]
        let gm = single_region_mem(mem_size);
        #[cfg(target_arch = "aarch64")]
        let gm = single_region_mem(mem_size + crate::arch::aarch64::layout::FDT_MAX_SIZE);
        // Need to reset the cursor to read initrd properly.
        tempfile.seek(SeekFrom::Start(0)).unwrap();
        let initrd = InitrdConfig::from_file(&gm, tempfile).unwrap();
        assert!(gm.address_in_range(initrd.address));
        assert_eq!(initrd.size, image.len());
    }
    #[test]
    // Guest memory far smaller than the image must fail address computation.
    fn test_load_initrd_no_memory() {
        let gm = single_region_mem(79);
        let image = make_test_bin();
        let tempfile = TempFile::new().unwrap();
        let mut tempfile = tempfile.into_file();
        tempfile.write_all(&image).unwrap();
        // Need to reset the cursor to read initrd properly.
        tempfile.seek(SeekFrom::Start(0)).unwrap();
        let res = InitrdConfig::from_file(&gm, tempfile);
        assert!(matches!(res, Err(InitrdError::Address)), "{:?}", res);
    }
    #[test]
    // A page-unaligned memory region must also fail address computation.
    fn test_load_initrd_unaligned() {
        let image = vec![1, 2, 3, 4];
        let tempfile = TempFile::new().unwrap();
        let mut tempfile = tempfile.into_file();
        tempfile.write_all(&image).unwrap();
        // Region deliberately starts one byte past a page boundary.
        let gm = single_region_mem_at(GUEST_PAGE_SIZE as u64 + 1, image.len() * 2);
        // Need to reset the cursor to read initrd properly.
        tempfile.seek(SeekFrom::Start(0)).unwrap();
        let res = InitrdConfig::from_file(&gm, tempfile);
        assert!(matches!(res, Err(InitrdError::Address)), "{:?}", res);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/signal_handler.rs | src/vmm/src/signal_handler.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use libc::{
SIGBUS, SIGHUP, SIGILL, SIGPIPE, SIGSEGV, SIGSYS, SIGXCPU, SIGXFSZ, c_int, c_void, siginfo_t,
};
use log::error;
use crate::FcExitCode;
use crate::logger::{IncMetric, METRICS, StoreMetric};
use crate::utils::signal::register_signal_handler;
// The offset of `si_syscall` (offending syscall identifier) within the siginfo structure
// expressed as an `(u)int*`.
// Offset `6` for an `i32` field means that the needed information is located at `6 * sizeof(i32)`.
// See /usr/include/linux/signal.h for the C struct definition.
// See https://github.com/rust-lang/libc/issues/716 for why the offset is different in Rust.
const SI_OFF_SYSCALL: isize = 6;
// `si_code` value the kernel sets for a SIGSYS delivered by seccomp (SYS_SECCOMP).
const SYS_SECCOMP_CODE: i32 = 1;
/// Flush metrics, then terminate the whole process with `exit_code`.
#[inline]
fn exit_with_code(exit_code: FcExitCode) {
    // Write the metrics before exiting.
    if let Err(err) = METRICS.write() {
        error!("Failed to write metrics while stopping: {}", err);
    }
    // SAFETY: Safe because we're terminating the process anyway.
    // `_exit` bypasses atexit handlers and destructors, which is acceptable
    // in a signal-handling teardown path.
    unsafe { libc::_exit(exit_code as i32) };
}
// Generates an `extern "C"` signal handler named `$fn_name` for signal
// `$signal_name`. The handler: sanity-checks the delivered signal number,
// stores `1` into `$signal_metric`, runs `$body(si_code, info)` for any
// signal-specific logging, then terminates the process with
// `FcExitCode::$exit_code`.
macro_rules! generate_handler {
    ($fn_name:ident ,$signal_name:ident, $exit_code:ident, $signal_metric:expr, $body:ident) => {
        #[inline(always)]
        extern "C" fn $fn_name(num: c_int, info: *mut siginfo_t, _unused: *mut c_void) {
            // SAFETY: Safe because we're just reading some fields from a supposedly valid argument.
            let si_signo = unsafe { (*info).si_signo };
            // SAFETY: Safe because we're just reading some fields from a supposedly valid argument.
            let si_code = unsafe { (*info).si_code };
            // A mismatch between the delivered signal and the one this handler
            // was generated for should be impossible; treat it as fatal.
            // (`exit_with_code` never returns.)
            if num != si_signo || num != $signal_name {
                exit_with_code(FcExitCode::UnexpectedError);
            }
            $signal_metric.store(1);
            error!(
                "Shutting down VM after intercepting signal {}, code {}.",
                si_signo, si_code
            );
            $body(si_code, info);
            match si_signo {
                $signal_name => exit_with_code(crate::FcExitCode::$exit_code),
                _ => exit_with_code(FcExitCode::UnexpectedError),
            };
        }
    };
}
// SIGSYS-specific hook: logs the number of the syscall that tripped the
// seccomp filter. Exits with `UnexpectedError` if the SIGSYS was not raised
// by seccomp (`si_code != SYS_SECCOMP_CODE`).
fn log_sigsys_err(si_code: c_int, info: *mut siginfo_t) {
    if si_code != SYS_SECCOMP_CODE {
        // We received a SIGSYS for a reason other than `bad syscall`.
        exit_with_code(FcExitCode::UnexpectedError);
    }
    // SAFETY: Other signals which might do async unsafe things incompatible with the rest of this
    // function are blocked due to the sa_mask used when registering the signal handler.
    // The `si_syscall` field is read by raw offset (see SI_OFF_SYSCALL above).
    let syscall = unsafe { *(info as *const i32).offset(SI_OFF_SYSCALL) };
    error!(
        "Shutting down VM after intercepting a bad syscall ({}).",
        syscall
    );
}
// No-op hook for signals that need no extra diagnostics beyond the generic log.
fn empty_fn(_si_code: c_int, _info: *mut siginfo_t) {}
// Fatal handlers: each sets its metric, logs, and exits with an exit code
// matching the signal (except SIGSYS, which exits with `BadSyscall`).
generate_handler!(
    sigxfsz_handler,
    SIGXFSZ,
    SIGXFSZ,
    METRICS.signals.sigxfsz,
    empty_fn
);
generate_handler!(
    sigxcpu_handler,
    SIGXCPU,
    SIGXCPU,
    METRICS.signals.sigxcpu,
    empty_fn
);
generate_handler!(
    sigbus_handler,
    SIGBUS,
    SIGBUS,
    METRICS.signals.sigbus,
    empty_fn
);
generate_handler!(
    sigsegv_handler,
    SIGSEGV,
    SIGSEGV,
    METRICS.signals.sigsegv,
    empty_fn
);
// SIGSYS additionally logs the offending syscall number via `log_sigsys_err`.
generate_handler!(
    sigsys_handler,
    SIGSYS,
    BadSyscall,
    METRICS.seccomp.num_faults,
    log_sigsys_err
);
generate_handler!(
    sighup_handler,
    SIGHUP,
    SIGHUP,
    METRICS.signals.sighup,
    empty_fn
);
generate_handler!(
    sigill_handler,
    SIGILL,
    SIGILL,
    METRICS.signals.sigill,
    empty_fn
);
// Non-fatal handler for SIGPIPE: unlike the generated handlers above, this one
// returns instead of exiting, so the interrupted code observes EPIPE normally.
#[inline(always)]
extern "C" fn sigpipe_handler(num: c_int, info: *mut siginfo_t, _unused: *mut c_void) {
    // Just record the metric and allow the process to continue, the EPIPE error needs
    // to be handled at caller level.
    // SAFETY: Safe because we're just reading some fields from a supposedly valid argument.
    let si_signo = unsafe { (*info).si_signo };
    // SAFETY: Safe because we're just reading some fields from a supposedly valid argument.
    let si_code = unsafe { (*info).si_code };
    // Mismatch between the delivered signal and siginfo is only logged here,
    // not treated as fatal.
    if num != si_signo || num != SIGPIPE {
        error!("Received invalid signal {}, code {}.", si_signo, si_code);
        return;
    }
    METRICS.signals.sigpipe.inc();
    error!("Received signal {}, code {}.", si_signo, si_code);
}
/// Registers all the required signal handlers.
///
/// Custom handlers are installed for: `SIGBUS`, `SIGSEGV`, `SIGSYS`
/// `SIGXFSZ` `SIGXCPU` `SIGPIPE` `SIGHUP` and `SIGILL`.
pub fn register_signal_handlers() -> vmm_sys_util::errno::Result<()> {
// Call to unsafe register_signal_handler which is considered unsafe because it will
// register a signal handler which will be called in the current thread and will interrupt
// whatever work is done on the current thread, so we have to keep in mind that the registered
// signal handler must only do async-signal-safe operations.
register_signal_handler(SIGSYS, sigsys_handler)?;
register_signal_handler(SIGBUS, sigbus_handler)?;
register_signal_handler(SIGSEGV, sigsegv_handler)?;
register_signal_handler(SIGXFSZ, sigxfsz_handler)?;
register_signal_handler(SIGXCPU, sigxcpu_handler)?;
register_signal_handler(SIGPIPE, sigpipe_handler)?;
register_signal_handler(SIGHUP, sighup_handler)?;
register_signal_handler(SIGILL, sigill_handler)?;
Ok(())
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/seccomp.rs | src/vmm/src/seccomp.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::collections::HashMap;
use std::io::Read;
use std::sync::Arc;
use bincode::config;
use bincode::config::{Configuration, Fixint, Limit, LittleEndian};
// This byte limit is passed to `bincode` to guard against a potential memory
// allocation DOS caused by binary filters that are too large.
// This limit can be safely determined since the maximum length of a BPF
// filter is 4096 instructions and Firecracker has a finite number of threads.
const DESERIALIZATION_BYTES_LIMIT: usize = 100_000;
// Bincode decoding configuration: fixed-width integer encoding, little-endian
// byte order, and the byte limit above applied while decoding.
const BINCODE_CONFIG: Configuration<LittleEndian, Fixint, Limit<DESERIALIZATION_BYTES_LIMIT>> =
    config::standard()
        .with_fixed_int_encoding()
        .with_limit::<DESERIALIZATION_BYTES_LIMIT>()
        .with_little_endian();
/// Each BPF instruction is 8 bytes long and 4 byte aligned.
/// This alignment needs to be satisfied in order for a BPF code to be accepted
/// by the syscalls. Using u64 here is safe as it has same size and even bigger alignment.
pub type BpfInstruction = u64;
/// Program made up of a sequence of BPF instructions.
pub type BpfProgram = Vec<BpfInstruction>;
/// Reference to program made up of a sequence of BPF instructions.
pub type BpfProgramRef<'a> = &'a [BpfInstruction];
/// Type that associates a thread category (e.g. "vmm", "api", "vcpu") to a BPF program.
pub type BpfThreadMap = HashMap<String, Arc<BpfProgram>>;
/// Binary filter deserialization errors.
pub type DeserializationError = bincode::error::DecodeError;
/// Retrieve empty seccomp filters.
pub fn get_empty_filters() -> BpfThreadMap {
let mut map = BpfThreadMap::new();
map.insert("vmm".to_string(), Arc::new(vec![]));
map.insert("api".to_string(), Arc::new(vec![]));
map.insert("vcpu".to_string(), Arc::new(vec![]));
map
}
/// Deserialize binary with bpf filters
///
/// Decodes a bincode-encoded `HashMap<String, BpfProgram>` from `reader`
/// (bounded by `DESERIALIZATION_BYTES_LIMIT` via `BINCODE_CONFIG`) and
/// lowercases the thread-category keys so lookups are case-insensitive.
pub fn deserialize_binary<R: Read>(mut reader: R) -> Result<BpfThreadMap, DeserializationError> {
    let result: HashMap<String, _> = bincode::decode_from_std_read(&mut reader, BINCODE_CONFIG)?;
    Ok(result
        .into_iter()
        .map(|(k, v)| (k.to_lowercase(), Arc::new(v)))
        .collect())
}
/// Filter installation errors.
///
/// The doc comment of each variant doubles as its `Display` message via
/// `displaydoc`, so these strings are user-visible.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum InstallationError {
    /// Filter length exceeds the maximum size of {BPF_MAX_LEN:} instructions
    FilterTooLarge,
    /// `prctl` syscall failed with error code: {0}
    Prctl(std::io::Error),
}
/// The maximum seccomp-BPF program length allowed by the linux kernel.
pub const BPF_MAX_LEN: usize = 4096;
/// BPF structure definition for filter array.
/// See /usr/include/linux/filter.h .
/// `#[repr(C)]` is required: this struct is passed by pointer to the kernel
/// and must match the C `sock_fprog` layout exactly.
#[repr(C)]
#[derive(Debug)]
struct SockFprog {
    // Number of BPF instructions in `filter`.
    len: u16,
    // Pointer to the first instruction of the program.
    filter: *const BpfInstruction,
}
/// Apply bpf filter.
///
/// Installs `bpf_filter` as the seccomp filter of the calling thread. An
/// empty filter is a no-op.
///
/// # Errors
///
/// Returns `FilterTooLarge` if the program exceeds `BPF_MAX_LEN` (or `u16`),
/// and `Prctl` if the `prctl`/`seccomp` syscalls fail.
pub fn apply_filter(bpf_filter: BpfProgramRef) -> Result<(), InstallationError> {
    // If the program is empty, don't install the filter.
    if bpf_filter.is_empty() {
        return Ok(());
    }
    // If the program length is greater than the limit allowed by the kernel,
    // fail quickly. Otherwise, `prctl` will give a more cryptic error code.
    if BPF_MAX_LEN < bpf_filter.len() {
        return Err(InstallationError::FilterTooLarge);
    }
    // The kernel-facing `sock_fprog.len` field is a u16, so the length must fit.
    let bpf_filter_len =
        u16::try_from(bpf_filter.len()).map_err(|_| InstallationError::FilterTooLarge)?;
    // SAFETY: Safe because the parameters are valid.
    unsafe {
        {
            // NO_NEW_PRIVS must be set before an unprivileged process may
            // install a seccomp filter.
            let rc = libc::prctl(libc::PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
            if rc != 0 {
                return Err(InstallationError::Prctl(std::io::Error::last_os_error()));
            }
        }
        // `bpf_prog` must outlive the syscall below; it is borrowed by raw pointer.
        let bpf_prog = SockFprog {
            len: bpf_filter_len,
            filter: bpf_filter.as_ptr(),
        };
        let bpf_prog_ptr = &bpf_prog as *const SockFprog;
        {
            let rc = libc::syscall(
                libc::SYS_seccomp,
                libc::SECCOMP_SET_MODE_FILTER,
                0,
                bpf_prog_ptr,
            );
            if rc != 0 {
                return Err(InstallationError::Prctl(std::io::Error::last_os_error()));
            }
        }
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    #![allow(clippy::undocumented_unsafe_blocks)]
    use std::collections::HashMap;
    use std::sync::Arc;
    use std::thread;
    use super::*;
    #[test]
    fn test_deserialize_binary() {
        // Malformed bincode binary.
        let data = "adassafvc".to_string();
        deserialize_binary(data.as_bytes()).unwrap_err();
        // Test that the binary deserialization is correct, and that the thread keys
        // have been lowercased.
        let bpf_prog = vec![0; 2];
        let mut filter_map: HashMap<String, BpfProgram> = HashMap::new();
        filter_map.insert("VcpU".to_string(), bpf_prog.clone());
        let bytes = bincode::serde::encode_to_vec(&filter_map, BINCODE_CONFIG).unwrap();
        let mut expected_res = BpfThreadMap::new();
        expected_res.insert("vcpu".to_string(), Arc::new(bpf_prog));
        assert_eq!(deserialize_binary(&bytes[..]).unwrap(), expected_res);
        // A payload larger than DESERIALIZATION_BYTES_LIMIT must be rejected
        // with `LimitExceeded` instead of being allocated.
        let bpf_prog = vec![0; DESERIALIZATION_BYTES_LIMIT + 1];
        let mut filter_map: HashMap<String, BpfProgram> = HashMap::new();
        filter_map.insert("VcpU".to_string(), bpf_prog.clone());
        let bytes = bincode::serde::encode_to_vec(&filter_map, BINCODE_CONFIG).unwrap();
        assert!(matches!(
            deserialize_binary(&bytes[..]).unwrap_err(),
            bincode::error::DecodeError::LimitExceeded
        ));
    }
    #[test]
    fn test_filter_apply() {
        // Each scenario runs on its own spawned thread: seccomp filters apply
        // per thread, so this keeps any installed filter from affecting the
        // rest of the test process.
        // Test filter too large.
        thread::spawn(|| {
            let filter: BpfProgram = vec![0; 5000];
            // Apply seccomp filter.
            assert!(matches!(
                apply_filter(&filter).unwrap_err(),
                InstallationError::FilterTooLarge
            ));
        })
        .join()
        .unwrap();
        // Test empty filter.
        thread::spawn(|| {
            let filter: BpfProgram = vec![];
            assert_eq!(filter.len(), 0);
            let seccomp_level = unsafe { libc::prctl(libc::PR_GET_SECCOMP) };
            assert_eq!(seccomp_level, 0);
            apply_filter(&filter).unwrap();
            // test that seccomp level remains 0 on failure.
            let seccomp_level = unsafe { libc::prctl(libc::PR_GET_SECCOMP) };
            assert_eq!(seccomp_level, 0);
        })
        .join()
        .unwrap();
        // Test invalid BPF code.
        thread::spawn(|| {
            let filter = vec![0xFF; 1];
            let seccomp_level = unsafe { libc::prctl(libc::PR_GET_SECCOMP) };
            assert_eq!(seccomp_level, 0);
            assert!(matches!(
                apply_filter(&filter).unwrap_err(),
                InstallationError::Prctl(_)
            ));
            // test that seccomp level remains 0 on failure.
            let seccomp_level = unsafe { libc::prctl(libc::PR_GET_SECCOMP) };
            assert_eq!(seccomp_level, 0);
        })
        .join()
        .unwrap();
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/resources.rs | src/vmm/src/resources.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::convert::From;
use std::path::PathBuf;
use std::sync::{Arc, Mutex, MutexGuard};
use serde::{Deserialize, Serialize};
use vm_memory::GuestAddress;
use crate::cpu_config::templates::CustomCpuTemplate;
use crate::logger::info;
use crate::mmds;
use crate::mmds::data_store::{Mmds, MmdsVersion};
use crate::mmds::ns::MmdsNetworkStack;
use crate::utils::mib_to_bytes;
use crate::utils::net::ipv4addr::is_link_local_valid;
use crate::vmm_config::balloon::*;
use crate::vmm_config::boot_source::{
BootConfig, BootSource, BootSourceConfig, BootSourceConfigError,
};
use crate::vmm_config::drive::*;
use crate::vmm_config::entropy::*;
use crate::vmm_config::instance_info::InstanceInfo;
use crate::vmm_config::machine_config::{MachineConfig, MachineConfigError, MachineConfigUpdate};
use crate::vmm_config::memory_hotplug::{MemoryHotplugConfig, MemoryHotplugConfigError};
use crate::vmm_config::metrics::{MetricsConfig, MetricsConfigError, init_metrics};
use crate::vmm_config::mmds::{MmdsConfig, MmdsConfigError};
use crate::vmm_config::net::*;
use crate::vmm_config::pmem::{PmemBuilder, PmemConfig, PmemConfigError};
use crate::vmm_config::serial::SerialConfig;
use crate::vmm_config::vsock::*;
use crate::vstate::memory;
use crate::vstate::memory::{GuestRegionMmap, MemoryError};
/// Errors encountered when configuring microVM resources.
///
/// Each variant's doc comment doubles as its `Display` message via
/// `displaydoc`; `#[from]` provides the conversions used by `?` in
/// `VmResources` methods.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum ResourcesError {
    /// Balloon device error: {0}
    BalloonDevice(#[from] BalloonConfigError),
    /// Block device error: {0}
    BlockDevice(#[from] DriveError),
    /// Boot source error: {0}
    BootSource(#[from] BootSourceConfigError),
    /// File operation error: {0}
    File(#[from] std::io::Error),
    /// Invalid JSON: {0}
    InvalidJson(#[from] serde_json::Error),
    /// Logger error: {0}
    Logger(#[from] crate::logger::LoggerUpdateError),
    /// Metrics error: {0}
    Metrics(#[from] MetricsConfigError),
    /// MMDS error: {0}
    Mmds(#[from] mmds::data_store::MmdsDatastoreError),
    /// MMDS config error: {0}
    MmdsConfig(#[from] MmdsConfigError),
    /// Network device error: {0}
    NetDevice(#[from] NetworkInterfaceError),
    /// VM config error: {0}
    MachineConfig(#[from] MachineConfigError),
    /// Vsock device error: {0}
    VsockDevice(#[from] VsockConfigError),
    /// Entropy device error: {0}
    EntropyDevice(#[from] EntropyDeviceError),
    /// Pmem device error: {0}
    PmemDevice(#[from] PmemConfigError),
    /// Memory hotplug config error: {0}
    MemoryHotplugConfig(#[from] MemoryHotplugConfigError),
}
// A custom CPU template supplied either as a path to a template file or as an
// inline template object. With `#[serde(untagged)]` the variants are tried in
// declaration order during deserialization: a JSON string matches `Path`,
// anything else is attempted as an inline `Template`.
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)]
#[serde(untagged)]
enum CustomCpuTemplateOrPath {
    // Filesystem path to a JSON CPU template file.
    Path(PathBuf),
    // Inline custom CPU template.
    Template(CustomCpuTemplate),
}
/// Used for configuring a vmm from one single json passed to the Firecracker process.
///
/// Field names are serialized in kebab-case (e.g. `boot-source`,
/// `machine-config`). Optional sections are simply omitted from the JSON.
#[derive(Debug, Default, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct VmmConfig {
    // Optional balloon device; absent means no balloon is attached.
    balloon: Option<BalloonDeviceConfig>,
    // Block devices to attach; kernel/rootfs setup happens via `boot_source`.
    drives: Vec<BlockDeviceConfig>,
    // Mandatory boot source (kernel image, boot args, optional initrd).
    boot_source: BootSourceConfig,
    // Custom CPU template, inline or as a file path (see CustomCpuTemplateOrPath).
    cpu_config: Option<CustomCpuTemplateOrPath>,
    logger: Option<crate::logger::LoggerConfig>,
    machine_config: Option<MachineConfig>,
    metrics: Option<MetricsConfig>,
    mmds_config: Option<MmdsConfig>,
    #[serde(default)]
    network_interfaces: Vec<NetworkInterfaceConfig>,
    vsock: Option<VsockDeviceConfig>,
    entropy: Option<EntropyDeviceConfig>,
    // Serialized under the key "pmem".
    #[serde(default, rename = "pmem")]
    pmem_devices: Vec<PmemConfig>,
    // Skipped during (de)serialization so it never ends up in snapshots.
    #[serde(skip)]
    serial_config: Option<SerialConfig>,
    memory_hotplug: Option<MemoryHotplugConfig>,
}
/// A data structure that encapsulates the device configurations
/// held in the Vmm.
#[derive(Debug, Default)]
pub struct VmResources {
    /// The vCpu and memory configuration for this microVM.
    pub machine_config: MachineConfig,
    /// The boot source spec (contains both config and builder) for this microVM.
    pub boot_source: BootSource,
    /// The block devices.
    pub block: BlockBuilder,
    /// The vsock device.
    pub vsock: VsockBuilder,
    /// The balloon device.
    pub balloon: BalloonBuilder,
    /// The network devices builder.
    pub net_builder: NetBuilder,
    /// The entropy device builder.
    pub entropy: EntropyDeviceBuilder,
    /// The pmem devices.
    pub pmem: PmemBuilder,
    /// The memory hotplug configuration.
    pub memory_hotplug: Option<MemoryHotplugConfig>,
    /// The optional Mmds data store.
    // This is initialised on demand (if ever used), so that we don't allocate it unless it's
    // actually used. See `mmds_or_default()`.
    pub mmds: Option<Arc<Mutex<Mmds>>>,
    /// Data store limit for the mmds.
    pub mmds_size_limit: usize,
    /// Whether or not to load boot timer device.
    pub boot_timer: bool,
    /// Whether or not to use PCIe transport for VirtIO devices.
    pub pci_enabled: bool,
    /// Where serial console output should be written to
    pub serial_out_path: Option<PathBuf>,
}
impl VmResources {
    /// Configures Vmm resources as described by the `config_json` param.
    ///
    /// Logger and metrics are set up first, so later configuration errors can
    /// be reported through them; the remaining resources are then built in
    /// the fixed order below (machine config, CPU template, boot source,
    /// devices, MMDS, memory hotplug).
    ///
    /// # Panics
    ///
    /// Panics if `metadata_json` is provided but is not valid JSON (explicit
    /// `expect` below).
    pub fn from_json(
        config_json: &str,
        instance_info: &InstanceInfo,
        mmds_size_limit: usize,
        metadata_json: Option<&str>,
    ) -> Result<Self, ResourcesError> {
        let vmm_config = serde_json::from_str::<VmmConfig>(config_json)?;
        if let Some(logger_config) = vmm_config.logger {
            crate::logger::LOGGER.update(logger_config)?;
        }
        if let Some(metrics) = vmm_config.metrics {
            init_metrics(metrics)?;
        }
        let mut resources: Self = Self {
            mmds_size_limit,
            ..Default::default()
        };
        if let Some(machine_config) = vmm_config.machine_config {
            let machine_config = MachineConfigUpdate::from(machine_config);
            resources.update_machine_config(&machine_config)?;
        }
        // The CPU template may be given inline or as a path to a JSON file.
        if let Some(either) = vmm_config.cpu_config {
            match either {
                CustomCpuTemplateOrPath::Path(path) => {
                    let cpu_config_json =
                        std::fs::read_to_string(path).map_err(ResourcesError::File)?;
                    let cpu_template = CustomCpuTemplate::try_from(cpu_config_json.as_str())?;
                    resources.set_custom_cpu_template(cpu_template);
                }
                CustomCpuTemplateOrPath::Template(template) => {
                    resources.set_custom_cpu_template(template)
                }
            }
        }
        resources.build_boot_source(vmm_config.boot_source)?;
        for drive_config in vmm_config.drives.into_iter() {
            resources.set_block_device(drive_config)?;
        }
        for net_config in vmm_config.network_interfaces.into_iter() {
            resources.build_net_device(net_config)?;
        }
        if let Some(vsock_config) = vmm_config.vsock {
            resources.set_vsock_device(vsock_config)?;
        }
        if let Some(balloon_config) = vmm_config.balloon {
            resources.set_balloon_device(balloon_config)?;
        }
        // Init the data store from file, if present. This happens before
        // `set_mmds_config` so the configured version/compat applies on top.
        if let Some(data) = metadata_json {
            resources.locked_mmds_or_default()?.put_data(
                serde_json::from_str(data).expect("MMDS error: metadata provided not valid json"),
            )?;
            info!("Successfully added metadata to mmds from file");
        }
        if let Some(mmds_config) = vmm_config.mmds_config {
            resources.set_mmds_config(mmds_config, &instance_info.id)?;
        }
        if let Some(entropy_device_config) = vmm_config.entropy {
            resources.build_entropy_device(entropy_device_config)?;
        }
        for pmem_config in vmm_config.pmem_devices.into_iter() {
            resources.build_pmem_device(pmem_config)?;
        }
        if let Some(serial_cfg) = vmm_config.serial_config {
            resources.serial_out_path = serial_cfg.serial_out_path;
        }
        if let Some(memory_hotplug_config) = vmm_config.memory_hotplug {
            resources.set_memory_hotplug_config(memory_hotplug_config)?;
        }
        Ok(resources)
    }
    /// If not initialised, create the mmds data store with the default config.
    pub fn mmds_or_default(&mut self) -> Result<&Arc<Mutex<Mmds>>, MmdsConfigError> {
        Ok(self
            .mmds
            .get_or_insert(Arc::new(Mutex::new(Mmds::try_new(self.mmds_size_limit)?))))
    }
    /// If not initialised, create the mmds data store with the default config.
    ///
    /// Convenience wrapper over `mmds_or_default` that returns the store
    /// already locked.
    pub fn locked_mmds_or_default(&mut self) -> Result<MutexGuard<'_, Mmds>, MmdsConfigError> {
        let mmds = self.mmds_or_default()?;
        Ok(mmds.lock().expect("Poisoned lock"))
    }
    /// Add a custom CPU template to the VM resources
    /// to configure vCPUs.
    pub fn set_custom_cpu_template(&mut self, cpu_template: CustomCpuTemplate) {
        self.machine_config.set_custom_cpu_template(cpu_template);
    }
    /// Updates the configuration of the microVM.
    ///
    /// The update is validated against the balloon device (if any) before it
    /// is committed to `self.machine_config`.
    pub fn update_machine_config(
        &mut self,
        update: &MachineConfigUpdate,
    ) -> Result<(), MachineConfigError> {
        let updated = self.machine_config.update(update)?;
        // The VM cannot have a memory size smaller than the target size
        // of the balloon device, if present.
        if self.balloon.get().is_some()
            && updated.mem_size_mib
                < self
                    .balloon
                    .get_config()
                    .map_err(|_| MachineConfigError::InvalidVmState)?
                    .amount_mib as usize
        {
            return Err(MachineConfigError::IncompatibleBalloonSize);
        }
        self.machine_config = updated;
        Ok(())
    }
    // Repopulate the MmdsConfig based on information from the data store
    // and the associated net devices.
    fn mmds_config(&self) -> Option<MmdsConfig> {
        // If the data store is not initialised, we can be sure that the user did not configure
        // mmds.
        let mmds = self.mmds.as_ref()?;
        let mut mmds_config = None;
        let net_devs_with_mmds: Vec<_> = self
            .net_builder
            .iter()
            .filter(|net| net.lock().expect("Poisoned lock").mmds_ns().is_some())
            .collect();
        if !net_devs_with_mmds.is_empty() {
            let mmds_guard = mmds.lock().expect("Poisoned lock");
            let mut inner_mmds_config = MmdsConfig {
                version: mmds_guard.version(),
                network_interfaces: vec![],
                ipv4_address: None,
                imds_compat: mmds_guard.imds_compat(),
            };
            for net_dev in net_devs_with_mmds {
                let net = net_dev.lock().unwrap();
                inner_mmds_config.network_interfaces.push(net.id().clone());
                // Only need to get one ip address, as they will all be equal.
                if inner_mmds_config.ipv4_address.is_none() {
                    // Safe to unwrap the mmds_ns as the filter() explicitly checks for
                    // its existence.
                    inner_mmds_config.ipv4_address = Some(net.mmds_ns().unwrap().ipv4_addr());
                }
            }
            mmds_config = Some(inner_mmds_config);
        }
        mmds_config
    }
    /// Sets a balloon device to be attached when the VM starts.
    pub fn set_balloon_device(
        &mut self,
        config: BalloonDeviceConfig,
    ) -> Result<(), BalloonConfigError> {
        // The balloon cannot have a target size greater than the size of
        // the guest memory.
        if config.amount_mib as usize > self.machine_config.mem_size_mib {
            return Err(BalloonConfigError::TooManyPagesRequested);
        }
        self.balloon.set(config)
    }
    /// Obtains the boot source hooks (kernel fd, command line creation and validation).
    pub fn build_boot_source(
        &mut self,
        boot_source_cfg: BootSourceConfig,
    ) -> Result<(), BootSourceConfigError> {
        self.boot_source = BootSource {
            builder: Some(BootConfig::new(&boot_source_cfg)?),
            config: boot_source_cfg,
        };
        Ok(())
    }
    /// Inserts a block to be attached when the VM starts.
    // Only call this function as part of user configuration.
    // If the drive_id does not exist, a new Block Device Config is added to the list.
    pub fn set_block_device(
        &mut self,
        block_device_config: BlockDeviceConfig,
    ) -> Result<(), DriveError> {
        // Only one root device may exist across block and pmem devices;
        // `insert` is told whether pmem already claimed it.
        let has_pmem_root = self.pmem.has_root_device();
        self.block.insert(block_device_config, has_pmem_root)
    }
    /// Builds a network device to be attached when the VM starts.
    pub fn build_net_device(
        &mut self,
        body: NetworkInterfaceConfig,
    ) -> Result<(), NetworkInterfaceError> {
        let _ = self.net_builder.build(body)?;
        Ok(())
    }
    /// Sets a vsock device to be attached when the VM starts.
    pub fn set_vsock_device(&mut self, config: VsockDeviceConfig) -> Result<(), VsockConfigError> {
        self.vsock.insert(config)
    }
    /// Builds an entropy device to be attached when the VM starts.
    pub fn build_entropy_device(
        &mut self,
        body: EntropyDeviceConfig,
    ) -> Result<(), EntropyDeviceError> {
        self.entropy.insert(body)
    }
    /// Builds a pmem device to be attached when the VM starts.
    pub fn build_pmem_device(&mut self, body: PmemConfig) -> Result<(), PmemConfigError> {
        // Mirror of `set_block_device`: pmem must not add a second root device.
        let has_block_root = self.block.has_root_device();
        self.pmem.build(body, has_block_root)
    }
    /// Sets the memory hotplug configuration.
    pub fn set_memory_hotplug_config(
        &mut self,
        config: MemoryHotplugConfig,
    ) -> Result<(), MemoryHotplugConfigError> {
        config.validate()?;
        self.memory_hotplug = Some(config);
        Ok(())
    }
    /// Setter for mmds config.
    pub fn set_mmds_config(
        &mut self,
        config: MmdsConfig,
        instance_id: &str,
    ) -> Result<(), MmdsConfigError> {
        self.set_mmds_network_stack_config(&config)?;
        self.set_mmds_basic_config(config.version, config.imds_compat, instance_id)?;
        Ok(())
    }
    /// Updates MMDS-related config other than MMDS network stack.
    pub fn set_mmds_basic_config(
        &mut self,
        version: MmdsVersion,
        imds_compat: bool,
        instance_id: &str,
    ) -> Result<(), MmdsConfigError> {
        let mut mmds_guard = self.locked_mmds_or_default()?;
        mmds_guard.set_version(version);
        mmds_guard.set_imds_compat(imds_compat);
        mmds_guard.set_aad(instance_id);
        Ok(())
    }
    // Updates MMDS Network Stack for network interfaces to allow forwarding
    // requests to MMDS (or not).
    fn set_mmds_network_stack_config(
        &mut self,
        config: &MmdsConfig,
    ) -> Result<(), MmdsConfigError> {
        // Check IPv4 address validity. A missing address falls back to the
        // default MMDS link-local address.
        let ipv4_addr = match config.ipv4_addr() {
            Some(ipv4_addr) if is_link_local_valid(ipv4_addr) => Ok(ipv4_addr),
            None => Ok(MmdsNetworkStack::default_ipv4_addr()),
            _ => Err(MmdsConfigError::InvalidIpv4Addr),
        }?;
        let network_interfaces = config.network_interfaces();
        // Ensure that at least one network ID is specified.
        if network_interfaces.is_empty() {
            return Err(MmdsConfigError::EmptyNetworkIfaceList);
        }
        // Ensure all interface IDs specified correspond to existing net devices.
        if !network_interfaces.iter().all(|id| {
            self.net_builder
                .iter()
                .map(|device| device.lock().expect("Poisoned lock").id().clone())
                .any(|x| &x == id)
        }) {
            return Err(MmdsConfigError::InvalidNetworkInterfaceId);
        }
        // Safe to unwrap because we've just made sure that it's initialised.
        let mmds = self.mmds_or_default()?.clone();
        // Create `MmdsNetworkStack` and configure the IPv4 address for
        // existing built network devices whose names are defined in the
        // network interface ID list.
        for net_device in self.net_builder.iter() {
            let mut net_device_lock = net_device.lock().expect("Poisoned lock");
            if network_interfaces.contains(net_device_lock.id()) {
                net_device_lock.configure_mmds_network_stack(ipv4_addr, mmds.clone());
            } else {
                // Interfaces not in the list get MMDS forwarding disabled.
                net_device_lock.disable_mmds_network_stack();
            }
        }
        Ok(())
    }
    /// Allocates the given guest memory regions.
    ///
    /// If vhost-user-blk devices are in use, allocates memfd-backed shared memory, otherwise
    /// prefers anonymous memory for performance reasons.
    fn allocate_memory_regions(
        &self,
        regions: &[(GuestAddress, usize)],
    ) -> Result<Vec<GuestRegionMmap>, MemoryError> {
        let vhost_user_device_used = self
            .block
            .devices
            .iter()
            .any(|b| b.lock().expect("Poisoned lock").is_vhost_user());
        // Page faults are more expensive for shared memory mapping, including memfd.
        // For this reason, we only back guest memory with a memfd
        // if a vhost-user-blk device is configured in the VM, otherwise we fall back to
        // an anonymous private memory.
        //
        // The vhost-user-blk branch is not currently covered by integration tests in Rust,
        // because that would require running a backend process. If in the future we converge to
        // a single way of backing guest memory for vhost-user and non-vhost-user cases,
        // that would not be worth the effort.
        if vhost_user_device_used {
            memory::memfd_backed(
                regions,
                self.machine_config.track_dirty_pages,
                self.machine_config.huge_pages,
            )
        } else {
            memory::anonymous(
                regions.iter().copied(),
                self.machine_config.track_dirty_pages,
                self.machine_config.huge_pages,
            )
        }
    }
    /// Allocates guest memory in a configuration most appropriate for these [`VmResources`].
    pub fn allocate_guest_memory(&self) -> Result<Vec<GuestRegionMmap>, MemoryError> {
        let regions =
            crate::arch::arch_memory_regions(mib_to_bytes(self.machine_config.mem_size_mib));
        self.allocate_memory_regions(&regions)
    }
    /// Allocates a single guest memory region.
    pub fn allocate_memory_region(
        &self,
        start: GuestAddress,
        size: usize,
    ) -> Result<GuestRegionMmap, MemoryError> {
        // One input region yields exactly one output region, so `pop().unwrap()`
        // cannot fail here.
        Ok(self
            .allocate_memory_regions(&[(start, size)])?
            .pop()
            .unwrap())
    }
}
// Projects the live `VmResources` back into the JSON-facing `VmmConfig`
// shape (e.g. for reporting the full VM configuration). One-shot settings
// such as logger, metrics and cpu_config are not reconstructed.
impl From<&VmResources> for VmmConfig {
    fn from(resources: &VmResources) -> Self {
        VmmConfig {
            balloon: resources.balloon.get_config().ok(),
            drives: resources.block.configs(),
            boot_source: resources.boot_source.config.clone(),
            cpu_config: None,
            logger: None,
            machine_config: Some(resources.machine_config.clone()),
            metrics: None,
            // Rebuilt from the data store and net devices; see `mmds_config()`.
            mmds_config: resources.mmds_config(),
            network_interfaces: resources.net_builder.configs(),
            vsock: resources.vsock.config(),
            entropy: resources.entropy.config(),
            pmem_devices: resources.pmem.configs(),
            // serial_config is marked serde(skip) so that it doesnt end up in snapshots.
            serial_config: None,
            memory_hotplug: resources.memory_hotplug.clone(),
        }
    }
}
#[cfg(test)]
mod tests {
use std::fs::File;
use std::io::Write;
use std::os::linux::fs::MetadataExt;
use std::str::FromStr;
use serde_json::{Map, Value};
use vmm_sys_util::tempfile::TempFile;
use super::*;
use crate::HTTP_MAX_PAYLOAD_SIZE;
use crate::cpu_config::templates::test_utils::TEST_TEMPLATE_JSON;
use crate::cpu_config::templates::{CpuTemplateType, StaticCpuTemplate};
use crate::devices::virtio::block::virtio::VirtioBlockError;
use crate::devices::virtio::block::{BlockError, CacheType};
use crate::devices::virtio::vsock::VSOCK_DEV_ID;
use crate::resources::VmResources;
use crate::utils::net::mac::MacAddr;
use crate::vmm_config::RateLimiterConfig;
use crate::vmm_config::boot_source::{
BootConfig, BootSource, BootSourceConfig, DEFAULT_KERNEL_CMDLINE,
};
use crate::vmm_config::drive::{BlockBuilder, BlockDeviceConfig};
use crate::vmm_config::machine_config::{HugePageConfig, MachineConfig, MachineConfigError};
use crate::vmm_config::net::{NetBuilder, NetworkInterfaceConfig};
use crate::vmm_config::vsock::tests::default_config;
fn default_net_cfg() -> NetworkInterfaceConfig {
NetworkInterfaceConfig {
iface_id: "net_if1".to_string(),
// TempFile::new_with_prefix("") generates a random file name used as random net_if
// name.
host_dev_name: TempFile::new_with_prefix("")
.unwrap()
.as_path()
.to_str()
.unwrap()
.to_string(),
guest_mac: Some(MacAddr::from_str("01:23:45:67:89:0a").unwrap()),
rx_rate_limiter: Some(RateLimiterConfig::default()),
tx_rate_limiter: Some(RateLimiterConfig::default()),
}
}
fn default_net_builder() -> NetBuilder {
let mut net_builder = NetBuilder::new();
net_builder.build(default_net_cfg()).unwrap();
net_builder
}
fn default_block_cfg() -> (BlockDeviceConfig, TempFile) {
let tmp_file = TempFile::new().unwrap();
(
BlockDeviceConfig {
drive_id: "block1".to_string(),
partuuid: Some("0eaa91a0-01".to_string()),
is_root_device: false,
cache_type: CacheType::Unsafe,
is_read_only: Some(false),
path_on_host: Some(tmp_file.as_path().to_str().unwrap().to_string()),
rate_limiter: Some(RateLimiterConfig::default()),
file_engine_type: None,
socket: None,
},
tmp_file,
)
}
fn default_blocks() -> BlockBuilder {
let mut blocks = BlockBuilder::new();
let (cfg, _file) = default_block_cfg();
blocks.insert(cfg, false).unwrap();
blocks
}
fn default_boot_cfg() -> BootSource {
let kernel_cmdline =
linux_loader::cmdline::Cmdline::try_from(DEFAULT_KERNEL_CMDLINE, 4096).unwrap();
let tmp_file = TempFile::new().unwrap();
BootSource {
config: BootSourceConfig::default(),
builder: Some(BootConfig {
cmdline: kernel_cmdline,
kernel_file: File::open(tmp_file.as_path()).unwrap(),
initrd_file: Some(File::open(tmp_file.as_path()).unwrap()),
}),
}
}
fn default_vm_resources() -> VmResources {
VmResources {
machine_config: MachineConfig::default(),
boot_source: default_boot_cfg(),
block: default_blocks(),
vsock: Default::default(),
balloon: Default::default(),
net_builder: default_net_builder(),
mmds: None,
boot_timer: false,
mmds_size_limit: HTTP_MAX_PAYLOAD_SIZE,
entropy: Default::default(),
pmem: Default::default(),
pci_enabled: false,
serial_out_path: None,
memory_hotplug: Default::default(),
}
}
#[test]
fn test_from_json() {
let kernel_file = TempFile::new().unwrap();
let rootfs_file = TempFile::new().unwrap();
let scratch_file = TempFile::new().unwrap();
scratch_file.as_file().set_len(0x1000).unwrap();
let default_instance_info = InstanceInfo::default();
// We will test different scenarios with invalid resources configuration and
// check the expected errors. We include configuration for the kernel and rootfs
// in every json because they are mandatory fields. If we don't configure
// these resources, it is considered an invalid json and the test will crash.
// Invalid JSON string must yield a `serde_json` error.
let error =
VmResources::from_json(r#"}"#, &default_instance_info, HTTP_MAX_PAYLOAD_SIZE, None)
.unwrap_err();
assert!(
matches!(error, ResourcesError::InvalidJson(_)),
"{:?}",
error
);
// Valid JSON string without the configuration for kernel or rootfs
// result in an invalid JSON error.
let error =
VmResources::from_json(r#"{}"#, &default_instance_info, HTTP_MAX_PAYLOAD_SIZE, None)
.unwrap_err();
assert!(
matches!(error, ResourcesError::InvalidJson(_)),
"{:?}",
error
);
// Invalid kernel path.
let mut json = format!(
r#"{{
"boot-source": {{
"kernel_image_path": "/invalid/path",
"boot_args": "console=ttyS0 reboot=k panic=1 pci=off"
}},
"drives": [
{{
"drive_id": "rootfs",
"path_on_host": "{}",
"is_root_device": true,
"is_read_only": false
}}
]
}}"#,
rootfs_file.as_path().to_str().unwrap()
);
let error = VmResources::from_json(
json.as_str(),
&default_instance_info,
HTTP_MAX_PAYLOAD_SIZE,
None,
)
.unwrap_err();
assert!(
matches!(
error,
ResourcesError::BootSource(BootSourceConfigError::InvalidKernelPath(_))
),
"{:?}",
error
);
// Invalid rootfs path.
json = format!(
r#"{{
"boot-source": {{
"kernel_image_path": "{}",
"boot_args": "console=ttyS0 reboot=k panic=1 pci=off"
}},
"drives": [
{{
"drive_id": "rootfs",
"path_on_host": "/invalid/path",
"is_root_device": true,
"is_read_only": false
}}
]
}}"#,
kernel_file.as_path().to_str().unwrap()
);
let error = VmResources::from_json(
json.as_str(),
&default_instance_info,
HTTP_MAX_PAYLOAD_SIZE,
None,
)
.unwrap_err();
assert!(
matches!(
error,
ResourcesError::BlockDevice(DriveError::CreateBlockDevice(
BlockError::VirtioBackend(VirtioBlockError::BackingFile(_, _)),
))
),
"{:?}",
error
);
// Valid config for x86 but invalid on aarch64 since it uses cpu_template.
json = format!(
r#"{{
"boot-source": {{
"kernel_image_path": "{}",
"boot_args": "console=ttyS0 reboot=k panic=1 pci=off"
}},
"drives": [
{{
"drive_id": "rootfs",
"path_on_host": "{}",
"is_root_device": true,
"is_read_only": false
}}
],
"machine-config": {{
"vcpu_count": 2,
"mem_size_mib": 1024,
"cpu_template": "C3"
}}
}}"#,
kernel_file.as_path().to_str().unwrap(),
rootfs_file.as_path().to_str().unwrap()
);
#[cfg(target_arch = "x86_64")]
VmResources::from_json(
json.as_str(),
&default_instance_info,
HTTP_MAX_PAYLOAD_SIZE,
None,
)
.unwrap();
#[cfg(target_arch = "aarch64")]
VmResources::from_json(
json.as_str(),
&default_instance_info,
HTTP_MAX_PAYLOAD_SIZE,
None,
)
.unwrap_err();
// Invalid memory size.
json = format!(
r#"{{
"boot-source": {{
"kernel_image_path": "{}",
"boot_args": "console=ttyS0 reboot=k panic=1 pci=off"
}},
"drives": [
{{
"drive_id": "rootfs",
"path_on_host": "{}",
"is_root_device": true,
"is_read_only": false
}}
],
"machine-config": {{
"vcpu_count": 2,
"mem_size_mib": 0
}}
}}"#,
kernel_file.as_path().to_str().unwrap(),
rootfs_file.as_path().to_str().unwrap()
);
let error = VmResources::from_json(
json.as_str(),
&default_instance_info,
HTTP_MAX_PAYLOAD_SIZE,
None,
)
.unwrap_err();
assert!(
matches!(
error,
ResourcesError::MachineConfig(MachineConfigError::InvalidMemorySize)
),
"{:?}",
error
);
// Invalid path for logger pipe.
json = format!(
r#"{{
"boot-source": {{
"kernel_image_path": "{}",
"boot_args": "console=ttyS0 reboot=k panic=1 pci=off"
}},
"drives": [
{{
"drive_id": "rootfs",
"path_on_host": "{}",
"is_root_device": true,
"is_read_only": false
}}
],
"logger": {{
"log_path": "/invalid/path"
}}
}}"#,
kernel_file.as_path().to_str().unwrap(),
rootfs_file.as_path().to_str().unwrap()
);
let error = VmResources::from_json(
json.as_str(),
&default_instance_info,
HTTP_MAX_PAYLOAD_SIZE,
None,
)
.unwrap_err();
assert!(
matches!(
error,
ResourcesError::Logger(crate::logger::LoggerUpdateError(_))
),
"{:?}",
error
);
// Invalid path for metrics pipe.
json = format!(
r#"{{
"boot-source": {{
"kernel_image_path": "{}",
"boot_args": "console=ttyS0 reboot=k panic=1 pci=off"
}},
"drives": [
{{
"drive_id": "rootfs",
"path_on_host": "{}",
"is_root_device": true,
"is_read_only": false
}}
],
"metrics": {{
"metrics_path": "/invalid/path"
}}
}}"#,
kernel_file.as_path().to_str().unwrap(),
rootfs_file.as_path().to_str().unwrap()
);
let error = VmResources::from_json(
json.as_str(),
&default_instance_info,
HTTP_MAX_PAYLOAD_SIZE,
None,
)
.unwrap_err();
assert!(
matches!(
error,
ResourcesError::Metrics(MetricsConfigError::InitializationFailure { .. })
),
"{:?}",
error
);
// Reuse of a host name.
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/rpc_interface.rs | src/vmm/src/rpc_interface.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fmt::{self, Debug};
use std::sync::{Arc, Mutex, MutexGuard};
use serde_json::Value;
use utils::time::{ClockType, get_time_us};
use super::builder::build_and_boot_microvm;
use super::persist::{create_snapshot, restore_from_snapshot};
use super::resources::VmResources;
use super::{Vmm, VmmError};
use crate::EventManager;
use crate::builder::StartMicrovmError;
use crate::cpu_config::templates::{CustomCpuTemplate, GuestConfigError};
use crate::devices::virtio::balloon::device::{HintingStatus, StartHintingCmd};
use crate::devices::virtio::mem::VirtioMemStatus;
use crate::logger::{LoggerConfig, info, warn, *};
use crate::mmds::data_store::{self, Mmds};
use crate::persist::{CreateSnapshotError, RestoreFromSnapshotError, VmInfo};
use crate::resources::VmmConfig;
use crate::seccomp::BpfThreadMap;
use crate::vmm_config::balloon::{
BalloonConfigError, BalloonDeviceConfig, BalloonStats, BalloonUpdateConfig,
BalloonUpdateStatsConfig,
};
use crate::vmm_config::boot_source::{BootSourceConfig, BootSourceConfigError};
use crate::vmm_config::drive::{BlockDeviceConfig, BlockDeviceUpdateConfig, DriveError};
use crate::vmm_config::entropy::{EntropyDeviceConfig, EntropyDeviceError};
use crate::vmm_config::instance_info::InstanceInfo;
use crate::vmm_config::machine_config::{MachineConfig, MachineConfigError, MachineConfigUpdate};
use crate::vmm_config::memory_hotplug::{
MemoryHotplugConfig, MemoryHotplugConfigError, MemoryHotplugSizeUpdate,
};
use crate::vmm_config::metrics::{MetricsConfig, MetricsConfigError};
use crate::vmm_config::mmds::{MmdsConfig, MmdsConfigError};
use crate::vmm_config::net::{
NetworkInterfaceConfig, NetworkInterfaceError, NetworkInterfaceUpdateConfig,
};
use crate::vmm_config::pmem::{PmemConfig, PmemConfigError};
use crate::vmm_config::serial::SerialConfig;
use crate::vmm_config::snapshot::{CreateSnapshotParams, LoadSnapshotParams, SnapshotType};
use crate::vmm_config::vsock::{VsockConfigError, VsockDeviceConfig};
use crate::vmm_config::{self, RateLimiterUpdate};
/// This enum represents the public interface of the VMM. Each action contains various
/// bits of information (ids, paths, etc.).
///
/// Each variant carries the already-deserialized payload of one API request; the
/// pre-boot and runtime controllers below decide which variants they accept.
#[derive(Debug, PartialEq, Eq)]
pub enum VmmAction {
    /// Configure the boot source of the microVM using as input the `ConfigureBootSource`. This
    /// action can only be called before the microVM has booted.
    ConfigureBootSource(BootSourceConfig),
    /// Configure the logger using as input the `LoggerConfig`. This action can only be called
    /// before the microVM has booted.
    ConfigureLogger(LoggerConfig),
    /// Configure the metrics using as input the `MetricsConfig`. This action can only be called
    /// before the microVM has booted.
    ConfigureMetrics(MetricsConfig),
    /// Configure the serial device. This action can only be called before the microVM has booted.
    ConfigureSerial(SerialConfig),
    /// Create a snapshot using as input the `CreateSnapshotParams`. This action can only be called
    /// after the microVM has booted and only when the microVM is in `Paused` state.
    CreateSnapshot(CreateSnapshotParams),
    /// Get the balloon device configuration.
    GetBalloonConfig,
    /// Get the balloon device's latest statistics.
    GetBalloonStats,
    /// Get complete microVM configuration in JSON format.
    GetFullVmConfig,
    /// Get MMDS contents.
    GetMMDS,
    /// Get the machine configuration of the microVM.
    GetVmMachineConfig,
    /// Get microVM instance information.
    GetVmInstanceInfo,
    /// Get microVM version.
    GetVmmVersion,
    /// Flush the metrics. This action can only be called after the logger has been configured.
    FlushMetrics,
    /// Add a new block device or update one that already exists using the `BlockDeviceConfig` as
    /// input. This action can only be called before the microVM has booted.
    InsertBlockDevice(BlockDeviceConfig),
    /// Add a virtio-pmem device.
    InsertPmemDevice(PmemConfig),
    /// Add a new network interface config or update one that already exists using the
    /// `NetworkInterfaceConfig` as input. This action can only be called before the microVM has
    /// booted.
    InsertNetworkDevice(NetworkInterfaceConfig),
    /// Load the microVM state using as input the `LoadSnapshotParams`. This action can only be
    /// called before the microVM has booted. If this action is successful, the loaded microVM will
    /// be in `Paused` state. Should change this state to `Resumed` for the microVM to run.
    LoadSnapshot(LoadSnapshotParams),
    /// Partial update of the MMDS contents.
    PatchMMDS(Value),
    /// Pause the guest, by pausing the microVM VCPUs.
    Pause,
    /// Repopulate the MMDS contents.
    PutMMDS(Value),
    /// Configure the guest vCPU features.
    PutCpuConfiguration(CustomCpuTemplate),
    /// Resume the guest, by resuming the microVM VCPUs.
    Resume,
    /// Set the balloon device or update the one that already exists using the
    /// `BalloonDeviceConfig` as input. This action can only be called before the microVM
    /// has booted.
    SetBalloonDevice(BalloonDeviceConfig),
    /// Set the MMDS configuration.
    SetMmdsConfiguration(MmdsConfig),
    /// Set the vsock device or update the one that already exists using the
    /// `VsockDeviceConfig` as input. This action can only be called before the microVM has
    /// booted.
    SetVsockDevice(VsockDeviceConfig),
    /// Set the entropy device using `EntropyDeviceConfig` as input. This action can only be called
    /// before the microVM has booted.
    SetEntropyDevice(EntropyDeviceConfig),
    /// Get the memory hotplug device configuration and status.
    GetMemoryHotplugStatus,
    /// Set the memory hotplug device using `MemoryHotplugConfig` as input. This action can only be
    /// called before the microVM has booted.
    SetMemoryHotplugDevice(MemoryHotplugConfig),
    /// Updates the memory hotplug device using `MemoryHotplugConfigUpdate` as input. This action
    /// can only be called after the microVM has booted.
    UpdateMemoryHotplugSize(MemoryHotplugSizeUpdate),
    /// Launch the microVM. This action can only be called before the microVM has booted.
    StartMicroVm,
    /// Send CTRL+ALT+DEL to the microVM, using the i8042 keyboard function. If an AT-keyboard
    /// driver is listening on the guest end, this can be used to shut down the microVM gracefully.
    #[cfg(target_arch = "x86_64")]
    SendCtrlAltDel,
    /// Update the balloon size, after microVM start.
    UpdateBalloon(BalloonUpdateConfig),
    /// Update the balloon statistics polling interval, after microVM start.
    UpdateBalloonStatistics(BalloonUpdateStatsConfig),
    /// Start a free page hinting run
    StartFreePageHinting(StartHintingCmd),
    /// Retrieve the status of the hinting run
    GetFreePageHintingStatus,
    /// Stops a free page hinting run
    StopFreePageHinting,
    /// Update existing block device properties such as `path_on_host` or `rate_limiter`.
    UpdateBlockDevice(BlockDeviceUpdateConfig),
    /// Update a network interface, after microVM start. Currently, the only updatable properties
    /// are the RX and TX rate limiters.
    UpdateNetworkInterface(NetworkInterfaceUpdateConfig),
    /// Update the microVM configuration (memory & vcpu) using `VmUpdateConfig` as input. This
    /// action can only be called before the microVM has booted.
    UpdateMachineConfiguration(MachineConfigUpdate),
}
/// Wrapper for all errors associated with VMM actions.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum VmmActionError {
/// Balloon config error: {0}
BalloonConfig(#[from] BalloonConfigError),
/// Balloon update error: {0}
BalloonUpdate(VmmError),
/// Boot source error: {0}
BootSource(#[from] BootSourceConfigError),
/// Create snapshot error: {0}
CreateSnapshot(#[from] CreateSnapshotError),
/// Configure CPU error: {0}
ConfigureCpu(#[from] GuestConfigError),
/// Drive config error: {0}
DriveConfig(#[from] DriveError),
/// Entropy device error: {0}
EntropyDevice(#[from] EntropyDeviceError),
/// Pmem device error: {0}
PmemDevice(#[from] PmemConfigError),
/// Memory hotplug config error: {0}
MemoryHotplugConfig(#[from] MemoryHotplugConfigError),
/// Memory hotplug update error: {0}
MemoryHotplugUpdate(VmmError),
/// Internal VMM error: {0}
InternalVmm(#[from] VmmError),
/// Load snapshot error: {0}
LoadSnapshot(#[from] LoadSnapshotError),
/// Logger error: {0}
Logger(#[from] crate::logger::LoggerUpdateError),
/// Machine config error: {0}
MachineConfig(#[from] MachineConfigError),
/// Metrics error: {0}
Metrics(#[from] MetricsConfigError),
#[from(ignore)]
/// MMDS error: {0}
Mmds(#[from] data_store::MmdsDatastoreError),
/// MMMDS config error: {0}
MmdsConfig(#[from] MmdsConfigError),
#[from(ignore)]
/// MMDS limit exceeded error: {0}
MmdsLimitExceeded(data_store::MmdsDatastoreError),
/// Network config error: {0}
NetworkConfig(#[from] NetworkInterfaceError),
/// The requested operation is not supported: {0}
NotSupported(String),
/// The requested operation is not supported after starting the microVM.
OperationNotSupportedPostBoot,
/// The requested operation is not supported before starting the microVM.
OperationNotSupportedPreBoot,
/// Start microvm error: {0}
StartMicrovm(#[from] StartMicrovmError),
/// Vsock config error: {0}
VsockConfig(#[from] VsockConfigError),
}
/// The enum represents the response sent by the VMM in case of success. The response is either
/// empty, when no data needs to be sent, or an internal VMM structure.
// Some variants (e.g. `FullVmConfig`) are much larger than others; boxing them is not
// worth the indirection for a type constructed once per API response, hence the allow.
#[allow(clippy::large_enum_variant)]
#[derive(Debug, PartialEq, Eq)]
pub enum VmmData {
    /// The balloon device configuration.
    BalloonConfig(BalloonDeviceConfig),
    /// The latest balloon device statistics.
    BalloonStats(BalloonStats),
    /// No data is sent on the channel.
    Empty,
    /// The complete microVM configuration in JSON format.
    FullVmConfig(VmmConfig),
    /// The microVM configuration represented by `VmConfig`.
    MachineConfiguration(MachineConfig),
    /// Mmds contents.
    MmdsValue(serde_json::Value),
    /// The microVM instance information.
    InstanceInformation(InstanceInfo),
    /// The microVM version.
    VmmVersion(String),
    /// The status of the memory hotplug device.
    VirtioMemStatus(VirtioMemStatus),
    /// The status of the virtio-balloon hinting run
    HintingStatus(HintingStatus),
}
/// Trait used for deduplicating the MMDS request handling across the two ApiControllers.
/// The methods get a mutable reference to self because the methods should initialise the data
/// store with the defaults if it's not already initialised.
trait MmdsRequestHandler {
    /// Returns a locked handle to the MMDS data store, initialising it with
    /// defaults if it has not been configured yet.
    fn mmds(&mut self) -> Result<MutexGuard<'_, Mmds>, VmmActionError>;

    /// Returns the full MMDS data store contents.
    fn get_mmds(&mut self) -> Result<VmmData, VmmActionError> {
        Ok(VmmData::MmdsValue(self.mmds()?.data_store_value()))
    }

    /// Partially updates the MMDS contents with `value`.
    fn patch_mmds(&mut self, value: serde_json::Value) -> Result<VmmData, VmmActionError> {
        self.mmds()?
            .patch_data(value)
            .map(|()| VmmData::Empty)
            .map_err(mmds_error_to_action)
    }

    /// Replaces the MMDS contents with `value`.
    fn put_mmds(&mut self, value: serde_json::Value) -> Result<VmmData, VmmActionError> {
        self.mmds()?
            .put_data(value)
            .map(|()| VmmData::Empty)
            .map_err(mmds_error_to_action)
    }
}

/// Maps an MMDS data store error to the corresponding `VmmActionError`.
///
/// `DataStoreLimitExceeded` is reported through its dedicated
/// `MmdsLimitExceeded` variant so callers can distinguish it; every other error
/// goes through the generic `Mmds` variant. Extracted here because `patch_mmds`
/// and `put_mmds` previously duplicated this mapping closure verbatim.
fn mmds_error_to_action(err: data_store::MmdsDatastoreError) -> VmmActionError {
    match err {
        data_store::MmdsDatastoreError::DataStoreLimitExceeded => {
            VmmActionError::MmdsLimitExceeded(
                data_store::MmdsDatastoreError::DataStoreLimitExceeded,
            )
        }
        _ => VmmActionError::Mmds(err),
    }
}
/// Enables pre-boot setup and instantiation of a Firecracker VMM.
pub struct PrebootApiController<'a> {
    // Seccomp filters installed on the VMM threads at boot time.
    seccomp_filters: &'a BpfThreadMap,
    // Identifying information (id, state, version) for this instance.
    instance_info: InstanceInfo,
    // Mutable view of the resources being configured by successive API calls.
    vm_resources: &'a mut VmResources,
    // Event loop the built VMM's devices register with.
    event_manager: &'a mut EventManager,
    /// The [`Vmm`] object constructed through requests
    pub built_vmm: Option<Arc<Mutex<Vmm>>>,
    // Configuring boot specific resources will set this to true.
    // Loading from snapshot will not be allowed once this is true.
    boot_path: bool,
    // Some PrebootApiRequest errors are irrecoverable and Firecracker
    // should cleanly teardown if they occur.
    fatal_error: Option<BuildMicrovmFromRequestsError>,
}
// TODO Remove when `EventManager` implements `std::fmt::Debug`.
impl fmt::Debug for PrebootApiController<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Hand-rolled because `event_manager` has no `Debug` impl; it is rendered
        // as a placeholder while every other field is forwarded as-is.
        let mut dbg = f.debug_struct("PrebootApiController");
        dbg.field("seccomp_filters", &self.seccomp_filters);
        dbg.field("instance_info", &self.instance_info);
        dbg.field("vm_resources", &self.vm_resources);
        dbg.field("event_manager", &"?");
        dbg.field("built_vmm", &self.built_vmm);
        dbg.field("boot_path", &self.boot_path);
        dbg.field("fatal_error", &self.fatal_error);
        dbg.finish()
    }
}
impl MmdsRequestHandler for PrebootApiController<'_> {
    /// Hands out the (default-initialised if needed) MMDS store, wrapping
    /// configuration failures into the action-level error type.
    fn mmds(&mut self) -> Result<MutexGuard<'_, Mmds>, VmmActionError> {
        match self.vm_resources.locked_mmds_or_default() {
            Ok(guard) => Ok(guard),
            Err(err) => Err(VmmActionError::MmdsConfig(err)),
        }
    }
}
/// Error type for [`PrebootApiController::load_snapshot`]
// `displaydoc::Display` turns each variant's doc comment into its `Display`
// string, so the `/// ...` lines below are user-facing error messages.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum LoadSnapshotError {
    /// Loading a microVM snapshot not allowed after configuring boot-specific resources.
    LoadSnapshotNotAllowed,
    /// Failed to restore from snapshot: {0}
    RestoreFromSnapshot(#[from] RestoreFromSnapshotError),
    /// Failed to resume microVM: {0}
    ResumeMicrovm(#[from] VmmError),
}
/// Shorthand type for a request containing a boxed VmmAction.
// Boxed so the (large) `VmmAction` moves across the API channel as one pointer.
pub type ApiRequest = Box<VmmAction>;

/// Shorthand type for a response containing a boxed Result.
pub type ApiResponse = Box<Result<VmmData, VmmActionError>>;

/// Error type for `PrebootApiController::build_microvm_from_requests`.
// `displaydoc::Display` turns each variant's doc comment into its `Display`
// string, so the `/// ...` lines below are user-facing error messages.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum BuildMicrovmFromRequestsError {
    /// Configuring MMDS failed: {0}.
    ConfigureMmds(#[from] MmdsConfigError),
    /// Populating MMDS from file failed: {0}.
    PopulateMmds(#[from] data_store::MmdsDatastoreError),
    /// Loading snapshot failed.
    Restore,
    /// Resuming MicroVM after loading snapshot failed.
    Resume,
}
impl<'a> PrebootApiController<'a> {
    /// Constructor for the PrebootApiController.
    ///
    /// The controller starts with no built VMM, no fatal error and the
    /// boot-path flag unset (i.e. snapshot loading is still permitted).
    pub fn new(
        seccomp_filters: &'a BpfThreadMap,
        instance_info: InstanceInfo,
        vm_resources: &'a mut VmResources,
        event_manager: &'a mut EventManager,
    ) -> Self {
        Self {
            seccomp_filters,
            instance_info,
            vm_resources,
            event_manager,
            built_vmm: None,
            boot_path: false,
            fatal_error: None,
        }
    }

    /// Default implementation for the function that builds and starts a microVM.
    ///
    /// Drives a request/response loop over the API channel until a microVM is
    /// successfully started (or a fatal error occurs).
    ///
    /// Returns a populated `VmResources` object and a running `Vmm` object.
    #[allow(clippy::too_many_arguments)]
    pub fn build_microvm_from_requests(
        seccomp_filters: &BpfThreadMap,
        event_manager: &mut EventManager,
        instance_info: InstanceInfo,
        from_api: &std::sync::mpsc::Receiver<ApiRequest>,
        to_api: &std::sync::mpsc::Sender<ApiResponse>,
        api_event_fd: &vmm_sys_util::eventfd::EventFd,
        boot_timer_enabled: bool,
        pci_enabled: bool,
        mmds_size_limit: usize,
        metadata_json: Option<&str>,
    ) -> Result<(VmResources, Arc<Mutex<Vmm>>), BuildMicrovmFromRequestsError> {
        let mut vm_resources = VmResources {
            boot_timer: boot_timer_enabled,
            mmds_size_limit,
            pci_enabled,
            ..Default::default()
        };
        // Init the data store from file, if present.
        if let Some(data) = metadata_json {
            vm_resources.locked_mmds_or_default()?.put_data(
                serde_json::from_str(data).expect("MMDS error: metadata provided not valid json"),
            )?;
            info!("Successfully added metadata to mmds from file");
        }
        let mut preboot_controller = PrebootApiController::new(
            seccomp_filters,
            instance_info,
            &mut vm_resources,
            event_manager,
        );
        // Configure and start microVM through successive API calls.
        // Iterate through API calls to configure microVM.
        // The loop breaks when a microVM is successfully started, and a running Vmm is built.
        while preboot_controller.built_vmm.is_none() {
            // Get request
            let req = from_api
                .recv()
                .expect("The channel's sending half was disconnected. Cannot receive data.");
            // Also consume the API event along with the message. It is safe to unwrap()
            // because this event_fd is blocking.
            api_event_fd
                .read()
                .expect("VMM: Failed to read the API event_fd");
            // Process the request.
            let res = preboot_controller.handle_preboot_request(*req);
            // Send back the response.
            to_api.send(Box::new(res)).expect("one-shot channel closed");
            // If any fatal errors were encountered, break the loop.
            if let Some(preboot_error) = preboot_controller.fatal_error {
                return Err(preboot_error);
            }
        }
        // Safe to unwrap because previous loop cannot end on None.
        let vmm = preboot_controller.built_vmm.unwrap();
        Ok((vm_resources, vmm))
    }

    /// Handles the incoming preboot request and provides a response for it.
    /// Returns a built/running `Vmm` after handling a successful `StartMicroVm` request.
    pub fn handle_preboot_request(
        &mut self,
        request: VmmAction,
    ) -> Result<VmmData, VmmActionError> {
        use self::VmmAction::*;

        match request {
            // Supported operations allowed pre-boot.
            ConfigureBootSource(config) => self.set_boot_source(config),
            ConfigureLogger(logger_cfg) => crate::logger::LOGGER
                .update(logger_cfg)
                .map(|()| VmmData::Empty)
                .map_err(VmmActionError::Logger),
            ConfigureMetrics(metrics_cfg) => vmm_config::metrics::init_metrics(metrics_cfg)
                .map(|()| VmmData::Empty)
                .map_err(VmmActionError::Metrics),
            ConfigureSerial(serial_cfg) => {
                self.vm_resources.serial_out_path = serial_cfg.serial_out_path;
                Ok(VmmData::Empty)
            }
            GetBalloonConfig => self.balloon_config(),
            GetFullVmConfig => {
                warn!(
                    "If the VM was restored from snapshot, boot-source, machine-config.smt, and \
                     machine-config.cpu_template will all be empty."
                );
                Ok(VmmData::FullVmConfig((&*self.vm_resources).into()))
            }
            GetMMDS => self.get_mmds(),
            GetVmMachineConfig => Ok(VmmData::MachineConfiguration(
                self.vm_resources.machine_config.clone(),
            )),
            GetVmInstanceInfo => Ok(VmmData::InstanceInformation(self.instance_info.clone())),
            GetVmmVersion => Ok(VmmData::VmmVersion(self.instance_info.vmm_version.clone())),
            InsertBlockDevice(config) => self.insert_block_device(config),
            InsertPmemDevice(config) => self.insert_pmem_device(config),
            InsertNetworkDevice(config) => self.insert_net_device(config),
            LoadSnapshot(config) => self
                .load_snapshot(&config)
                .map_err(VmmActionError::LoadSnapshot),
            PatchMMDS(value) => self.patch_mmds(value),
            PutCpuConfiguration(custom_cpu_template) => {
                self.set_custom_cpu_template(custom_cpu_template)
            }
            PutMMDS(value) => self.put_mmds(value),
            SetBalloonDevice(config) => self.set_balloon_device(config),
            SetVsockDevice(config) => self.set_vsock_device(config),
            SetMmdsConfiguration(config) => self.set_mmds_config(config),
            StartMicroVm => self.start_microvm(),
            UpdateMachineConfiguration(config) => self.update_machine_config(config),
            SetEntropyDevice(config) => self.set_entropy_device(config),
            SetMemoryHotplugDevice(config) => self.set_memory_hotplug_device(config),
            // Operations not allowed pre-boot.
            CreateSnapshot(_)
            | FlushMetrics
            | Pause
            | Resume
            | GetBalloonStats
            | GetMemoryHotplugStatus
            | UpdateBalloon(_)
            | UpdateBalloonStatistics(_)
            | UpdateBlockDevice(_)
            | UpdateMemoryHotplugSize(_)
            | UpdateNetworkInterface(_)
            | StartFreePageHinting(_)
            | GetFreePageHintingStatus
            | StopFreePageHinting => Err(VmmActionError::OperationNotSupportedPreBoot),
            #[cfg(target_arch = "x86_64")]
            SendCtrlAltDel => Err(VmmActionError::OperationNotSupportedPreBoot),
        }
    }

    /// Returns the current balloon device configuration.
    fn balloon_config(&mut self) -> Result<VmmData, VmmActionError> {
        self.vm_resources
            .balloon
            .get_config()
            .map(VmmData::BalloonConfig)
            .map_err(VmmActionError::BalloonConfig)
    }

    /// Adds/updates a block device config. Marks the boot path as taken,
    /// which disallows a subsequent `LoadSnapshot`.
    fn insert_block_device(&mut self, cfg: BlockDeviceConfig) -> Result<VmmData, VmmActionError> {
        self.boot_path = true;
        self.vm_resources
            .set_block_device(cfg)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::DriveConfig)
    }

    /// Adds/updates a network interface config. Marks the boot path as taken.
    fn insert_net_device(
        &mut self,
        cfg: NetworkInterfaceConfig,
    ) -> Result<VmmData, VmmActionError> {
        self.boot_path = true;
        self.vm_resources
            .build_net_device(cfg)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::NetworkConfig)
    }

    /// Adds a virtio-pmem device config. Marks the boot path as taken.
    fn insert_pmem_device(&mut self, cfg: PmemConfig) -> Result<VmmData, VmmActionError> {
        self.boot_path = true;
        self.vm_resources
            .build_pmem_device(cfg)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::PmemDevice)
    }

    /// Sets/updates the balloon device config. Marks the boot path as taken.
    fn set_balloon_device(&mut self, cfg: BalloonDeviceConfig) -> Result<VmmData, VmmActionError> {
        self.boot_path = true;
        self.vm_resources
            .set_balloon_device(cfg)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::BalloonConfig)
    }

    /// Configures the boot source (kernel, boot args). Marks the boot path as taken.
    fn set_boot_source(&mut self, cfg: BootSourceConfig) -> Result<VmmData, VmmActionError> {
        self.boot_path = true;
        self.vm_resources
            .build_boot_source(cfg)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::BootSource)
    }

    /// Applies the MMDS configuration. Marks the boot path as taken.
    fn set_mmds_config(&mut self, cfg: MmdsConfig) -> Result<VmmData, VmmActionError> {
        self.boot_path = true;
        self.vm_resources
            .set_mmds_config(cfg, &self.instance_info.id)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::MmdsConfig)
    }

    /// Applies a machine config update (vcpus/memory/...). Marks the boot path as taken.
    fn update_machine_config(
        &mut self,
        cfg: MachineConfigUpdate,
    ) -> Result<VmmData, VmmActionError> {
        self.boot_path = true;
        self.vm_resources
            .update_machine_config(&cfg)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::MachineConfig)
    }

    /// Stores a custom CPU template to be applied at boot. Infallible here;
    /// validation happens when the microVM is built.
    fn set_custom_cpu_template(
        &mut self,
        cpu_template: CustomCpuTemplate,
    ) -> Result<VmmData, VmmActionError> {
        self.vm_resources.set_custom_cpu_template(cpu_template);
        Ok(VmmData::Empty)
    }

    /// Sets/updates the vsock device config. Marks the boot path as taken.
    fn set_vsock_device(&mut self, cfg: VsockDeviceConfig) -> Result<VmmData, VmmActionError> {
        self.boot_path = true;
        self.vm_resources
            .set_vsock_device(cfg)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::VsockConfig)
    }

    /// Sets the entropy device config. Marks the boot path as taken.
    fn set_entropy_device(&mut self, cfg: EntropyDeviceConfig) -> Result<VmmData, VmmActionError> {
        self.boot_path = true;
        self.vm_resources.build_entropy_device(cfg)?;
        Ok(VmmData::Empty)
    }

    /// Sets the memory hotplug device config. Marks the boot path as taken.
    fn set_memory_hotplug_device(
        &mut self,
        cfg: MemoryHotplugConfig,
    ) -> Result<VmmData, VmmActionError> {
        self.boot_path = true;
        self.vm_resources.set_memory_hotplug_config(cfg)?;
        Ok(VmmData::Empty)
    }

    // On success, this command will end the pre-boot stage and this controller
    // will be replaced by a runtime controller.
    fn start_microvm(&mut self) -> Result<VmmData, VmmActionError> {
        build_and_boot_microvm(
            &self.instance_info,
            self.vm_resources,
            self.event_manager,
            self.seccomp_filters,
        )
        .map(|vmm| {
            self.built_vmm = Some(vmm);
            VmmData::Empty
        })
        .map_err(VmmActionError::StartMicrovm)
    }

    // On success, this command will end the pre-boot stage and this controller
    // will be replaced by a runtime controller.
    //
    // Rejected once any boot-specific resource has been configured (boot_path).
    // Restore/resume failures are recorded as fatal so the build loop tears down.
    fn load_snapshot(
        &mut self,
        load_params: &LoadSnapshotParams,
    ) -> Result<VmmData, LoadSnapshotError> {
        let load_start_us = get_time_us(ClockType::Monotonic);

        if self.boot_path {
            let err = LoadSnapshotError::LoadSnapshotNotAllowed;
            info!("{}", err);
            return Err(err);
        }

        // Restore VM from snapshot
        let vmm = restore_from_snapshot(
            &self.instance_info,
            self.event_manager,
            self.seccomp_filters,
            load_params,
            self.vm_resources,
        )
        .inspect_err(|_| {
            // If restore fails, we consider the process is too dirty to recover.
            self.fatal_error = Some(BuildMicrovmFromRequestsError::Restore);
        })?;
        // Resume VM
        if load_params.resume_vm {
            vmm.lock()
                .expect("Poisoned lock")
                .resume_vm()
                .inspect_err(|_| {
                    // If resume fails, we consider the process is too dirty to recover.
                    self.fatal_error = Some(BuildMicrovmFromRequestsError::Resume);
                })?;
        }
        // Set the VM
        self.built_vmm = Some(vmm);

        debug!(
            "'load snapshot' VMM action took {} us.",
            update_metric_with_elapsed_time(&METRICS.latencies_us.vmm_load_snapshot, load_start_us)
        );

        Ok(VmmData::Empty)
    }
}
/// Enables RPC interaction with a running Firecracker VMM.
#[derive(Debug)]
pub struct RuntimeApiController {
    // Handle to the running VMM; locked per request.
    vmm: Arc<Mutex<Vmm>>,
    // Resources the microVM was configured with; owned post-boot.
    vm_resources: VmResources,
}
impl MmdsRequestHandler for RuntimeApiController {
    /// Hands out the (default-initialised if needed) MMDS store, wrapping
    /// configuration failures into the action-level error type.
    fn mmds(&mut self) -> Result<MutexGuard<'_, Mmds>, VmmActionError> {
        match self.vm_resources.locked_mmds_or_default() {
            Ok(guard) => Ok(guard),
            Err(err) => Err(VmmActionError::MmdsConfig(err)),
        }
    }
}
impl RuntimeApiController {
    /// Handles the incoming runtime `VmmAction` request and provides a response for it.
    ///
    /// Dispatches each action either to a helper on `self` or directly to the
    /// locked `Vmm`; configuration-time actions are rejected with
    /// `OperationNotSupportedPostBoot`.
    pub fn handle_request(&mut self, request: VmmAction) -> Result<VmmData, VmmActionError> {
        use self::VmmAction::*;
        match request {
            // Supported operations allowed post-boot.
            CreateSnapshot(snapshot_create_cfg) => self.create_snapshot(&snapshot_create_cfg),
            FlushMetrics => self.flush_metrics(),
            GetBalloonConfig => self
                .vmm
                .lock()
                .expect("Poisoned lock")
                .balloon_config()
                .map(|state| VmmData::BalloonConfig(BalloonDeviceConfig::from(state)))
                .map_err(VmmActionError::InternalVmm),
            GetBalloonStats => self
                .vmm
                .lock()
                .expect("Poisoned lock")
                .latest_balloon_stats()
                .map(VmmData::BalloonStats)
                .map_err(VmmActionError::InternalVmm),
            GetFullVmConfig => Ok(VmmData::FullVmConfig((&self.vm_resources).into())),
            GetMemoryHotplugStatus => self
                .vmm
                .lock()
                .expect("Poisoned lock")
                .memory_hotplug_status()
                .map(VmmData::VirtioMemStatus)
                .map_err(VmmActionError::InternalVmm),
            GetMMDS => self.get_mmds(),
            GetVmMachineConfig => Ok(VmmData::MachineConfiguration(
                self.vm_resources.machine_config.clone(),
            )),
            GetVmInstanceInfo => Ok(VmmData::InstanceInformation(
                self.vmm.lock().expect("Poisoned lock").instance_info(),
            )),
            GetVmmVersion => Ok(VmmData::VmmVersion(
                self.vmm.lock().expect("Poisoned lock").version(),
            )),
            PatchMMDS(value) => self.patch_mmds(value),
            Pause => self.pause(),
            PutMMDS(value) => self.put_mmds(value),
            Resume => self.resume(),
            #[cfg(target_arch = "x86_64")]
            SendCtrlAltDel => self.send_ctrl_alt_del(),
            UpdateBalloon(balloon_update) => self
                .vmm
                .lock()
                .expect("Poisoned lock")
                .update_balloon_config(balloon_update.amount_mib)
                .map(|_| VmmData::Empty)
                .map_err(VmmActionError::BalloonUpdate),
            UpdateBalloonStatistics(balloon_stats_update) => self
                .vmm
                .lock()
                .expect("Poisoned lock")
                .update_balloon_stats_config(balloon_stats_update.stats_polling_interval_s)
                .map(|_| VmmData::Empty)
                .map_err(VmmActionError::BalloonUpdate),
            StartFreePageHinting(cmd) => self
                .vmm
                .lock()
                .expect("Poisoned lock")
                .start_balloon_hinting(cmd)
                .map(|_| VmmData::Empty)
                .map_err(VmmActionError::BalloonUpdate),
            GetFreePageHintingStatus => self
                .vmm
                .lock()
                .expect("Poisoned lock")
                .get_balloon_hinting_status()
                .map(VmmData::HintingStatus)
                .map_err(VmmActionError::BalloonUpdate),
            StopFreePageHinting => self
                .vmm
                .lock()
                .expect("Poisoned lock")
                .stop_balloon_hinting()
                .map(|_| VmmData::Empty)
                .map_err(VmmActionError::BalloonUpdate),
            UpdateBlockDevice(new_cfg) => self.update_block_device(new_cfg),
            UpdateNetworkInterface(netif_update) => self.update_net_rate_limiters(netif_update),
            UpdateMemoryHotplugSize(cfg) => self
                .vmm
                .lock()
                .expect("Poisoned lock")
                .update_memory_hotplug_size(cfg.requested_size_mib)
                .map(|_| VmmData::Empty)
                .map_err(VmmActionError::MemoryHotplugUpdate),

            // Operations not allowed post-boot.
            ConfigureBootSource(_)
            | ConfigureLogger(_)
            | ConfigureMetrics(_)
            | ConfigureSerial(_)
            | InsertBlockDevice(_)
            | InsertPmemDevice(_)
            | InsertNetworkDevice(_)
            | LoadSnapshot(_)
            | PutCpuConfiguration(_)
            | SetBalloonDevice(_)
            | SetVsockDevice(_)
            | SetMmdsConfiguration(_)
            | SetEntropyDevice(_)
            | SetMemoryHotplugDevice(_)
            | StartMicroVm
            | UpdateMachineConfiguration(_) => Err(VmmActionError::OperationNotSupportedPostBoot),
        }
    }
/// Creates a new `RuntimeApiController`.
pub fn new(vm_resources: VmResources, vmm: Arc<Mutex<Vmm>>) -> Self {
Self { vmm, vm_resources }
}
/// Pauses the microVM by pausing the vCPUs.
pub fn pause(&mut self) -> Result<VmmData, VmmActionError> {
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/logger/mod.rs | src/vmm/src/logger/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Crate that implements Firecracker specific functionality as far as logging and metrics
//! collecting.
mod logging;
mod metrics;
pub use log::{Level, debug, error, info, log_enabled, trace, warn};
pub use logging::{
DEFAULT_INSTANCE_ID, DEFAULT_LEVEL, INSTANCE_ID, LOGGER, LevelFilter, LevelFilterFromStrError,
LoggerConfig, LoggerInitError, LoggerUpdateError,
};
pub use metrics::{
IncMetric, LatencyAggregateMetrics, METRICS, MetricsError, ProcessTimeReporter,
SharedIncMetric, SharedStoreMetric, StoreMetric,
};
use utils::time::{ClockType, get_time_us};
/// Alias for `std::io::LineWriter<std::fs::File>`.
// Line-buffered: `LineWriter` flushes the underlying file on every newline.
pub type FcLineWriter = std::io::LineWriter<std::fs::File>;

/// Prefix to be used in log lines for functions/modules in Firecracker
/// that are not generally available.
const DEV_PREVIEW_LOG_PREFIX: &str = "[DevPreview]";
/// Log a standard warning message indicating a given feature name
/// is in development preview, optionally followed by extra context.
pub fn log_dev_preview_warning(feature_name: &str, msg_opt: Option<String>) {
    if let Some(msg) = msg_opt {
        warn!("{DEV_PREVIEW_LOG_PREFIX} {feature_name} is in development preview - {msg}");
    } else {
        warn!("{DEV_PREVIEW_LOG_PREFIX} {feature_name} is in development preview.");
    }
}
/// Stores into `metric` the number of microseconds elapsed (per the monotonic
/// clock) since `start_time_us`, and returns that same elapsed value.
pub fn update_metric_with_elapsed_time(metric: &SharedStoreMetric, start_time_us: u64) -> u64 {
    let elapsed_us = get_time_us(ClockType::Monotonic) - start_time_us;
    metric.store(elapsed_us);
    elapsed_us
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/logger/logging.rs | src/vmm/src/logger/logging.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fmt::Debug;
use std::io::Write;
use std::path::PathBuf;
use std::str::FromStr;
use std::sync::{Mutex, OnceLock};
use std::thread;
use log::{Log, Metadata, Record};
use serde::{Deserialize, Deserializer, Serialize};
use utils::time::LocalTime;
use super::metrics::{IncMetric, METRICS};
use crate::utils::open_file_write_nonblock;
/// Default level filter for logger matching the swagger specification
/// (`src/firecracker/swagger/firecracker.yaml`).
pub const DEFAULT_LEVEL: log::LevelFilter = log::LevelFilter::Info;
/// Default instance id.
pub const DEFAULT_INSTANCE_ID: &str = "anonymous-instance";
/// Instance id.
pub static INSTANCE_ID: OnceLock<String> = OnceLock::new();
/// The logger.
///
/// Default values matching the swagger specification (`src/firecracker/swagger/firecracker.yaml`).
pub static LOGGER: Logger = Logger(Mutex::new(LoggerConfiguration {
target: None,
filter: LogFilter { module: None },
format: LogFormat {
show_level: false,
show_log_origin: false,
},
}));
/// Error type for [`Logger::init`].
pub type LoggerInitError = log::SetLoggerError;
/// Error type for [`Logger::update`].
#[derive(Debug, thiserror::Error)]
#[error("Failed to open target file: {0}")]
pub struct LoggerUpdateError(pub std::io::Error);
impl Logger {
/// Initialize the logger.
pub fn init(&'static self) -> Result<(), LoggerInitError> {
log::set_logger(self)?;
log::set_max_level(DEFAULT_LEVEL);
Ok(())
}
/// Applies the given logger configuration the logger.
pub fn update(&self, config: LoggerConfig) -> Result<(), LoggerUpdateError> {
let mut guard = self.0.lock().unwrap();
log::set_max_level(
config
.level
.map(log::LevelFilter::from)
.unwrap_or(DEFAULT_LEVEL),
);
if let Some(log_path) = config.log_path {
let file = open_file_write_nonblock(&log_path).map_err(LoggerUpdateError)?;
guard.target = Some(file);
};
if let Some(show_level) = config.show_level {
guard.format.show_level = show_level;
}
if let Some(show_log_origin) = config.show_log_origin {
guard.format.show_log_origin = show_log_origin;
}
if let Some(module) = config.module {
guard.filter.module = Some(module);
}
// Ensure we drop the guard before attempting to log, otherwise this
// would deadlock.
drop(guard);
Ok(())
}
}
#[derive(Debug)]
pub struct LogFilter {
pub module: Option<String>,
}
#[derive(Debug)]
pub struct LogFormat {
pub show_level: bool,
pub show_log_origin: bool,
}
#[derive(Debug)]
pub struct LoggerConfiguration {
pub target: Option<std::fs::File>,
pub filter: LogFilter,
pub format: LogFormat,
}
#[derive(Debug)]
pub struct Logger(pub Mutex<LoggerConfiguration>);
impl Log for Logger {
// No additional filters to <https://docs.rs/log/latest/log/fn.max_level.html>.
fn enabled(&self, _metadata: &Metadata) -> bool {
true
}
fn log(&self, record: &Record) {
// Lock the logger.
let mut guard = self.0.lock().unwrap();
// Check if the log message is enabled
{
let enabled_module = match (&guard.filter.module, record.module_path()) {
(Some(filter), Some(source)) => source.starts_with(filter),
(Some(_), None) => false,
(None, _) => true,
};
let enabled = enabled_module;
if !enabled {
return;
}
}
// Prints log message
{
let thread = thread::current().name().unwrap_or("-").to_string();
let level = match guard.format.show_level {
true => format!(":{}", record.level()),
false => String::new(),
};
let origin = match guard.format.show_log_origin {
true => {
let file = record.file().unwrap_or("?");
let line = match record.line() {
Some(x) => x.to_string(),
None => String::from("?"),
};
format!(":{file}:{line}")
}
false => String::new(),
};
let message = format!(
"{} [{}:{thread}{level}{origin}] {}\n",
LocalTime::now(),
INSTANCE_ID
.get()
.map(|s| s.as_str())
.unwrap_or(DEFAULT_INSTANCE_ID),
record.args()
);
let result = if let Some(file) = &mut guard.target {
file.write_all(message.as_bytes())
} else {
std::io::stdout().write_all(message.as_bytes())
};
// If the write returns an error, increment missed log count.
// No reason to log the error to stderr here, just increment the metric.
if result.is_err() {
METRICS.logger.missed_log_count.inc();
}
}
}
fn flush(&self) {}
}
/// Strongly typed structure used to describe the logger.
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct LoggerConfig {
/// Named pipe or file used as output for logs.
pub log_path: Option<PathBuf>,
/// The level of the Logger.
pub level: Option<LevelFilter>,
/// Whether to show the log level in the log.
pub show_level: Option<bool>,
/// Whether to show the log origin in the log.
pub show_log_origin: Option<bool>,
/// The module to filter logs by.
pub module: Option<String>,
}
/// This is required since we originally supported `Warning` and uppercase variants being used as
/// the log level filter. It would be a breaking change to no longer support this. In the next
/// breaking release this should be removed (replaced with `log::LevelFilter` and only supporting
/// its default deserialization).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize)]
pub enum LevelFilter {
/// [`log::LevelFilter::Off`]
Off,
/// [`log::LevelFilter::Trace`]
Trace,
/// [`log::LevelFilter::Debug`]
Debug,
/// [`log::LevelFilter::Info`]
Info,
/// [`log::LevelFilter::Warn`]
Warn,
/// [`log::LevelFilter::Error`]
Error,
}
impl From<LevelFilter> for log::LevelFilter {
fn from(filter: LevelFilter) -> log::LevelFilter {
match filter {
LevelFilter::Off => log::LevelFilter::Off,
LevelFilter::Trace => log::LevelFilter::Trace,
LevelFilter::Debug => log::LevelFilter::Debug,
LevelFilter::Info => log::LevelFilter::Info,
LevelFilter::Warn => log::LevelFilter::Warn,
LevelFilter::Error => log::LevelFilter::Error,
}
}
}
impl<'de> Deserialize<'de> for LevelFilter {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
use serde::de::Error;
let key = String::deserialize(deserializer)?;
match key.to_lowercase().as_str() {
"off" => Ok(LevelFilter::Off),
"trace" => Ok(LevelFilter::Trace),
"debug" => Ok(LevelFilter::Debug),
"info" => Ok(LevelFilter::Info),
"warn" | "warning" => Ok(LevelFilter::Warn),
"error" => Ok(LevelFilter::Error),
_ => Err(D::Error::custom("Invalid LevelFilter")),
}
}
}
/// Error type for [`<LevelFilter as FromStr>::from_str`].
#[derive(Debug, PartialEq, Eq, thiserror::Error)]
#[error("Failed to parse string to level filter: {0}")]
pub struct LevelFilterFromStrError(String);
impl FromStr for LevelFilter {
type Err = LevelFilterFromStrError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.to_ascii_lowercase().as_str() {
"off" => Ok(Self::Off),
"trace" => Ok(Self::Trace),
"debug" => Ok(Self::Debug),
"info" => Ok(Self::Info),
"warn" | "warning" => Ok(Self::Warn),
"error" => Ok(Self::Error),
_ => Err(LevelFilterFromStrError(String::from(s))),
}
}
}
#[cfg(test)]
mod tests {
use log::Level;
use super::*;
#[test]
fn levelfilter_from_levelfilter() {
assert_eq!(
log::LevelFilter::from(LevelFilter::Off),
log::LevelFilter::Off
);
assert_eq!(
log::LevelFilter::from(LevelFilter::Trace),
log::LevelFilter::Trace
);
assert_eq!(
log::LevelFilter::from(LevelFilter::Debug),
log::LevelFilter::Debug
);
assert_eq!(
log::LevelFilter::from(LevelFilter::Info),
log::LevelFilter::Info
);
assert_eq!(
log::LevelFilter::from(LevelFilter::Warn),
log::LevelFilter::Warn
);
assert_eq!(
log::LevelFilter::from(LevelFilter::Error),
log::LevelFilter::Error
);
}
#[test]
fn levelfilter_from_str_all_variants() {
use itertools::Itertools;
#[derive(Deserialize)]
struct Foo {
#[allow(dead_code)]
level: LevelFilter,
}
for (level, level_enum) in [
("off", LevelFilter::Off),
("trace", LevelFilter::Trace),
("debug", LevelFilter::Debug),
("info", LevelFilter::Info),
("warn", LevelFilter::Warn),
("warning", LevelFilter::Warn),
("error", LevelFilter::Error),
] {
let multi = level.chars().map(|_| 0..=1).multi_cartesian_product();
for combination in multi {
let variant = level
.chars()
.zip_eq(combination)
.map(|(c, v)| match v {
0 => c.to_ascii_lowercase(),
1 => c.to_ascii_uppercase(),
_ => unreachable!(),
})
.collect::<String>();
let ex = format!("{{ \"level\": \"{}\" }}", variant);
assert_eq!(LevelFilter::from_str(&variant), Ok(level_enum));
assert!(serde_json::from_str::<Foo>(&ex).is_ok(), "{ex}");
}
}
let ex = "{{ \"level\": \"blah\" }}".to_string();
assert!(
serde_json::from_str::<Foo>(&ex).is_err(),
"expected error got {ex:#?}"
);
assert_eq!(
LevelFilter::from_str("bad"),
Err(LevelFilterFromStrError(String::from("bad")))
);
}
#[test]
fn logger() {
// Get temp file path.
let file = vmm_sys_util::tempfile::TempFile::new().unwrap();
let path = file.as_path().to_str().unwrap().to_string();
drop(file);
// Create temp file.
let target = std::fs::OpenOptions::new()
.create(true)
.write(true)
.truncate(true)
.open(&path)
.unwrap();
// Create logger.
let logger = Logger(Mutex::new(LoggerConfiguration {
target: Some(target),
filter: LogFilter {
module: Some(String::from("module")),
},
format: LogFormat {
show_level: true,
show_log_origin: true,
},
}));
// Assert results of enabled given specific metadata.
assert!(logger.enabled(&Metadata::builder().level(Level::Warn).build()));
assert!(logger.enabled(&Metadata::builder().level(Level::Debug).build()));
// Log
let metadata = Metadata::builder().level(Level::Error).build();
let record = Record::builder()
.args(format_args!("Error!"))
.metadata(metadata)
.file(Some("dir/app.rs"))
.line(Some(200))
.module_path(Some("module::server"))
.build();
logger.log(&record);
// Test calling flush.
logger.flush();
// Asserts result of log.
let contents = std::fs::read_to_string(&path).unwrap();
let (_time, rest) = contents.split_once(' ').unwrap();
let thread = thread::current().name().unwrap_or("-").to_string();
assert_eq!(
rest,
format!("[{DEFAULT_INSTANCE_ID}:{thread}:ERROR:dir/app.rs:200] Error!\n")
);
std::fs::remove_file(path).unwrap();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/logger/metrics.rs | src/vmm/src/logger/metrics.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the metrics system.
//!
//! # Metrics format
//! The metrics are flushed in JSON format each 60 seconds. The first field will always be the
//! timestamp followed by the JSON representation of the structures representing each component on
//! which we are capturing specific metrics.
//!
//! ## JSON example with metrics:
//! ```json
//! {
//! "utc_timestamp_ms": 1541591155180,
//! "api_server": {
//! "process_startup_time_us": 0,
//! "process_startup_time_cpu_us": 0
//! },
//! "block": {
//! "activate_fails": 0,
//! "cfg_fails": 0,
//! "event_fails": 0,
//! "flush_count": 0,
//! "queue_event_count": 0,
//! "read_count": 0,
//! "write_count": 0
//! }
//! }
//! ```
//! The example above means that inside the structure representing all the metrics there is a field
//! named `block` which is in turn a serializable child structure collecting metrics for
//! the block device such as `activate_fails`, `cfg_fails`, etc.
//!
//! # Limitations
//! Metrics are only written to buffers.
//!
//! # Design
//! The main design goals of this system are:
//! * Use lockless operations, preferably ones that don't require anything other than simple
//! reads/writes being atomic.
//! * Exploit interior mutability and atomics being Sync to allow all methods (including the ones
//! which are effectively mutable) to be callable on a global non-mut static.
//! * Rely on `serde` to provide the actual serialization for writing the metrics.
//! * Since all metrics start at 0, we implement the `Default` trait via derive for all of them, to
//! avoid having to initialize everything by hand.
//!
//! The system implements 2 types of metrics:
//! * Shared Incremental Metrics (SharedIncMetrics) - dedicated for the metrics which need a counter
//! (i.e the number of times an API request failed). These metrics are reset upon flush.
//! * Shared Store Metrics (SharedStoreMetrics) - are targeted at keeping a persistent value, it is
//! not intended to act as a counter (i.e for measure the process start up time for example).
//!
//! The current approach for the `SharedIncMetrics` type is to store two values (current and
//! previous) and compute the delta between them each time we do a flush (i.e by serialization).
//! There are a number of advantages to this approach, including:
//! * We don't have to introduce an additional write (to reset the value) from the thread which does
//! to actual writing, so less synchronization effort is required.
//! * We don't have to worry at all that much about losing some data if writing fails for a while
//! (this could be a concern, I guess).
//!
//! If if turns out this approach is not really what we want, it's pretty easy to resort to
//! something else, while working behind the same interface.
use std::fmt::Debug;
use std::io::Write;
use std::ops::Deref;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Mutex, OnceLock};
use serde::{Serialize, Serializer};
use utils::time::{ClockType, get_time_ns, get_time_us};
use super::FcLineWriter;
use crate::devices::legacy;
use crate::devices::virtio::balloon::metrics as balloon_metrics;
use crate::devices::virtio::block::virtio::metrics as block_metrics;
use crate::devices::virtio::mem::metrics as virtio_mem_metrics;
use crate::devices::virtio::net::metrics as net_metrics;
use crate::devices::virtio::pmem::metrics as pmem_metrics;
use crate::devices::virtio::rng::metrics as entropy_metrics;
use crate::devices::virtio::vhost_user_metrics;
use crate::devices::virtio::vsock::metrics as vsock_metrics;
/// Static instance used for handling metrics.
pub static METRICS: Metrics<FirecrackerMetrics, FcLineWriter> =
Metrics::<FirecrackerMetrics, FcLineWriter>::new(FirecrackerMetrics::new());
/// Metrics system.
// All member fields have types which are Sync, and exhibit interior mutability, so
// we can call operations on metrics using a non-mut static global variable.
#[derive(Debug)]
pub struct Metrics<T: Serialize, M: Write + Send> {
// Metrics will get flushed here.
metrics_buf: OnceLock<Mutex<M>>,
pub app_metrics: T,
}
impl<T: Serialize + Debug, M: Write + Send + Debug> Metrics<T, M> {
/// Creates a new instance of the current metrics.
pub const fn new(app_metrics: T) -> Metrics<T, M> {
Metrics {
metrics_buf: OnceLock::new(),
app_metrics,
}
}
/// Initialize metrics system (once and only once).
/// Every call made after the first will have no effect besides returning `Ok` or `Err`.
///
/// This function is supposed to be called only from a single thread, once.
/// It is not thread-safe and is not meant to be used in a multithreaded
/// scenario. The reason `is_initialized` is an `AtomicBool` instead of
/// just a `bool` is that `lazy_static` enforces thread-safety on all its
/// members.
///
/// # Arguments
///
/// * `metrics_dest` - Buffer for JSON formatted metrics. Needs to implement `Write` and `Send`.
pub fn init(&self, metrics_dest: M) -> Result<(), MetricsError> {
self.metrics_buf
.set(Mutex::new(metrics_dest))
.map_err(|_| MetricsError::AlreadyInitialized)
}
/// Writes metrics to the destination provided as argument upon initialization of the metrics.
/// Upon failure, an error is returned if metrics system is initialized and metrics could not be
/// written.
/// Upon success, the function will return `True` (if metrics system was initialized and metrics
/// were successfully written to disk) or `False` (if metrics system was not yet initialized).
///
/// This function is usually supposed to be called only from a single thread and
/// is not meant to be used in a multithreaded scenario. The reason
/// `metrics_buf` is enclosed in a `Mutex` is that `lazy_static` enforces
/// thread-safety on all its members.
/// The only exception is for signal handlers that result in process exit, which may be run on
/// any thread. To prevent the race condition present in the serialisation step of
/// SharedIncMetrics, deadly signals use SharedStoreMetrics instead (which have a thread-safe
/// serialise implementation).
/// The only known caveat is that other metrics may not be properly written before exiting from
/// a signal handler. We make this compromise since the process will be killed anyway and the
/// important metric in this case is the signal one.
/// The alternative is to hold a Mutex over the entire function call, but this increases the
/// known deadlock potential.
pub fn write(&self) -> Result<bool, MetricsError> {
if let Some(lock) = self.metrics_buf.get() {
let mut writer = lock.lock().expect("poisoned lock");
serde_json::to_writer(writer.by_ref(), &self.app_metrics)
.map_err(|err| MetricsError::Serde(err.to_string()))?;
writer.write_all(b"\n").map_err(MetricsError::Write)?;
Ok(true)
} else {
// If the metrics are not initialized, no error is thrown but we do let the user know
// that metrics were not written.
Ok(false)
}
}
}
impl<T: Serialize + Debug, M: Write + Send + Debug> Deref for Metrics<T, M> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.app_metrics
}
}
/// Describes the errors which may occur while handling metrics scenarios.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum MetricsError {
/// {0}
NeverInitialized(String),
/// Reinitialization of metrics not allowed.
AlreadyInitialized,
/// {0}
Serde(String),
/// Failed to write metrics: {0}
Write(std::io::Error),
}
/// Used for defining new types of metrics that act as a counter (i.e they are continuously updated
/// by incrementing their value).
pub trait IncMetric {
/// Adds `value` to the current counter.
fn add(&self, value: u64);
/// Increments by 1 unit the current counter.
fn inc(&self) {
self.add(1);
}
/// Returns current value of the counter.
fn count(&self) -> u64;
/// Returns diff of current and old value of the counter.
/// Mostly used in process of aggregating per device metrics.
fn fetch_diff(&self) -> u64;
}
/// Used for defining new types of metrics that do not need a counter and act as a persistent
/// indicator.
pub trait StoreMetric {
/// Returns current value of the counter.
fn fetch(&self) -> u64;
/// Stores `value` to the current counter.
fn store(&self, value: u64);
}
/// Representation of a metric that is expected to be incremented from more than one thread, so more
/// synchronization is necessary.
// It's currently used for vCPU metrics. An alternative here would be
// to have one instance of every metric for each thread, and to
// aggregate them when writing. However this probably overkill unless we have a lot of vCPUs
// incrementing metrics very often. Still, it's there if we ever need it :-s
// We will be keeping two values for each metric for being able to reset
// counters on each metric.
// 1st member - current value being updated
// 2nd member - old value that gets the current value whenever metrics is flushed to disk
#[derive(Debug, Default)]
pub struct SharedIncMetric(AtomicU64, AtomicU64);
impl SharedIncMetric {
/// Const default construction.
pub const fn new() -> Self {
Self(AtomicU64::new(0), AtomicU64::new(0))
}
}
/// Representation of a metric that is expected to hold a value that can be accessed
/// from more than one thread, so more synchronization is necessary.
#[derive(Debug, Default)]
pub struct SharedStoreMetric(AtomicU64);
impl SharedStoreMetric {
/// Const default construction.
pub const fn new() -> Self {
Self(AtomicU64::new(0))
}
}
impl IncMetric for SharedIncMetric {
// While the order specified for this operation is still Relaxed, the actual instruction will
// be an asm "LOCK; something" and thus atomic across multiple threads, simply because of the
// fetch_and_add (as opposed to "store(load() + 1)") implementation for atomics.
// TODO: would a stronger ordering make a difference here?
fn add(&self, value: u64) {
self.0.fetch_add(value, Ordering::Relaxed);
}
fn count(&self) -> u64 {
self.0.load(Ordering::Relaxed)
}
fn fetch_diff(&self) -> u64 {
self.0.load(Ordering::Relaxed) - self.1.load(Ordering::Relaxed)
}
}
impl StoreMetric for SharedStoreMetric {
fn fetch(&self) -> u64 {
self.0.load(Ordering::Relaxed)
}
fn store(&self, value: u64) {
self.0.store(value, Ordering::Relaxed);
}
}
impl Serialize for SharedIncMetric {
/// Reset counters of each metrics. Here we suppose that Serialize's goal is to help with the
/// flushing of metrics.
/// !!! Any print of the metrics will also reset them. Use with caution !!!
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
let snapshot = self.0.load(Ordering::Relaxed);
let res = serializer.serialize_u64(snapshot - self.1.load(Ordering::Relaxed));
if res.is_ok() {
self.1.store(snapshot, Ordering::Relaxed);
}
res
}
}
impl Serialize for SharedStoreMetric {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
serializer.serialize_u64(self.0.load(Ordering::Relaxed))
}
}
/// Reporter object which computes the process wall time and
/// process CPU time and populates the metric with the results.
#[derive(Debug)]
pub struct ProcessTimeReporter {
// Process start time in us.
start_time_us: Option<u64>,
// Process CPU start time in us.
start_time_cpu_us: Option<u64>,
// Firecracker's parent process CPU time.
parent_cpu_time_us: Option<u64>,
}
impl ProcessTimeReporter {
/// Constructor for the process time-related reporter.
pub fn new(
start_time_us: Option<u64>,
start_time_cpu_us: Option<u64>,
parent_cpu_time_us: Option<u64>,
) -> ProcessTimeReporter {
ProcessTimeReporter {
start_time_us,
start_time_cpu_us,
parent_cpu_time_us,
}
}
/// Obtain process start time in microseconds.
pub fn report_start_time(&self) {
if let Some(start_time) = self.start_time_us {
let delta_us = get_time_us(ClockType::Monotonic) - start_time;
METRICS.api_server.process_startup_time_us.store(delta_us);
}
}
/// Obtain process CPU start time in microseconds.
pub fn report_cpu_start_time(&self) {
if let Some(cpu_start_time) = self.start_time_cpu_us {
let delta_us = get_time_us(ClockType::ProcessCpu) - cpu_start_time
+ self.parent_cpu_time_us.unwrap_or_default();
METRICS
.api_server
.process_startup_time_cpu_us
.store(delta_us);
}
}
}
// The following structs are used to define a certain organization for the set of metrics we
// are interested in. Whenever the name of a field differs from its ideal textual representation
// in the serialized form, we can use the #[serde(rename = "name")] attribute to, well, rename it.
/// Metrics related to the internal API server.
#[derive(Debug, Default, Serialize)]
pub struct ApiServerMetrics {
/// Measures the process's startup time in microseconds.
pub process_startup_time_us: SharedStoreMetric,
/// Measures the cpu's startup time in microseconds.
pub process_startup_time_cpu_us: SharedStoreMetric,
}
impl ApiServerMetrics {
/// Const default construction.
pub const fn new() -> Self {
Self {
process_startup_time_us: SharedStoreMetric::new(),
process_startup_time_cpu_us: SharedStoreMetric::new(),
}
}
}
/// Metrics specific to GET API Requests for counting user triggered actions and/or failures.
#[derive(Debug, Default, Serialize)]
pub struct GetRequestsMetrics {
/// Number of GETs for getting information on the instance.
pub instance_info_count: SharedIncMetric,
/// Number of GETs for getting status on attaching machine configuration.
pub machine_cfg_count: SharedIncMetric,
/// Number of GETs for getting mmds.
pub mmds_count: SharedIncMetric,
/// Number of GETs for getting the VMM version.
pub vmm_version_count: SharedIncMetric,
/// Number of GETs for getting hotpluggable memory status.
pub hotplug_memory_count: SharedIncMetric,
}
impl GetRequestsMetrics {
/// Const default construction.
pub const fn new() -> Self {
Self {
instance_info_count: SharedIncMetric::new(),
machine_cfg_count: SharedIncMetric::new(),
mmds_count: SharedIncMetric::new(),
vmm_version_count: SharedIncMetric::new(),
hotplug_memory_count: SharedIncMetric::new(),
}
}
}
/// Metrics specific to PUT API Requests for counting user triggered actions and/or failures.
#[derive(Debug, Default, Serialize)]
pub struct PutRequestsMetrics {
/// Number of PUTs triggering an action on the VM.
pub actions_count: SharedIncMetric,
/// Number of failures in triggering an action on the VM.
pub actions_fails: SharedIncMetric,
/// Number of PUTs for attaching source of boot.
pub boot_source_count: SharedIncMetric,
/// Number of failures during attaching source of boot.
pub boot_source_fails: SharedIncMetric,
/// Number of PUTs triggering a block attach.
pub drive_count: SharedIncMetric,
/// Number of failures in attaching a block device.
pub drive_fails: SharedIncMetric,
/// Number of PUTs for initializing the logging system.
pub logger_count: SharedIncMetric,
/// Number of failures in initializing the logging system.
pub logger_fails: SharedIncMetric,
/// Number of PUTs for configuring the machine.
pub machine_cfg_count: SharedIncMetric,
/// Number of failures in configuring the machine.
pub machine_cfg_fails: SharedIncMetric,
/// Number of PUTs for configuring a guest's vCPUs.
pub cpu_cfg_count: SharedIncMetric,
/// Number of failures in configuring a guest's vCPUs.
pub cpu_cfg_fails: SharedIncMetric,
/// Number of PUTs for initializing the metrics system.
pub metrics_count: SharedIncMetric,
/// Number of failures in initializing the metrics system.
pub metrics_fails: SharedIncMetric,
/// Number of PUTs for creating a new network interface.
pub network_count: SharedIncMetric,
/// Number of failures in creating a new network interface.
pub network_fails: SharedIncMetric,
/// Number of PUTs for creating mmds.
pub mmds_count: SharedIncMetric,
/// Number of failures in creating a new mmds.
pub mmds_fails: SharedIncMetric,
/// Number of PUTs for creating a vsock device.
pub vsock_count: SharedIncMetric,
/// Number of failures in creating a vsock device.
pub vsock_fails: SharedIncMetric,
/// Number of PUTs triggering a pmem attach.
pub pmem_count: SharedIncMetric,
/// Number of failures in attaching a pmem device.
pub pmem_fails: SharedIncMetric,
/// Number of PUTs to /serial
pub serial_count: SharedIncMetric,
/// Number of failed PUTs to /serial
pub serial_fails: SharedIncMetric,
/// Number of PUTs to /hotplug/memory
pub hotplug_memory_count: SharedIncMetric,
/// Number of failed PUTs to /hotplug/memory
pub hotplug_memory_fails: SharedIncMetric,
}
impl PutRequestsMetrics {
/// Const default construction.
pub const fn new() -> Self {
Self {
actions_count: SharedIncMetric::new(),
actions_fails: SharedIncMetric::new(),
boot_source_count: SharedIncMetric::new(),
boot_source_fails: SharedIncMetric::new(),
drive_count: SharedIncMetric::new(),
drive_fails: SharedIncMetric::new(),
logger_count: SharedIncMetric::new(),
logger_fails: SharedIncMetric::new(),
machine_cfg_count: SharedIncMetric::new(),
machine_cfg_fails: SharedIncMetric::new(),
cpu_cfg_count: SharedIncMetric::new(),
cpu_cfg_fails: SharedIncMetric::new(),
metrics_count: SharedIncMetric::new(),
metrics_fails: SharedIncMetric::new(),
network_count: SharedIncMetric::new(),
network_fails: SharedIncMetric::new(),
mmds_count: SharedIncMetric::new(),
mmds_fails: SharedIncMetric::new(),
vsock_count: SharedIncMetric::new(),
vsock_fails: SharedIncMetric::new(),
pmem_count: SharedIncMetric::new(),
pmem_fails: SharedIncMetric::new(),
serial_count: SharedIncMetric::new(),
serial_fails: SharedIncMetric::new(),
hotplug_memory_count: SharedIncMetric::new(),
hotplug_memory_fails: SharedIncMetric::new(),
}
}
}
/// Metrics specific to PATCH API Requests for counting user triggered actions and/or failures.
#[derive(Debug, Default, Serialize)]
pub struct PatchRequestsMetrics {
/// Number of tries to PATCH a block device.
pub drive_count: SharedIncMetric,
/// Number of failures in PATCHing a block device.
pub drive_fails: SharedIncMetric,
/// Number of tries to PATCH a net device.
pub network_count: SharedIncMetric,
/// Number of failures in PATCHing a net device.
pub network_fails: SharedIncMetric,
/// Number of PATCHs for configuring the machine.
pub machine_cfg_count: SharedIncMetric,
/// Number of failures in configuring the machine.
pub machine_cfg_fails: SharedIncMetric,
/// Number of tries to PATCH an mmds.
pub mmds_count: SharedIncMetric,
/// Number of failures in PATCHing an mmds.
pub mmds_fails: SharedIncMetric,
/// Number of PATCHes to /hotplug/memory
pub hotplug_memory_count: SharedIncMetric,
/// Number of failed PATCHes to /hotplug/memory
pub hotplug_memory_fails: SharedIncMetric,
}
impl PatchRequestsMetrics {
/// Const default construction.
pub const fn new() -> Self {
Self {
drive_count: SharedIncMetric::new(),
drive_fails: SharedIncMetric::new(),
network_count: SharedIncMetric::new(),
network_fails: SharedIncMetric::new(),
machine_cfg_count: SharedIncMetric::new(),
machine_cfg_fails: SharedIncMetric::new(),
mmds_count: SharedIncMetric::new(),
mmds_fails: SharedIncMetric::new(),
hotplug_memory_count: SharedIncMetric::new(),
hotplug_memory_fails: SharedIncMetric::new(),
}
}
}
/// Metrics related to deprecated user-facing API calls.
#[derive(Debug, Default, Serialize)]
pub struct DeprecatedApiMetrics {
/// Total number of calls to deprecated HTTP endpoints.
pub deprecated_http_api_calls: SharedIncMetric,
}
impl DeprecatedApiMetrics {
/// Const default construction.
pub const fn new() -> Self {
Self {
deprecated_http_api_calls: SharedIncMetric::new(),
}
}
}
/// Metrics for the logging subsystem.
#[derive(Debug, Default, Serialize)]
pub struct LoggerSystemMetrics {
/// Number of misses on flushing metrics.
pub missed_metrics_count: SharedIncMetric,
/// Number of errors during metrics handling.
pub metrics_fails: SharedIncMetric,
/// Number of misses on logging human readable content.
pub missed_log_count: SharedIncMetric,
}
impl LoggerSystemMetrics {
/// Const default construction.
pub const fn new() -> Self {
Self {
missed_metrics_count: SharedIncMetric::new(),
metrics_fails: SharedIncMetric::new(),
missed_log_count: SharedIncMetric::new(),
}
}
}
/// Metrics for the MMDS functionality.
#[derive(Debug, Default, Serialize)]
pub struct MmdsMetrics {
/// Number of frames rerouted to MMDS.
pub rx_accepted: SharedIncMetric,
/// Number of errors while handling a frame through MMDS.
pub rx_accepted_err: SharedIncMetric,
/// Number of uncommon events encountered while processing packets through MMDS.
pub rx_accepted_unusual: SharedIncMetric,
/// The number of buffers which couldn't be parsed as valid Ethernet frames by the MMDS.
pub rx_bad_eth: SharedIncMetric,
/// The number of GET requests with invalid tokens.
pub rx_invalid_token: SharedIncMetric,
/// The number of GET requests with no tokens.
pub rx_no_token: SharedIncMetric,
/// The total number of successful receive operations by the MMDS.
pub rx_count: SharedIncMetric,
/// The total number of bytes sent by the MMDS.
pub tx_bytes: SharedIncMetric,
/// The total number of successful send operations by the MMDS.
pub tx_count: SharedIncMetric,
/// The number of errors raised by the MMDS while attempting to send frames/packets/segments.
pub tx_errors: SharedIncMetric,
/// The number of frames sent by the MMDS.
pub tx_frames: SharedIncMetric,
/// The number of connections successfully accepted by the MMDS TCP handler.
pub connections_created: SharedIncMetric,
/// The number of connections cleaned up by the MMDS TCP handler.
pub connections_destroyed: SharedIncMetric,
}
impl MmdsMetrics {
/// Const default construction.
pub const fn new() -> Self {
Self {
rx_accepted: SharedIncMetric::new(),
rx_accepted_err: SharedIncMetric::new(),
rx_accepted_unusual: SharedIncMetric::new(),
rx_bad_eth: SharedIncMetric::new(),
rx_invalid_token: SharedIncMetric::new(),
rx_no_token: SharedIncMetric::new(),
rx_count: SharedIncMetric::new(),
tx_bytes: SharedIncMetric::new(),
tx_count: SharedIncMetric::new(),
tx_errors: SharedIncMetric::new(),
tx_frames: SharedIncMetric::new(),
connections_created: SharedIncMetric::new(),
connections_destroyed: SharedIncMetric::new(),
}
}
}
/// Performance metrics, for the moment related only to snapshots.
// These store the duration of creating/loading a snapshot and of
// pausing/resuming the microVM.
// If there is more than one `/snapshot/create` request in a minute
// (until the metrics are flushed), only the duration of the last
// snapshot creation is stored in the metric. If the user is interested
// in all the durations, a `FlushMetrics` request should be sent after
// each `create` request.
// All fields are `SharedStoreMetric`s: each new measurement overwrites the
// previous one rather than accumulating.
#[derive(Debug, Default, Serialize)]
pub struct PerformanceMetrics {
    /// Measures the snapshot full create time, at the API (user) level, in microseconds.
    pub full_create_snapshot: SharedStoreMetric,
    /// Measures the snapshot diff create time, at the API (user) level, in microseconds.
    pub diff_create_snapshot: SharedStoreMetric,
    /// Measures the snapshot load time, at the API (user) level, in microseconds.
    pub load_snapshot: SharedStoreMetric,
    /// Measures the microVM pausing duration, at the API (user) level, in microseconds.
    pub pause_vm: SharedStoreMetric,
    /// Measures the microVM resuming duration, at the API (user) level, in microseconds.
    pub resume_vm: SharedStoreMetric,
    /// Measures the snapshot full create time, at the VMM level, in microseconds.
    pub vmm_full_create_snapshot: SharedStoreMetric,
    /// Measures the snapshot diff create time, at the VMM level, in microseconds.
    pub vmm_diff_create_snapshot: SharedStoreMetric,
    /// Measures the snapshot load time, at the VMM level, in microseconds.
    pub vmm_load_snapshot: SharedStoreMetric,
    /// Measures the microVM pausing duration, at the VMM level, in microseconds.
    pub vmm_pause_vm: SharedStoreMetric,
    /// Measures the microVM resuming duration, at the VMM level, in microseconds.
    pub vmm_resume_vm: SharedStoreMetric,
}
impl PerformanceMetrics {
    /// Builds a `PerformanceMetrics` instance with every duration at zero.
    ///
    /// `const` so it can be used to initialize statics.
    pub const fn new() -> Self {
        Self {
            // VMM-level measurements.
            vmm_full_create_snapshot: SharedStoreMetric::new(),
            vmm_diff_create_snapshot: SharedStoreMetric::new(),
            vmm_load_snapshot: SharedStoreMetric::new(),
            vmm_pause_vm: SharedStoreMetric::new(),
            vmm_resume_vm: SharedStoreMetric::new(),
            // API (user) level measurements.
            full_create_snapshot: SharedStoreMetric::new(),
            diff_create_snapshot: SharedStoreMetric::new(),
            load_snapshot: SharedStoreMetric::new(),
            pause_vm: SharedStoreMetric::new(),
            resume_vm: SharedStoreMetric::new(),
        }
    }
}
/// Metrics for the seccomp filtering.
#[derive(Debug, Default, Serialize)]
pub struct SeccompMetrics {
    /// Number of errors inside the seccomp filtering.
    // Stored (overwritten) rather than accumulated, hence `SharedStoreMetric`.
    pub num_faults: SharedStoreMetric,
}
impl SeccompMetrics {
    /// Builds a `SeccompMetrics` instance with the fault counter at zero.
    ///
    /// `const` so it can be used to initialize statics.
    pub const fn new() -> Self {
        Self { num_faults: SharedStoreMetric::new() }
    }
}
/// Metrics related to signals.
/// Deadly signals must be of `SharedStoreMetric` type, since they can only ever be either 0 or 1.
/// This avoids a tricky race condition caused by the non-atomic serialize method of
/// `SharedIncMetric`, between two threads calling `METRICS.write()`.
#[derive(Debug, Default, Serialize)]
pub struct SignalMetrics {
    /// Number of times that SIGBUS was handled.
    pub sigbus: SharedStoreMetric,
    /// Number of times that SIGSEGV was handled.
    pub sigsegv: SharedStoreMetric,
    /// Number of times that SIGXFSZ was handled.
    pub sigxfsz: SharedStoreMetric,
    /// Number of times that SIGXCPU was handled.
    pub sigxcpu: SharedStoreMetric,
    // NOTE(review): unlike the others this is a `SharedIncMetric` — presumably
    // because SIGPIPE is not deadly and can be handled repeatedly; confirm.
    /// Number of times that SIGPIPE was handled.
    pub sigpipe: SharedIncMetric,
    /// Number of times that SIGHUP was handled.
    pub sighup: SharedStoreMetric,
    /// Number of times that SIGILL was handled.
    pub sigill: SharedStoreMetric,
}
impl SignalMetrics {
    /// Builds a `SignalMetrics` instance with every signal counter at zero.
    ///
    /// `const` so it can be used to initialize statics.
    pub const fn new() -> Self {
        Self {
            sigbus: SharedStoreMetric::new(),
            sighup: SharedStoreMetric::new(),
            sigill: SharedStoreMetric::new(),
            sigpipe: SharedIncMetric::new(),
            sigsegv: SharedStoreMetric::new(),
            sigxcpu: SharedStoreMetric::new(),
            sigxfsz: SharedStoreMetric::new(),
        }
    }
}
/// Provides efficient way to record LatencyAggregateMetrics
#[derive(Debug)]
pub struct LatencyMetricsRecorder<'a> {
    // Monotonic timestamp (microseconds, from `get_time_us`) taken at creation.
    start_time: u64,
    // Aggregate metric that receives the measured latency when the recorder drops.
    metric: &'a LatencyAggregateMetrics,
}
impl<'a> LatencyMetricsRecorder<'a> {
    /// Creates a recorder for `metric`, capturing the current monotonic time
    /// as the measurement start. The elapsed time is folded into `metric`
    /// by the `Drop` implementation.
    fn new(metric: &'a LatencyAggregateMetrics) -> Self {
        Self {
            start_time: get_time_us(ClockType::Monotonic),
            metric,
        }
    }
}
impl Drop for LatencyMetricsRecorder<'_> {
    /// Folds the elapsed time since `new()` into the aggregate metric.
    ///
    /// Computes the delta between the recorded start time and the current
    /// monotonic time, adds it to `sum_us`, and updates `min_us`/`max_us`
    /// when the sample is a new extreme. A stored value of 0 means "unset",
    /// so the very first sample always wins.
    fn drop(&mut self) {
        let elapsed_us = get_time_us(ClockType::Monotonic) - self.start_time;
        self.metric.sum_us.add(elapsed_us);
        let current_min = self.metric.min_us.fetch();
        if current_min == 0 || elapsed_us < current_min {
            self.metric.min_us.store(elapsed_us);
        }
        let current_max = self.metric.max_us.fetch();
        if current_max == 0 || elapsed_us > current_max {
            self.metric.max_us.store(elapsed_us);
        }
    }
}
/// Used to record Aggregate (min/max/sum) of latency metrics
///
/// `min_us`/`max_us` treat 0 as "no sample recorded yet"; `sum_us` accumulates
/// across samples (averages can be derived by dividing by a sample counter).
#[derive(Debug, Default, Serialize)]
pub struct LatencyAggregateMetrics {
    /// represents minimum value of the metrics in microseconds
    pub min_us: SharedStoreMetric,
    /// represents maximum value of the metrics in microseconds
    pub max_us: SharedStoreMetric,
    /// represents sum of the metrics in microseconds
    pub sum_us: SharedIncMetric,
}
impl LatencyAggregateMetrics {
    /// Const default construction.
    pub const fn new() -> Self {
        Self {
            min_us: SharedStoreMetric::new(),
            max_us: SharedStoreMetric::new(),
            sum_us: SharedIncMetric::new(),
        }
    }
    /// Returns a recorder that captures the current time now and updates the
    /// aggregate (min/max/sum) metrics when it goes out of scope.
    ///
    /// Instead of the usual two steps:
    /// 1. `start_time_us = get_time_us()`
    /// 2. `delta_time_us = get_time_us() - start_time_us` followed by `metrics.store(delta_time_us)`
    ///
    /// callers just write `let _m = metrics.record_latency_metrics();`.
    pub fn record_latency_metrics(&self) -> LatencyMetricsRecorder<'_> {
        LatencyMetricsRecorder::new(self)
    }
}
/// Structure provides Metrics specific to VCPUs' mode of functioning.
/// Sample_count or number of kvm exits for IO and MMIO VM exits are covered by:
/// `exit_io_in`, `exit_io_out`, `exit_mmio_read` and , `exit_mmio_write`.
/// Count of other vm exits for events like shutdown/hlt/errors are
/// covered by existing "failures" metric.
/// The only vm exit for which sample_count is not covered is system
/// event reset/shutdown but that should be fine since they are not
/// failures and the vm is terminated anyways.
/// LatencyAggregateMetrics only covers minimum, maximum and sum
/// because average can be deduced from available metrics. e.g.
/// dividing `exit_io_in_agg.sum_us` by exit_io_in` gives average of KVM exits handling input IO.
#[derive(Debug, Default, Serialize)]
pub struct VcpuMetrics {
/// Number of KVM exits for handling input IO.
pub exit_io_in: SharedIncMetric,
/// Number of KVM exits for handling output IO.
pub exit_io_out: SharedIncMetric,
/// Number of KVM exits for handling MMIO reads.
pub exit_mmio_read: SharedIncMetric,
/// Number of KVM exits for handling MMIO writes.
pub exit_mmio_write: SharedIncMetric,
/// Number of errors during this VCPU's run.
pub failures: SharedIncMetric,
/// Number of times that the `KVM_KVMCLOCK_CTRL` ioctl failed.
pub kvmclock_ctrl_fails: SharedIncMetric,
/// Provides Min/max/sum for KVM exits handling input IO.
pub exit_io_in_agg: LatencyAggregateMetrics,
/// Provides Min/max/sum for KVM exits handling output IO.
pub exit_io_out_agg: LatencyAggregateMetrics,
/// Provides Min/max/sum for KVM exits handling MMIO reads.
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/io_uring/restriction.rs | src/vmm/src/io_uring/restriction.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Seccomp-like restrictions for the allowed operations on an IoUring instance.
//!
//! One can configure the restrictions to only allow certain operations and/or allow only ops on
//! registered files.
//! If passed to the [`IoUring`] constructor, they take effect immediately and can never be
//! deactivated.
//!
//! [`IoUring`]: ../struct.IoUring.html
use std::convert::From;
use crate::io_uring::generated::{
io_uring_register_restriction_op, io_uring_restriction, io_uring_sqe_flags_bit,
};
use crate::io_uring::operation::OpCode;
/// Adds support for restricting the operations allowed by io_uring.
///
/// Each variant maps to one kernel `io_uring_restriction` entry (see the
/// `From<&Restriction>` impl below).
#[derive(Debug)]
pub enum Restriction {
    /// Allow an operation.
    AllowOpCode(OpCode),
    /// Only allow operations on pre-registered fds.
    RequireFixedFds,
}
impl From<&Restriction> for io_uring_restriction {
fn from(restriction: &Restriction) -> Self {
use Restriction::*;
// SAFETY: Safe because it only contains integer values.
let mut instance: Self = unsafe { std::mem::zeroed() };
match restriction {
AllowOpCode(opcode) => {
instance.opcode =
u16::try_from(io_uring_register_restriction_op::IORING_RESTRICTION_SQE_OP)
.unwrap();
instance.__bindgen_anon_1.sqe_op = *opcode as u8;
}
RequireFixedFds => {
instance.opcode = u16::try_from(
io_uring_register_restriction_op::IORING_RESTRICTION_SQE_FLAGS_REQUIRED,
)
.unwrap();
instance.__bindgen_anon_1.sqe_flags =
1 << io_uring_sqe_flags_bit::IOSQE_FIXED_FILE_BIT;
}
};
instance
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/io_uring/mod.rs | src/vmm/src/io_uring/mod.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
mod generated;
pub mod operation;
mod probe;
mod queue;
pub mod restriction;
use std::collections::HashSet;
use std::fmt::Debug;
use std::fs::File;
use std::io::Error as IOError;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use generated::io_uring_params;
use operation::{Cqe, FixedFd, OpCode, Operation};
use probe::{PROBE_LEN, ProbeWrapper};
pub use queue::completion::CQueueError;
use queue::completion::CompletionQueue;
pub use queue::submission::SQueueError;
use queue::submission::SubmissionQueue;
use restriction::Restriction;
use vmm_sys_util::syscall::SyscallReturnCode;
use crate::io_uring::generated::io_uring_register_op;
// IO_uring operations that we require to be supported by the host kernel.
// Verified at ring-creation time by `check_operations` (IORING_REGISTER_PROBE).
const REQUIRED_OPS: [OpCode; 2] = [OpCode::Read, OpCode::Write];
// Upper bound on the number of files that may be registered with a ring.
// Taken from linux/fs/io_uring.c
const IORING_MAX_FIXED_FILES: usize = 1 << 15;
// NOTE: `displaydoc::Display` derives the `Display` implementation from the
// `///` doc comments on the variants below, so these doc strings double as the
// user-visible error messages — do not reword them casually.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
/// IoUring Error.
pub enum IoUringError {
    /// Error originating in the completion queue: {0}
    CQueue(CQueueError),
    /// Could not enable the ring: {0}
    Enable(IOError),
    /// A FamStructWrapper operation has failed: {0}
    Fam(vmm_sys_util::fam::Error),
    /// The number of ops in the ring is >= CQ::count
    FullCQueue,
    /// Fd was not registered: {0}
    InvalidFixedFd(FixedFd),
    /// There are no registered fds.
    NoRegisteredFds,
    /// Error probing the io_uring subsystem: {0}
    Probe(IOError),
    /// Could not register eventfd: {0}
    RegisterEventfd(IOError),
    /// Could not register file: {0}
    RegisterFile(IOError),
    /// Attempted to register too many files.
    RegisterFileLimitExceeded,
    /// Could not register restrictions: {0}
    RegisterRestrictions(IOError),
    /// Error calling io_uring_setup: {0}
    Setup(IOError),
    /// Error originating in the submission queue: {0}
    SQueue(SQueueError),
    /// Required feature is not supported on the host kernel: {0}
    UnsupportedFeature(&'static str),
    /// Required operation is not supported on the host kernel: {0}
    UnsupportedOperation(&'static str),
}
impl IoUringError {
    /// Return true if this error is caused by a full submission or completion
    /// queue — i.e. a transient condition the caller may retry after draining,
    /// rather than a fatal failure.
    pub fn is_throttling_err(&self) -> bool {
        match self {
            Self::FullCQueue | Self::SQueue(SQueueError::FullQueue) => true,
            _ => false,
        }
    }
}
/// Main object representing an io_uring instance.
#[derive(Debug)]
pub struct IoUring<T> {
    // Size of the fixed-file table registered with the kernel; fixed-fd
    // indices passed to `push` must be below this.
    registered_fds_count: u32,
    // Ring through which operations are submitted to the kernel.
    squeue: SubmissionQueue,
    // Ring from which completed operations are reaped.
    cqueue: CompletionQueue,
    // Make sure the fd is declared after the queues, so that it isn't dropped before them.
    // If we drop the queues after the File, the associated kernel mem will never be freed.
    // The correct cleanup order is munmap(rings) -> close(fd).
    // We don't need to manually drop the fields in order, since Rust has a well defined drop order.
    fd: File,
    // The total number of ops. These include the ops on the submission queue, the in-flight ops
    // and the ops that are in the CQ, but haven't been popped yet.
    num_ops: u32,
    // Owns each op's `user_data` payload while the op is in flight; SQEs/CQEs
    // carry the slab key instead of the payload itself.
    slab: slab::Slab<T>,
}
impl<T: Debug> IoUring<T> {
    /// Create a new instance.
    ///
    /// # Arguments
    ///
    /// * `num_entries` - Requested number of entries in the ring. Will be rounded up to the nearest
    ///   power of two.
    /// * `files` - Files to be registered for IO.
    /// * `restrictions` - Vector of [`Restriction`](restriction/enum.Restriction.html)s
    /// * `eventfd` - Optional eventfd for receiving completion notifications.
    ///
    /// # Errors
    ///
    /// Returns an error if the `io_uring_setup` syscall, the feature/operation
    /// probing, or any of the registration steps fail.
    pub fn new(
        num_entries: u32,
        files: Vec<&File>,
        restrictions: Vec<Restriction>,
        eventfd: Option<RawFd>,
    ) -> Result<Self, IoUringError> {
        let mut params = io_uring_params {
            // Create the ring as disabled, so that we may register restrictions.
            flags: generated::IORING_SETUP_R_DISABLED,
            ..Default::default()
        };
        // SAFETY: Safe because values are valid and we check the return value.
        let fd = SyscallReturnCode(unsafe {
            libc::syscall(
                libc::SYS_io_uring_setup,
                num_entries,
                &mut params as *mut io_uring_params,
            )
        })
        .into_result()
        .map_err(IoUringError::Setup)?;
        // Safe to unwrap because the fd is valid.
        let fd = RawFd::try_from(fd).unwrap();
        // SAFETY: Safe because the fd is valid and because this struct owns the fd.
        let file = unsafe { File::from_raw_fd(fd) };
        // Refuse to run on kernels lacking the features we rely on.
        Self::check_features(params)?;
        let squeue = SubmissionQueue::new(fd, &params).map_err(IoUringError::SQueue)?;
        let cqueue = CompletionQueue::new(fd, &params).map_err(IoUringError::CQueue)?;
        // One slab slot per possible in-flight op (SQ plus CQ capacity).
        let slab =
            slab::Slab::with_capacity(params.sq_entries as usize + params.cq_entries as usize);
        let mut instance = Self {
            squeue,
            cqueue,
            fd: file,
            registered_fds_count: 0,
            num_ops: 0,
            slab,
        };
        instance.check_operations()?;
        // All registration happens while the ring is still disabled; enabling
        // comes last.
        if let Some(eventfd) = eventfd {
            instance.register_eventfd(eventfd)?;
        }
        instance.register_restrictions(restrictions)?;
        instance.register_files(files)?;
        instance.enable()?;
        Ok(instance)
    }
    /// Push an [`Operation`](operation/struct.Operation.html) onto the submission queue.
    ///
    /// On failure the error is returned together with the operation's
    /// `user_data`, so the caller regains ownership of the payload.
    pub fn push(&mut self, op: Operation<T>) -> Result<(), (IoUringError, T)> {
        // validate that we actually did register fds
        let fd = op.fd();
        match self.registered_fds_count {
            0 => Err((IoUringError::NoRegisteredFds, op.user_data)),
            len if fd >= len => Err((IoUringError::InvalidFixedFd(fd), op.user_data)),
            _ => {
                // Refuse to queue more ops than the CQ can hold at once.
                if self.num_ops >= self.cqueue.count() {
                    return Err((IoUringError::FullCQueue, op.user_data));
                }
                self.squeue
                    .push(op.into_sqe(&mut self.slab))
                    .inspect(|_| {
                        // This is safe since self.num_ops < IORING_MAX_CQ_ENTRIES (65536)
                        self.num_ops += 1;
                    })
                    .map_err(|(sqe_err, user_data_key)| -> (IoUringError, T) {
                        (
                            IoUringError::SQueue(sqe_err),
                            // We don't use slab.try_remove here for 2 reasons:
                            // 1. user_data was inserted in slab with step `op.into_sqe` just
                            //    before the push op so the user_data key should be valid and if
                            //    key is valid then `slab.remove()` will not fail.
                            // 2. If we use `slab.try_remove()` we'll have to find a way to return
                            //    a default value for the generic type T which is difficult because
                            //    it expands to more crates which don't make it easy to define a
                            //    default/clone type for type T.
                            // So believing that `slab.remove` won't fail we don't use
                            // the `slab.try_remove` method.
                            #[allow(clippy::cast_possible_truncation)]
                            self.slab.remove(user_data_key as usize),
                        )
                    })
            }
        }
    }
/// Pop a completed entry off the completion queue. Returns `Ok(None)` if there are no entries.
/// The type `T` must be the same as the `user_data` type used for `push`-ing the operation.
pub fn pop(&mut self) -> Result<Option<Cqe<T>>, IoUringError> {
self.cqueue
.pop(&mut self.slab)
.map(|maybe_cqe| {
maybe_cqe.inspect(|_| {
// This is safe since the pop-ed CQEs have been previously pushed. However
// we use a saturating_sub for extra safety.
self.num_ops = self.num_ops.saturating_sub(1);
})
})
.map_err(IoUringError::CQueue)
}
fn do_submit(&mut self, min_complete: u32) -> Result<u32, IoUringError> {
self.squeue
.submit(min_complete)
.map_err(IoUringError::SQueue)
}
    /// Submit all operations but don't wait for any completions.
    ///
    /// Returns the number of submitted entries.
    pub fn submit(&mut self) -> Result<u32, IoUringError> {
        // min_complete == 0: hand the SQEs to the kernel without waiting.
        self.do_submit(0)
    }
    /// Submit all operations and wait for their completion.
    ///
    /// Waits for `num_ops` completions, i.e. everything currently queued or
    /// in flight.
    pub fn submit_and_wait_all(&mut self) -> Result<u32, IoUringError> {
        self.do_submit(self.num_ops)
    }
    /// Return the number of operations currently on the submission queue
    /// (pushed but not yet submitted to the kernel).
    pub fn pending_sqes(&self) -> Result<u32, IoUringError> {
        self.squeue.pending().map_err(IoUringError::SQueue)
    }
    /// A total of the number of ops in the submission and completion queues, as well as the
    /// in-flight ops.
    pub fn num_ops(&self) -> u32 {
        self.num_ops
    }
    /// Enable the ring via `IORING_REGISTER_ENABLE_RINGS`.
    ///
    /// The ring is created with `IORING_SETUP_R_DISABLED` so that restrictions
    /// and files can be registered first; this is the final activation step.
    fn enable(&mut self) -> Result<(), IoUringError> {
        // SAFETY: Safe because values are valid and we check the return value.
        SyscallReturnCode(unsafe {
            libc::syscall(
                libc::SYS_io_uring_register,
                self.fd.as_raw_fd(),
                io_uring_register_op::IORING_REGISTER_ENABLE_RINGS,
                std::ptr::null::<libc::c_void>(),
                0,
            )
        })
        .into_empty_result()
        .map_err(IoUringError::Enable)
    }
    /// Register `files` with the kernel (`IORING_REGISTER_FILES`) so that ops
    /// can reference them by fixed index instead of raw fd.
    fn register_files(&mut self, files: Vec<&File>) -> Result<(), IoUringError> {
        if files.is_empty() {
            // No-op.
            return Ok(());
        }
        if (self.registered_fds_count as usize).saturating_add(files.len()) > IORING_MAX_FIXED_FILES
        {
            return Err(IoUringError::RegisterFileLimitExceeded);
        }
        // SAFETY: Safe because values are valid and we check the return value.
        // The collected Vec of raw fds below is a temporary that lives until
        // the end of this statement, i.e. for the whole duration of the
        // syscall.
        SyscallReturnCode(unsafe {
            libc::syscall(
                libc::SYS_io_uring_register,
                self.fd.as_raw_fd(),
                io_uring_register_op::IORING_REGISTER_FILES,
                files
                    .iter()
                    .map(|f| f.as_raw_fd())
                    .collect::<Vec<_>>()
                    .as_mut_slice()
                    .as_mut_ptr() as *const _,
                files.len(),
            )
        })
        .into_empty_result()
        .map_err(IoUringError::RegisterFile)?;
        // The unwrap is safe since files.len() <= IORING_MAX_FIXED_FILES (1 << 15),
        // which fits in a u32.
        self.registered_fds_count += u32::try_from(files.len()).unwrap();
        Ok(())
    }
    /// Ask the kernel to post completion notifications to `fd`
    /// (`IORING_REGISTER_EVENTFD`).
    fn register_eventfd(&self, fd: RawFd) -> Result<(), IoUringError> {
        // SAFETY: Safe because values are valid and we check the return value.
        SyscallReturnCode(unsafe {
            libc::syscall(
                libc::SYS_io_uring_register,
                self.fd.as_raw_fd(),
                io_uring_register_op::IORING_REGISTER_EVENTFD,
                (&fd) as *const _,
                1,
            )
        })
        .into_empty_result()
        .map_err(IoUringError::RegisterEventfd)
    }
    /// Install the given restrictions (`IORING_REGISTER_RESTRICTIONS`).
    ///
    /// The ring is still disabled at this point (see `new`), which is what
    /// makes registering restrictions possible.
    fn register_restrictions(&self, restrictions: Vec<Restriction>) -> Result<(), IoUringError> {
        if restrictions.is_empty() {
            // No-op.
            return Ok(());
        }
        // SAFETY: Safe because values are valid and we check the return value.
        // The collected Vec of raw restriction structs is a temporary that
        // lives until the end of this statement, covering the syscall.
        SyscallReturnCode(unsafe {
            libc::syscall(
                libc::SYS_io_uring_register,
                self.fd.as_raw_fd(),
                io_uring_register_op::IORING_REGISTER_RESTRICTIONS,
                restrictions
                    .iter()
                    .map(generated::io_uring_restriction::from)
                    .collect::<Vec<_>>()
                    .as_mut_slice()
                    .as_mut_ptr(),
                restrictions.len(),
            )
        })
        .into_empty_result()
        .map_err(IoUringError::RegisterRestrictions)
    }
fn check_features(params: io_uring_params) -> Result<(), IoUringError> {
// We require that the host kernel will never drop completed entries due to an (unlikely)
// overflow in the completion queue.
// This feature is supported for kernels greater than 5.7.
// An alternative fix would be to keep an internal counter that tracks the number of
// submitted entries that haven't been completed and makes sure it doesn't exceed
// (2 * num_entries).
if (params.features & generated::IORING_FEAT_NODROP) == 0 {
return Err(IoUringError::UnsupportedFeature("IORING_FEAT_NODROP"));
}
Ok(())
}
    /// Probe the kernel (`IORING_REGISTER_PROBE`) and verify that every opcode
    /// in `REQUIRED_OPS` is supported; fail construction otherwise.
    fn check_operations(&self) -> Result<(), IoUringError> {
        let mut probes = ProbeWrapper::new(PROBE_LEN).map_err(IoUringError::Fam)?;
        // SAFETY: Safe because values are valid and we check the return value.
        SyscallReturnCode(unsafe {
            libc::syscall(
                libc::SYS_io_uring_register,
                self.fd.as_raw_fd(),
                io_uring_register_op::IORING_REGISTER_PROBE,
                probes.as_mut_fam_struct_ptr(),
                PROBE_LEN,
            )
        })
        .into_empty_result()
        .map_err(IoUringError::Probe)?;
        // Collect the opcodes the kernel flagged as supported.
        let supported_opcodes: HashSet<u8> = probes
            .as_slice()
            .iter()
            .filter(|op| ((u32::from(op.flags)) & generated::IO_URING_OP_SUPPORTED) != 0)
            .map(|op| op.op)
            .collect();
        for opcode in REQUIRED_OPS.iter() {
            if !supported_opcodes.contains(&(*opcode as u8)) {
                return Err(IoUringError::UnsupportedOperation((*opcode).into()));
            }
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use std::os::unix::fs::FileExt;
use proptest::prelude::*;
use proptest::strategy::Strategy;
use proptest::test_runner::{Config, TestRunner};
use vm_memory::VolatileMemory;
use vmm_sys_util::syscall::SyscallReturnCode;
use vmm_sys_util::tempfile::TempFile;
/// -------------------------------------
/// BEGIN PROPERTY BASED TESTING
use super::*;
use crate::vstate::memory::{Bytes, MmapRegion};
fn drain_cqueue(ring: &mut IoUring<u32>) {
while let Some(entry) = ring.pop().unwrap() {
entry.result().unwrap();
// Assert that there were no partial writes.
let count = entry.result().unwrap();
let user_data = entry.user_data();
assert_eq!(count, user_data);
}
}
    /// Allocates an anonymous, private, read-write mapping of `len` bytes and
    /// wraps it in a raw `MmapRegion`; panics on mmap failure.
    fn setup_mem_region(len: usize) -> MmapRegion {
        const PROT: i32 = libc::PROT_READ | libc::PROT_WRITE;
        const FLAGS: i32 = libc::MAP_ANONYMOUS | libc::MAP_PRIVATE;
        let ptr = unsafe { libc::mmap(std::ptr::null_mut(), len, PROT, FLAGS, -1, 0) };
        // NOTE(review): `(ptr as isize) < 0` is a heuristic for MAP_FAILED
        // ((void*)-1); comparing against `libc::MAP_FAILED` directly would be
        // more precise.
        if (ptr as isize) < 0 {
            panic!("Mmap failed with {}", std::io::Error::last_os_error());
        }
        unsafe {
            // Use the raw version because we want to unmap memory ourselves.
            MmapRegion::build_raw(ptr.cast::<u8>(), len, PROT, FLAGS).unwrap()
        }
    }
    /// Unmaps a region previously created with `setup_mem_region` (raw regions
    /// are not unmapped on drop).
    fn free_mem_region(region: MmapRegion) {
        unsafe { libc::munmap(region.as_ptr().cast::<libc::c_void>(), region.len()) };
    }
    /// Copies the full contents of `region` into a fresh `Vec<u8>` for
    /// comparison purposes.
    fn read_entire_mem_region(region: &MmapRegion) -> Vec<u8> {
        let mut result = vec![0u8; region.len()];
        let count = region.as_volatile_slice().read(&mut result[..], 0).unwrap();
        assert_eq!(count, region.len());
        result
    }
    /// Proptest strategy producing a random read or write `Operation` whose
    /// (offset, len) window always fits inside a file of `file_len` bytes.
    #[allow(clippy::let_with_type_underscore)]
    fn arbitrary_rw_operation(file_len: u32) -> impl Strategy<Value = Operation<u32>> {
        (
            // OpCode: 0 -> Write, 1 -> Read.
            0..2,
            // Length of the operation.
            0u32..file_len,
        )
            .prop_flat_map(move |(op, len)| {
                (
                    // op
                    Just(op),
                    // len
                    Just(len),
                    // offset — bounded so offset + len stays within the file.
                    (0u32..(file_len - len)),
                    // mem region offset — same bound, within the buffer.
                    (0u32..(file_len - len)),
                )
            })
            .prop_map(move |(op, len, off, mem_off)| {
                // We actually use an offset instead of an address, because we later need to modify
                // the memory region on which the operation is performed, based on the opcode.
                let mut operation = match op {
                    0 => Operation::write(0, mem_off as usize, len, off.into(), len),
                    _ => Operation::read(0, mem_off as usize, len, off.into(), len),
                };
                // Make sure the operations are executed in-order, so that they are equivalent to
                // their sync counterparts.
                operation.set_linked();
                operation
            })
    }
#[test]
fn proptest_read_write_correctness() {
// Performs a sequence of random read and write operations on two files, with sync and
// async IO, respectively.
// Verifies that the files are identical afterwards and that the read operations returned
// the same values.
const FILE_LEN: u32 = 1024;
// The number of arbitrary operations in a testrun.
const OPS_COUNT: usize = 2000;
const RING_SIZE: u32 = 128;
// Allocate and init memory for holding the data that will be written into the file.
let write_mem_region = setup_mem_region(FILE_LEN as usize);
let sync_read_mem_region = setup_mem_region(FILE_LEN as usize);
let async_read_mem_region = setup_mem_region(FILE_LEN as usize);
// Init the write buffers with 0,1,2,...
for i in 0..FILE_LEN {
write_mem_region
.as_volatile_slice()
.write_obj(u8::try_from(i % u32::from(u8::MAX)).unwrap(), i as usize)
.unwrap();
}
// Create two files and init their contents to zeros.
let init_contents = [0u8; FILE_LEN as usize];
let file_async = TempFile::new().unwrap().into_file();
file_async.write_all_at(&init_contents, 0).unwrap();
let file_sync = TempFile::new().unwrap().into_file();
file_sync.write_all_at(&init_contents, 0).unwrap();
// Create a custom test runner since we had to add some state buildup to the test.
// (Referring to the above initializations).
let mut runner = TestRunner::new(Config {
#[cfg(target_arch = "x86_64")]
cases: 1000, // Should run for about a minute.
// Lower the cases on ARM since they take longer and cause coverage test timeouts.
#[cfg(target_arch = "aarch64")]
cases: 500,
..Config::default()
});
runner
.run(
&proptest::collection::vec(arbitrary_rw_operation(FILE_LEN), OPS_COUNT),
|set| {
let mut ring =
IoUring::new(RING_SIZE, vec![&file_async], vec![], None).unwrap();
for mut operation in set {
// Perform the sync op.
let count = match operation.opcode {
OpCode::Write => u32::try_from(
SyscallReturnCode(unsafe {
libc::pwrite(
file_sync.as_raw_fd(),
write_mem_region.as_ptr().add(operation.addr.unwrap())
as *const libc::c_void,
operation.len.unwrap() as usize,
i64::try_from(operation.offset.unwrap()).unwrap(),
)
})
.into_result()
.unwrap(),
)
.unwrap(),
OpCode::Read => u32::try_from(
SyscallReturnCode(unsafe {
libc::pread(
file_sync.as_raw_fd(),
sync_read_mem_region
.as_ptr()
.add(operation.addr.unwrap())
.cast::<libc::c_void>(),
operation.len.unwrap() as usize,
i64::try_from(operation.offset.unwrap()).unwrap(),
)
})
.into_result()
.unwrap(),
)
.unwrap(),
_ => unreachable!(),
};
if count < operation.len.unwrap() {
panic!("Synchronous partial operation: {:?}", operation);
}
// Perform the async op.
// Modify the operation address based on the opcode.
match operation.opcode {
OpCode::Write => {
operation.addr = Some(unsafe {
write_mem_region.as_ptr().add(operation.addr.unwrap()) as usize
})
}
OpCode::Read => {
operation.addr = Some(unsafe {
async_read_mem_region.as_ptr().add(operation.addr.unwrap())
as usize
})
}
_ => unreachable!(),
};
// If the ring is full, submit and wait.
if ring.pending_sqes().unwrap() == RING_SIZE {
ring.submit_and_wait_all().unwrap();
drain_cqueue(&mut ring);
}
ring.push(operation).unwrap();
}
// Submit any left async ops and wait.
ring.submit_and_wait_all().unwrap();
drain_cqueue(&mut ring);
// Get the write result for async IO.
let mut async_result = [0u8; FILE_LEN as usize];
file_async.read_exact_at(&mut async_result, 0).unwrap();
// Get the write result for sync IO.
let mut sync_result = [0u8; FILE_LEN as usize];
file_sync.read_exact_at(&mut sync_result, 0).unwrap();
// Now compare the write results.
assert_eq!(sync_result, async_result);
// Now compare the read results for sync and async IO.
assert_eq!(
read_entire_mem_region(&sync_read_mem_region),
read_entire_mem_region(&async_read_mem_region)
);
Ok(())
},
)
.unwrap();
// Clean up the memory.
free_mem_region(write_mem_region);
free_mem_region(sync_read_mem_region);
free_mem_region(async_read_mem_region);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/io_uring/probe.rs | src/vmm/src/io_uring/probe.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vmm_sys_util::fam::{FamStruct, FamStructWrapper};
use vmm_sys_util::generate_fam_struct_impl;
use crate::io_uring::generated::{io_uring_probe, io_uring_probe_op};
// There is no max for the number of operations returned by probing. So we fallback to using the
// number of values representable in a u8.
pub(crate) const PROBE_LEN: usize = u8::MAX as usize + 1;
// Implement the FamStruct trait for the bindgen-generated `io_uring_probe`,
// whose trailing `ops` flexible array holds up to PROBE_LEN `io_uring_probe_op`
// entries (length tracked in the `ops_len` field).
generate_fam_struct_impl!(
    io_uring_probe,
    io_uring_probe_op,
    ops,
    u8,
    ops_len,
    PROBE_LEN
);
pub(crate) type ProbeWrapper = FamStructWrapper<io_uring_probe>;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/io_uring/generated.rs | src/vmm/src/io_uring/generated.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// automatically generated by tools/bindgen.sh
#![allow(
non_camel_case_types,
non_upper_case_globals,
dead_code,
non_snake_case,
clippy::ptr_as_ptr,
clippy::undocumented_unsafe_blocks,
missing_debug_implementations,
clippy::tests_outside_test_module,
unsafe_op_in_unsafe_fn,
clippy::redundant_static_lifetimes
)]
// Bindgen boilerplate: models a C flexible array member (zero-sized trailing
// array) at the end of a struct. NOTE(review): this file is generated by
// tools/bindgen.sh (see header); manual edits will be lost on regeneration.
#[repr(C)]
#[derive(Default)]
pub struct __IncompleteArrayField<T>(::std::marker::PhantomData<T>, [T; 0]);
impl<T> __IncompleteArrayField<T> {
    #[inline]
    pub const fn new() -> Self {
        __IncompleteArrayField(::std::marker::PhantomData, [])
    }
    #[inline]
    pub fn as_ptr(&self) -> *const T {
        self as *const _ as *const T
    }
    #[inline]
    pub fn as_mut_ptr(&mut self) -> *mut T {
        self as *mut _ as *mut T
    }
    #[inline]
    pub unsafe fn as_slice(&self, len: usize) -> &[T] {
        ::std::slice::from_raw_parts(self.as_ptr(), len)
    }
    #[inline]
    pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] {
        ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len)
    }
}
impl<T> ::std::fmt::Debug for __IncompleteArrayField<T> {
    fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        fmt.write_str("__IncompleteArrayField")
    }
}
// Bindgen boilerplate: zero-sized marker used to expose one field of a C union;
// `as_ref`/`as_mut` reinterpret the containing storage as the field's type.
// NOTE(review): generated by tools/bindgen.sh (see header); manual edits will
// be lost on regeneration.
#[repr(C)]
pub struct __BindgenUnionField<T>(::std::marker::PhantomData<T>);
impl<T> __BindgenUnionField<T> {
    #[inline]
    pub const fn new() -> Self {
        __BindgenUnionField(::std::marker::PhantomData)
    }
    #[inline]
    pub unsafe fn as_ref(&self) -> &T {
        ::std::mem::transmute(self)
    }
    #[inline]
    pub unsafe fn as_mut(&mut self) -> &mut T {
        ::std::mem::transmute(self)
    }
}
impl<T> ::std::default::Default for __BindgenUnionField<T> {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}
impl<T> ::std::clone::Clone for __BindgenUnionField<T> {
    #[inline]
    fn clone(&self) -> Self {
        *self
    }
}
impl<T> ::std::marker::Copy for __BindgenUnionField<T> {}
impl<T> ::std::fmt::Debug for __BindgenUnionField<T> {
    fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        fmt.write_str("__BindgenUnionField")
    }
}
impl<T> ::std::hash::Hash for __BindgenUnionField<T> {
    fn hash<H: ::std::hash::Hasher>(&self, _state: &mut H) {}
}
impl<T> ::std::cmp::PartialEq for __BindgenUnionField<T> {
    fn eq(&self, _other: &__BindgenUnionField<T>) -> bool {
        true
    }
}
impl<T> ::std::cmp::Eq for __BindgenUnionField<T> {}
// NOTE(review): the `__bindgen*` helpers and layout-assert blocks in this
// file match rust-bindgen's output conventions, so this appears to be
// auto-generated from the io_uring UAPI header — prefer regenerating over
// hand-editing.

// Special fixed-file index value (note: signed, -1).
pub const IORING_FILE_INDEX_ALLOC: i32 = -1;
// IORING_SETUP_*: ring setup flag bits (each a distinct power of two).
pub const IORING_SETUP_IOPOLL: u32 = 1;
pub const IORING_SETUP_SQPOLL: u32 = 2;
pub const IORING_SETUP_SQ_AFF: u32 = 4;
pub const IORING_SETUP_CQSIZE: u32 = 8;
pub const IORING_SETUP_CLAMP: u32 = 16;
pub const IORING_SETUP_ATTACH_WQ: u32 = 32;
pub const IORING_SETUP_R_DISABLED: u32 = 64;
pub const IORING_SETUP_SUBMIT_ALL: u32 = 128;
pub const IORING_SETUP_COOP_TASKRUN: u32 = 256;
pub const IORING_SETUP_TASKRUN_FLAG: u32 = 512;
pub const IORING_SETUP_SQE128: u32 = 1024;
pub const IORING_SETUP_CQE32: u32 = 2048;
pub const IORING_SETUP_SINGLE_ISSUER: u32 = 4096;
pub const IORING_SETUP_DEFER_TASKRUN: u32 = 8192;
pub const IORING_SETUP_NO_MMAP: u32 = 16384;
pub const IORING_SETUP_REGISTERED_FD_ONLY: u32 = 32768;
pub const IORING_SETUP_NO_SQARRAY: u32 = 65536;
pub const IORING_SETUP_HYBRID_IOPOLL: u32 = 131072;
// IORING_URING_CMD_*: uring-cmd flag bit and mask.
pub const IORING_URING_CMD_FIXED: u32 = 1;
pub const IORING_URING_CMD_MASK: u32 = 1;
// Fsync flag bits.
pub const IORING_FSYNC_DATASYNC: u32 = 1;
// IORING_TIMEOUT_* / IORING_LINK_TIMEOUT_*: timeout flag bits; the *_MASK
// values combine several of the bits above.
pub const IORING_TIMEOUT_ABS: u32 = 1;
pub const IORING_TIMEOUT_UPDATE: u32 = 2;
pub const IORING_TIMEOUT_BOOTTIME: u32 = 4;
pub const IORING_TIMEOUT_REALTIME: u32 = 8;
pub const IORING_LINK_TIMEOUT_UPDATE: u32 = 16;
pub const IORING_TIMEOUT_ETIME_SUCCESS: u32 = 32;
pub const IORING_TIMEOUT_MULTISHOT: u32 = 64;
pub const IORING_TIMEOUT_CLOCK_MASK: u32 = 12;
pub const IORING_TIMEOUT_UPDATE_MASK: u32 = 18;
// IORING_POLL_*: poll flag bits.
pub const IORING_POLL_ADD_MULTI: u32 = 1;
pub const IORING_POLL_UPDATE_EVENTS: u32 = 2;
pub const IORING_POLL_UPDATE_USER_DATA: u32 = 4;
pub const IORING_POLL_ADD_LEVEL: u32 = 8;
// IORING_ASYNC_CANCEL_*: cancellation flag bits.
pub const IORING_ASYNC_CANCEL_ALL: u32 = 1;
pub const IORING_ASYNC_CANCEL_FD: u32 = 2;
pub const IORING_ASYNC_CANCEL_ANY: u32 = 4;
pub const IORING_ASYNC_CANCEL_FD_FIXED: u32 = 8;
pub const IORING_ASYNC_CANCEL_USERDATA: u32 = 16;
pub const IORING_ASYNC_CANCEL_OP: u32 = 32;
// Send/recv flag bits.
pub const IORING_RECVSEND_POLL_FIRST: u32 = 1;
pub const IORING_RECV_MULTISHOT: u32 = 2;
pub const IORING_RECVSEND_FIXED_BUF: u32 = 4;
pub const IORING_SEND_ZC_REPORT_USAGE: u32 = 8;
pub const IORING_RECVSEND_BUNDLE: u32 = 16;
pub const IORING_NOTIF_USAGE_ZC_COPIED: u32 = 2147483648;
// IORING_ACCEPT_*: accept flag bits.
pub const IORING_ACCEPT_MULTISHOT: u32 = 1;
pub const IORING_ACCEPT_DONTWAIT: u32 = 2;
pub const IORING_ACCEPT_POLL_FIRST: u32 = 4;
// IORING_MSG_RING_*: msg-ring flag bits.
pub const IORING_MSG_RING_CQE_SKIP: u32 = 1;
pub const IORING_MSG_RING_FLAGS_PASS: u32 = 2;
pub const IORING_FIXED_FD_NO_CLOEXEC: u32 = 1;
// IORING_NOP_*: no-op flag bits.
pub const IORING_NOP_INJECT_RESULT: u32 = 1;
pub const IORING_NOP_FILE: u32 = 2;
pub const IORING_NOP_FIXED_FILE: u32 = 4;
pub const IORING_NOP_FIXED_BUFFER: u32 = 8;
// IORING_CQE_F_*: completion-entry flag bits; BUFFER_SHIFT is a shift
// amount, not a flag.
pub const IORING_CQE_F_BUFFER: u32 = 1;
pub const IORING_CQE_F_MORE: u32 = 2;
pub const IORING_CQE_F_SOCK_NONEMPTY: u32 = 4;
pub const IORING_CQE_F_NOTIF: u32 = 8;
pub const IORING_CQE_F_BUF_MORE: u32 = 16;
pub const IORING_CQE_BUFFER_SHIFT: u32 = 16;
// IORING_OFF_*: offset/shift/mask constants for the ring regions.
pub const IORING_OFF_SQ_RING: u32 = 0;
pub const IORING_OFF_CQ_RING: u32 = 134217728;
pub const IORING_OFF_SQES: u32 = 268435456;
pub const IORING_OFF_PBUF_RING: u32 = 2147483648;
pub const IORING_OFF_PBUF_SHIFT: u32 = 16;
pub const IORING_OFF_MMAP_MASK: u32 = 4160749568;
// IORING_SQ_* / IORING_CQ_*: ring status flag bits.
pub const IORING_SQ_NEED_WAKEUP: u32 = 1;
pub const IORING_SQ_CQ_OVERFLOW: u32 = 2;
pub const IORING_SQ_TASKRUN: u32 = 4;
pub const IORING_CQ_EVENTFD_DISABLED: u32 = 1;
// IORING_ENTER_*: enter flag bits.
pub const IORING_ENTER_GETEVENTS: u32 = 1;
pub const IORING_ENTER_SQ_WAKEUP: u32 = 2;
pub const IORING_ENTER_SQ_WAIT: u32 = 4;
pub const IORING_ENTER_EXT_ARG: u32 = 8;
pub const IORING_ENTER_REGISTERED_RING: u32 = 16;
pub const IORING_ENTER_ABS_TIMER: u32 = 32;
pub const IORING_ENTER_EXT_ARG_REG: u32 = 64;
// IORING_FEAT_*: feature flag bits.
pub const IORING_FEAT_SINGLE_MMAP: u32 = 1;
pub const IORING_FEAT_NODROP: u32 = 2;
pub const IORING_FEAT_SUBMIT_STABLE: u32 = 4;
pub const IORING_FEAT_RW_CUR_POS: u32 = 8;
pub const IORING_FEAT_CUR_PERSONALITY: u32 = 16;
pub const IORING_FEAT_FAST_POLL: u32 = 32;
pub const IORING_FEAT_POLL_32BITS: u32 = 64;
pub const IORING_FEAT_SQPOLL_NONFIXED: u32 = 128;
pub const IORING_FEAT_EXT_ARG: u32 = 256;
pub const IORING_FEAT_NATIVE_WORKERS: u32 = 512;
pub const IORING_FEAT_RSRC_TAGS: u32 = 1024;
pub const IORING_FEAT_CQE_SKIP: u32 = 2048;
pub const IORING_FEAT_LINKED_FILE: u32 = 4096;
pub const IORING_FEAT_REG_REG_RING: u32 = 8192;
pub const IORING_FEAT_RECVSEND_BUNDLE: u32 = 16384;
pub const IORING_FEAT_MIN_TIMEOUT: u32 = 32768;
// Resource-registration constants (note REGISTER_FILES_SKIP is signed).
pub const IORING_RSRC_REGISTER_SPARSE: u32 = 1;
pub const IORING_REGISTER_FILES_SKIP: i32 = -2;
pub const IO_URING_OP_SUPPORTED: u32 = 1;
// Kernel-style fixed-width integer aliases used by the structs below.
pub type __u8 = ::std::os::raw::c_uchar;
pub type __u16 = ::std::os::raw::c_ushort;
pub type __s32 = ::std::os::raw::c_int;
pub type __u32 = ::std::os::raw::c_uint;
pub type __u64 = ::std::os::raw::c_ulonglong;
pub type __kernel_time64_t = ::std::os::raw::c_longlong;
pub type __kernel_rwf_t = ::std::os::raw::c_int;
/// 64-bit time value matching the kernel's `struct __kernel_timespec`
/// (16 bytes; see the layout checks below).
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct __kernel_timespec {
    pub tv_sec: __kernel_time64_t,
    pub tv_nsec: ::std::os::raw::c_longlong,
}
// Compile-time layout checks: indexing a 1-element array with a non-zero
// value fails const evaluation, so any size/alignment/offset drift from the
// C definition breaks the build.
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of __kernel_timespec"][::std::mem::size_of::<__kernel_timespec>() - 16usize];
    ["Alignment of __kernel_timespec"][::std::mem::align_of::<__kernel_timespec>() - 8usize];
    ["Offset of field: __kernel_timespec::tv_sec"]
        [::std::mem::offset_of!(__kernel_timespec, tv_sec) - 0usize];
    ["Offset of field: __kernel_timespec::tv_nsec"]
        [::std::mem::offset_of!(__kernel_timespec, tv_nsec) - 8usize];
};
/// Submission queue entry (`struct io_uring_sqe`, 64 bytes — see the layout
/// checks further below). The C struct's anonymous unions surface here as
/// the `__bindgen_anon_N` fields of the `io_uring_sqe__bindgen_ty_N` types.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct io_uring_sqe {
    pub opcode: __u8,
    pub flags: __u8,
    pub ioprio: __u16,
    pub fd: __s32,
    pub __bindgen_anon_1: io_uring_sqe__bindgen_ty_1,
    pub __bindgen_anon_2: io_uring_sqe__bindgen_ty_2,
    pub len: __u32,
    pub __bindgen_anon_3: io_uring_sqe__bindgen_ty_3,
    pub user_data: __u64,
    pub __bindgen_anon_4: io_uring_sqe__bindgen_ty_4,
    pub personality: __u16,
    pub __bindgen_anon_5: io_uring_sqe__bindgen_ty_5,
    pub __bindgen_anon_6: io_uring_sqe__bindgen_ty_6,
}
/// First anonymous union of `io_uring_sqe` (8 bytes): `off`/`addr2` overlay,
/// or a `{cmd_op, __pad1}` pair.
#[repr(C)]
#[derive(Copy, Clone)]
pub union io_uring_sqe__bindgen_ty_1 {
    pub off: __u64,
    pub addr2: __u64,
    pub __bindgen_anon_1: io_uring_sqe__bindgen_ty_1__bindgen_ty_1,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct io_uring_sqe__bindgen_ty_1__bindgen_ty_1 {
    pub cmd_op: __u32,
    pub __pad1: __u32,
}
// Compile-time layout checks (a non-zero array index fails const eval).
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of io_uring_sqe__bindgen_ty_1__bindgen_ty_1"]
        [::std::mem::size_of::<io_uring_sqe__bindgen_ty_1__bindgen_ty_1>() - 8usize];
    ["Alignment of io_uring_sqe__bindgen_ty_1__bindgen_ty_1"]
        [::std::mem::align_of::<io_uring_sqe__bindgen_ty_1__bindgen_ty_1>() - 4usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_1__bindgen_ty_1::cmd_op"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_1__bindgen_ty_1, cmd_op) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_1__bindgen_ty_1::__pad1"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_1__bindgen_ty_1, __pad1) - 4usize];
};
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of io_uring_sqe__bindgen_ty_1"]
        [::std::mem::size_of::<io_uring_sqe__bindgen_ty_1>() - 8usize];
    ["Alignment of io_uring_sqe__bindgen_ty_1"]
        [::std::mem::align_of::<io_uring_sqe__bindgen_ty_1>() - 8usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_1::off"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_1, off) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_1::addr2"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_1, addr2) - 0usize];
};
impl Default for io_uring_sqe__bindgen_ty_1 {
    /// Zero-initialized union.
    fn default() -> Self {
        // SAFETY: every member is plain integer data, so the all-zero bit
        // pattern is a valid value of this union.
        unsafe { ::std::mem::zeroed() }
    }
}
/// Second anonymous union of `io_uring_sqe` (8 bytes): `addr`/`splice_off_in`
/// overlay, or a `{level, optname}` pair.
#[repr(C)]
#[derive(Copy, Clone)]
pub union io_uring_sqe__bindgen_ty_2 {
    pub addr: __u64,
    pub splice_off_in: __u64,
    pub __bindgen_anon_1: io_uring_sqe__bindgen_ty_2__bindgen_ty_1,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct io_uring_sqe__bindgen_ty_2__bindgen_ty_1 {
    pub level: __u32,
    pub optname: __u32,
}
// Compile-time layout checks (a non-zero array index fails const eval).
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of io_uring_sqe__bindgen_ty_2__bindgen_ty_1"]
        [::std::mem::size_of::<io_uring_sqe__bindgen_ty_2__bindgen_ty_1>() - 8usize];
    ["Alignment of io_uring_sqe__bindgen_ty_2__bindgen_ty_1"]
        [::std::mem::align_of::<io_uring_sqe__bindgen_ty_2__bindgen_ty_1>() - 4usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_2__bindgen_ty_1::level"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_2__bindgen_ty_1, level) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_2__bindgen_ty_1::optname"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_2__bindgen_ty_1, optname) - 4usize];
};
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of io_uring_sqe__bindgen_ty_2"]
        [::std::mem::size_of::<io_uring_sqe__bindgen_ty_2>() - 8usize];
    ["Alignment of io_uring_sqe__bindgen_ty_2"]
        [::std::mem::align_of::<io_uring_sqe__bindgen_ty_2>() - 8usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_2::addr"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_2, addr) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_2::splice_off_in"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_2, splice_off_in) - 0usize];
};
impl Default for io_uring_sqe__bindgen_ty_2 {
    /// Zero-initialized union.
    fn default() -> Self {
        // SAFETY: every member is plain integer data, so the all-zero bit
        // pattern is a valid value of this union.
        unsafe { ::std::mem::zeroed() }
    }
}
/// Third anonymous union of `io_uring_sqe` (4 bytes): per-opcode flag words.
/// All members sit at offset 0; all are 32-bit except `poll_events` (u16).
#[repr(C)]
#[derive(Copy, Clone)]
pub union io_uring_sqe__bindgen_ty_3 {
    pub rw_flags: __kernel_rwf_t,
    pub fsync_flags: __u32,
    pub poll_events: __u16,
    pub poll32_events: __u32,
    pub sync_range_flags: __u32,
    pub msg_flags: __u32,
    pub timeout_flags: __u32,
    pub accept_flags: __u32,
    pub cancel_flags: __u32,
    pub open_flags: __u32,
    pub statx_flags: __u32,
    pub fadvise_advice: __u32,
    pub splice_flags: __u32,
    pub rename_flags: __u32,
    pub unlink_flags: __u32,
    pub hardlink_flags: __u32,
    pub xattr_flags: __u32,
    pub msg_ring_flags: __u32,
    pub uring_cmd_flags: __u32,
    pub waitid_flags: __u32,
    pub futex_flags: __u32,
    pub install_fd_flags: __u32,
    pub nop_flags: __u32,
}
// Compile-time layout checks (a non-zero array index fails const eval).
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of io_uring_sqe__bindgen_ty_3"]
        [::std::mem::size_of::<io_uring_sqe__bindgen_ty_3>() - 4usize];
    ["Alignment of io_uring_sqe__bindgen_ty_3"]
        [::std::mem::align_of::<io_uring_sqe__bindgen_ty_3>() - 4usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::rw_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, rw_flags) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::fsync_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, fsync_flags) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::poll_events"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, poll_events) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::poll32_events"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, poll32_events) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::sync_range_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, sync_range_flags) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::msg_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, msg_flags) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::timeout_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, timeout_flags) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::accept_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, accept_flags) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::cancel_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, cancel_flags) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::open_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, open_flags) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::statx_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, statx_flags) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::fadvise_advice"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, fadvise_advice) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::splice_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, splice_flags) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::rename_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, rename_flags) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::unlink_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, unlink_flags) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::hardlink_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, hardlink_flags) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::xattr_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, xattr_flags) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::msg_ring_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, msg_ring_flags) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::uring_cmd_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, uring_cmd_flags) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::waitid_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, waitid_flags) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::futex_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, futex_flags) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::install_fd_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, install_fd_flags) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_3::nop_flags"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_3, nop_flags) - 0usize];
};
impl Default for io_uring_sqe__bindgen_ty_3 {
    /// Zero-initialized union.
    fn default() -> Self {
        // SAFETY: every member is plain integer data, so the all-zero bit
        // pattern is a valid value of this union.
        unsafe { ::std::mem::zeroed() }
    }
}
/// Fourth anonymous union of `io_uring_sqe` (2 bytes): `buf_index`/`buf_group`.
/// `packed` drops the alignment to 1, matching the C `__attribute__((packed))`.
#[repr(C, packed)]
#[derive(Copy, Clone)]
pub union io_uring_sqe__bindgen_ty_4 {
    pub buf_index: __u16,
    pub buf_group: __u16,
}
// Compile-time layout checks (a non-zero array index fails const eval).
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of io_uring_sqe__bindgen_ty_4"]
        [::std::mem::size_of::<io_uring_sqe__bindgen_ty_4>() - 2usize];
    ["Alignment of io_uring_sqe__bindgen_ty_4"]
        [::std::mem::align_of::<io_uring_sqe__bindgen_ty_4>() - 1usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_4::buf_index"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_4, buf_index) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_4::buf_group"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_4, buf_group) - 0usize];
};
impl Default for io_uring_sqe__bindgen_ty_4 {
    /// Zero-initialized union.
    fn default() -> Self {
        // SAFETY: every member is plain integer data, so the all-zero bit
        // pattern is a valid value of this union.
        unsafe { ::std::mem::zeroed() }
    }
}
/// Fifth anonymous union of `io_uring_sqe` (4 bytes): `splice_fd_in`/
/// `file_index`/`optlen` overlay, or an `{addr_len, __pad3}` pair.
#[repr(C)]
#[derive(Copy, Clone)]
pub union io_uring_sqe__bindgen_ty_5 {
    pub splice_fd_in: __s32,
    pub file_index: __u32,
    pub optlen: __u32,
    pub __bindgen_anon_1: io_uring_sqe__bindgen_ty_5__bindgen_ty_1,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct io_uring_sqe__bindgen_ty_5__bindgen_ty_1 {
    pub addr_len: __u16,
    pub __pad3: [__u16; 1usize],
}
// Compile-time layout checks (a non-zero array index fails const eval).
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of io_uring_sqe__bindgen_ty_5__bindgen_ty_1"]
        [::std::mem::size_of::<io_uring_sqe__bindgen_ty_5__bindgen_ty_1>() - 4usize];
    ["Alignment of io_uring_sqe__bindgen_ty_5__bindgen_ty_1"]
        [::std::mem::align_of::<io_uring_sqe__bindgen_ty_5__bindgen_ty_1>() - 2usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_5__bindgen_ty_1::addr_len"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_5__bindgen_ty_1, addr_len) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_5__bindgen_ty_1::__pad3"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_5__bindgen_ty_1, __pad3) - 2usize];
};
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of io_uring_sqe__bindgen_ty_5"]
        [::std::mem::size_of::<io_uring_sqe__bindgen_ty_5>() - 4usize];
    ["Alignment of io_uring_sqe__bindgen_ty_5"]
        [::std::mem::align_of::<io_uring_sqe__bindgen_ty_5>() - 4usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_5::splice_fd_in"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_5, splice_fd_in) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_5::file_index"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_5, file_index) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_5::optlen"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_5, optlen) - 0usize];
};
impl Default for io_uring_sqe__bindgen_ty_5 {
    /// Zero-initialized union.
    fn default() -> Self {
        // SAFETY: every member is plain integer data, so the all-zero bit
        // pattern is a valid value of this union.
        unsafe { ::std::mem::zeroed() }
    }
}
/// Sixth anonymous union of `io_uring_sqe` (16 bytes), emulated as a struct:
/// the `__BindgenUnionField` members are zero-sized accessors over the
/// `bindgen_union_field: [u64; 2]` backing storage (used here because `cmd`
/// is a flexible array member, which a native Rust `union` cannot hold).
#[repr(C)]
#[derive(Copy, Clone)]
pub struct io_uring_sqe__bindgen_ty_6 {
    pub __bindgen_anon_1: __BindgenUnionField<io_uring_sqe__bindgen_ty_6__bindgen_ty_1>,
    pub optval: __BindgenUnionField<__u64>,
    pub cmd: __BindgenUnionField<[__u8; 0usize]>,
    pub bindgen_union_field: [u64; 2usize],
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct io_uring_sqe__bindgen_ty_6__bindgen_ty_1 {
    pub addr3: __u64,
    pub __pad2: [__u64; 1usize],
}
// Compile-time layout checks (a non-zero array index fails const eval).
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of io_uring_sqe__bindgen_ty_6__bindgen_ty_1"]
        [::std::mem::size_of::<io_uring_sqe__bindgen_ty_6__bindgen_ty_1>() - 16usize];
    ["Alignment of io_uring_sqe__bindgen_ty_6__bindgen_ty_1"]
        [::std::mem::align_of::<io_uring_sqe__bindgen_ty_6__bindgen_ty_1>() - 8usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_6__bindgen_ty_1::addr3"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_6__bindgen_ty_1, addr3) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_6__bindgen_ty_1::__pad2"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_6__bindgen_ty_1, __pad2) - 8usize];
};
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of io_uring_sqe__bindgen_ty_6"]
        [::std::mem::size_of::<io_uring_sqe__bindgen_ty_6>() - 16usize];
    ["Alignment of io_uring_sqe__bindgen_ty_6"]
        [::std::mem::align_of::<io_uring_sqe__bindgen_ty_6>() - 8usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_6::optval"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_6, optval) - 0usize];
    ["Offset of field: io_uring_sqe__bindgen_ty_6::cmd"]
        [::std::mem::offset_of!(io_uring_sqe__bindgen_ty_6, cmd) - 0usize];
};
impl Default for io_uring_sqe__bindgen_ty_6 {
    /// Zero-initialized emulated union.
    fn default() -> Self {
        // SAFETY: the accessor fields are zero-sized and the backing storage
        // is plain integer data, so the all-zero bit pattern is valid.
        unsafe { ::std::mem::zeroed() }
    }
}
// Compile-time layout checks for `io_uring_sqe` itself: pins the 64-byte
// size, 8-byte alignment, and the offsets of all named (non-anonymous)
// fields. A non-zero array index fails const evaluation.
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of io_uring_sqe"][::std::mem::size_of::<io_uring_sqe>() - 64usize];
    ["Alignment of io_uring_sqe"][::std::mem::align_of::<io_uring_sqe>() - 8usize];
    ["Offset of field: io_uring_sqe::opcode"]
        [::std::mem::offset_of!(io_uring_sqe, opcode) - 0usize];
    ["Offset of field: io_uring_sqe::flags"][::std::mem::offset_of!(io_uring_sqe, flags) - 1usize];
    ["Offset of field: io_uring_sqe::ioprio"]
        [::std::mem::offset_of!(io_uring_sqe, ioprio) - 2usize];
    ["Offset of field: io_uring_sqe::fd"][::std::mem::offset_of!(io_uring_sqe, fd) - 4usize];
    ["Offset of field: io_uring_sqe::len"][::std::mem::offset_of!(io_uring_sqe, len) - 24usize];
    ["Offset of field: io_uring_sqe::user_data"]
        [::std::mem::offset_of!(io_uring_sqe, user_data) - 32usize];
    ["Offset of field: io_uring_sqe::personality"]
        [::std::mem::offset_of!(io_uring_sqe, personality) - 42usize];
};
impl Default for io_uring_sqe {
    /// Zero-initialized submission queue entry.
    fn default() -> Self {
        // SAFETY: the struct consists of integers and unions of integers,
        // so the all-zero bit pattern is a valid value.
        unsafe { ::std::mem::zeroed() }
    }
}
/// Bit positions of the `IOSQE_*` sqe flags (C enum `io_uring_sqe_flags_bit`
/// mapped to a module of constants).
pub mod io_uring_sqe_flags_bit {
    /// Underlying C enum representation.
    pub type Type = ::std::os::raw::c_uint;
    pub const IOSQE_FIXED_FILE_BIT: Type = 0;
    pub const IOSQE_IO_DRAIN_BIT: Type = 1;
    pub const IOSQE_IO_LINK_BIT: Type = 2;
    pub const IOSQE_IO_HARDLINK_BIT: Type = 3;
    pub const IOSQE_ASYNC_BIT: Type = 4;
    pub const IOSQE_BUFFER_SELECT_BIT: Type = 5;
    pub const IOSQE_CQE_SKIP_SUCCESS_BIT: Type = 6;
}
/// Operation codes for `io_uring_sqe::opcode` (C enum `io_uring_op` mapped
/// to a module of constants). `IORING_OP_LAST` is a count sentinel, not a
/// real opcode.
pub mod io_uring_op {
    /// Underlying C enum representation.
    pub type Type = ::std::os::raw::c_uint;
    pub const IORING_OP_NOP: Type = 0;
    pub const IORING_OP_READV: Type = 1;
    pub const IORING_OP_WRITEV: Type = 2;
    pub const IORING_OP_FSYNC: Type = 3;
    pub const IORING_OP_READ_FIXED: Type = 4;
    pub const IORING_OP_WRITE_FIXED: Type = 5;
    pub const IORING_OP_POLL_ADD: Type = 6;
    pub const IORING_OP_POLL_REMOVE: Type = 7;
    pub const IORING_OP_SYNC_FILE_RANGE: Type = 8;
    pub const IORING_OP_SENDMSG: Type = 9;
    pub const IORING_OP_RECVMSG: Type = 10;
    pub const IORING_OP_TIMEOUT: Type = 11;
    pub const IORING_OP_TIMEOUT_REMOVE: Type = 12;
    pub const IORING_OP_ACCEPT: Type = 13;
    pub const IORING_OP_ASYNC_CANCEL: Type = 14;
    pub const IORING_OP_LINK_TIMEOUT: Type = 15;
    pub const IORING_OP_CONNECT: Type = 16;
    pub const IORING_OP_FALLOCATE: Type = 17;
    pub const IORING_OP_OPENAT: Type = 18;
    pub const IORING_OP_CLOSE: Type = 19;
    pub const IORING_OP_FILES_UPDATE: Type = 20;
    pub const IORING_OP_STATX: Type = 21;
    pub const IORING_OP_READ: Type = 22;
    pub const IORING_OP_WRITE: Type = 23;
    pub const IORING_OP_FADVISE: Type = 24;
    pub const IORING_OP_MADVISE: Type = 25;
    pub const IORING_OP_SEND: Type = 26;
    pub const IORING_OP_RECV: Type = 27;
    pub const IORING_OP_OPENAT2: Type = 28;
    pub const IORING_OP_EPOLL_CTL: Type = 29;
    pub const IORING_OP_SPLICE: Type = 30;
    pub const IORING_OP_PROVIDE_BUFFERS: Type = 31;
    pub const IORING_OP_REMOVE_BUFFERS: Type = 32;
    pub const IORING_OP_TEE: Type = 33;
    pub const IORING_OP_SHUTDOWN: Type = 34;
    pub const IORING_OP_RENAMEAT: Type = 35;
    pub const IORING_OP_UNLINKAT: Type = 36;
    pub const IORING_OP_MKDIRAT: Type = 37;
    pub const IORING_OP_SYMLINKAT: Type = 38;
    pub const IORING_OP_LINKAT: Type = 39;
    pub const IORING_OP_MSG_RING: Type = 40;
    pub const IORING_OP_FSETXATTR: Type = 41;
    pub const IORING_OP_SETXATTR: Type = 42;
    pub const IORING_OP_FGETXATTR: Type = 43;
    pub const IORING_OP_GETXATTR: Type = 44;
    pub const IORING_OP_SOCKET: Type = 45;
    pub const IORING_OP_URING_CMD: Type = 46;
    pub const IORING_OP_SEND_ZC: Type = 47;
    pub const IORING_OP_SENDMSG_ZC: Type = 48;
    pub const IORING_OP_READ_MULTISHOT: Type = 49;
    pub const IORING_OP_WAITID: Type = 50;
    pub const IORING_OP_FUTEX_WAIT: Type = 51;
    pub const IORING_OP_FUTEX_WAKE: Type = 52;
    pub const IORING_OP_FUTEX_WAITV: Type = 53;
    pub const IORING_OP_FIXED_FD_INSTALL: Type = 54;
    pub const IORING_OP_FTRUNCATE: Type = 55;
    pub const IORING_OP_BIND: Type = 56;
    pub const IORING_OP_LISTEN: Type = 57;
    pub const IORING_OP_LAST: Type = 58;
}
/// Msg-ring command values (C enum `io_uring_msg_ring_flags` mapped to a
/// module of constants).
pub mod io_uring_msg_ring_flags {
    /// Underlying C enum representation.
    pub type Type = ::std::os::raw::c_uint;
    pub const IORING_MSG_DATA: Type = 0;
    pub const IORING_MSG_SEND_FD: Type = 1;
}
/// Completion queue entry (`struct io_uring_cqe`, 16 bytes): the submitter's
/// `user_data`, a result value `res`, and `IORING_CQE_F_*` flag bits.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
pub struct io_uring_cqe {
    pub user_data: __u64,
    pub res: __s32,
    pub flags: __u32,
}
// Compile-time layout checks (a non-zero array index fails const eval).
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of io_uring_cqe"][::std::mem::size_of::<io_uring_cqe>() - 16usize];
    ["Alignment of io_uring_cqe"][::std::mem::align_of::<io_uring_cqe>() - 8usize];
    ["Offset of field: io_uring_cqe::user_data"]
        [::std::mem::offset_of!(io_uring_cqe, user_data) - 0usize];
    ["Offset of field: io_uring_cqe::res"][::std::mem::offset_of!(io_uring_cqe, res) - 8usize];
    ["Offset of field: io_uring_cqe::flags"][::std::mem::offset_of!(io_uring_cqe, flags) - 12usize];
};
/// Submission-ring field offsets (`struct io_sqring_offsets`, 40 bytes),
/// filled in by the kernel inside `io_uring_params::sq_off`.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct io_sqring_offsets {
    pub head: __u32,
    pub tail: __u32,
    pub ring_mask: __u32,
    pub ring_entries: __u32,
    pub flags: __u32,
    pub dropped: __u32,
    pub array: __u32,
    pub resv1: __u32,
    pub user_addr: __u64,
}
// Compile-time layout checks (a non-zero array index fails const eval).
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of io_sqring_offsets"][::std::mem::size_of::<io_sqring_offsets>() - 40usize];
    ["Alignment of io_sqring_offsets"][::std::mem::align_of::<io_sqring_offsets>() - 8usize];
    ["Offset of field: io_sqring_offsets::head"]
        [::std::mem::offset_of!(io_sqring_offsets, head) - 0usize];
    ["Offset of field: io_sqring_offsets::tail"]
        [::std::mem::offset_of!(io_sqring_offsets, tail) - 4usize];
    ["Offset of field: io_sqring_offsets::ring_mask"]
        [::std::mem::offset_of!(io_sqring_offsets, ring_mask) - 8usize];
    ["Offset of field: io_sqring_offsets::ring_entries"]
        [::std::mem::offset_of!(io_sqring_offsets, ring_entries) - 12usize];
    ["Offset of field: io_sqring_offsets::flags"]
        [::std::mem::offset_of!(io_sqring_offsets, flags) - 16usize];
    ["Offset of field: io_sqring_offsets::dropped"]
        [::std::mem::offset_of!(io_sqring_offsets, dropped) - 20usize];
    ["Offset of field: io_sqring_offsets::array"]
        [::std::mem::offset_of!(io_sqring_offsets, array) - 24usize];
    ["Offset of field: io_sqring_offsets::resv1"]
        [::std::mem::offset_of!(io_sqring_offsets, resv1) - 28usize];
    ["Offset of field: io_sqring_offsets::user_addr"]
        [::std::mem::offset_of!(io_sqring_offsets, user_addr) - 32usize];
};
/// Completion-ring field offsets (`struct io_cqring_offsets`, 40 bytes),
/// filled in by the kernel inside `io_uring_params::cq_off`.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct io_cqring_offsets {
    pub head: __u32,
    pub tail: __u32,
    pub ring_mask: __u32,
    pub ring_entries: __u32,
    pub overflow: __u32,
    pub cqes: __u32,
    pub flags: __u32,
    pub resv1: __u32,
    pub user_addr: __u64,
}
// Compile-time layout checks (a non-zero array index fails const eval).
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    ["Size of io_cqring_offsets"][::std::mem::size_of::<io_cqring_offsets>() - 40usize];
    ["Alignment of io_cqring_offsets"][::std::mem::align_of::<io_cqring_offsets>() - 8usize];
    ["Offset of field: io_cqring_offsets::head"]
        [::std::mem::offset_of!(io_cqring_offsets, head) - 0usize];
    ["Offset of field: io_cqring_offsets::tail"]
        [::std::mem::offset_of!(io_cqring_offsets, tail) - 4usize];
    ["Offset of field: io_cqring_offsets::ring_mask"]
        [::std::mem::offset_of!(io_cqring_offsets, ring_mask) - 8usize];
    ["Offset of field: io_cqring_offsets::ring_entries"]
        [::std::mem::offset_of!(io_cqring_offsets, ring_entries) - 12usize];
    ["Offset of field: io_cqring_offsets::overflow"]
        [::std::mem::offset_of!(io_cqring_offsets, overflow) - 16usize];
    ["Offset of field: io_cqring_offsets::cqes"]
        [::std::mem::offset_of!(io_cqring_offsets, cqes) - 20usize];
    ["Offset of field: io_cqring_offsets::flags"]
        [::std::mem::offset_of!(io_cqring_offsets, flags) - 24usize];
    ["Offset of field: io_cqring_offsets::resv1"]
        [::std::mem::offset_of!(io_cqring_offsets, resv1) - 28usize];
    ["Offset of field: io_cqring_offsets::user_addr"]
        [::std::mem::offset_of!(io_cqring_offsets, user_addr) - 32usize];
};
/// Ring setup parameters (`struct io_uring_params`, 120 bytes per the layout
/// checks that follow): entry counts, `IORING_SETUP_*` flags, SQPOLL tuning,
/// `IORING_FEAT_*` feature bits, and the kernel-filled ring offset tables.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct io_uring_params {
    pub sq_entries: __u32,
    pub cq_entries: __u32,
    pub flags: __u32,
    pub sq_thread_cpu: __u32,
    pub sq_thread_idle: __u32,
    pub features: __u32,
    pub wq_fd: __u32,
    pub resv: [__u32; 3usize],
    pub sq_off: io_sqring_offsets,
    pub cq_off: io_cqring_offsets,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of io_uring_params"][::std::mem::size_of::<io_uring_params>() - 120usize];
["Alignment of io_uring_params"][::std::mem::align_of::<io_uring_params>() - 8usize];
["Offset of field: io_uring_params::sq_entries"]
[::std::mem::offset_of!(io_uring_params, sq_entries) - 0usize];
["Offset of field: io_uring_params::cq_entries"]
[::std::mem::offset_of!(io_uring_params, cq_entries) - 4usize];
["Offset of field: io_uring_params::flags"]
[::std::mem::offset_of!(io_uring_params, flags) - 8usize];
["Offset of field: io_uring_params::sq_thread_cpu"]
[::std::mem::offset_of!(io_uring_params, sq_thread_cpu) - 12usize];
["Offset of field: io_uring_params::sq_thread_idle"]
[::std::mem::offset_of!(io_uring_params, sq_thread_idle) - 16usize];
["Offset of field: io_uring_params::features"]
[::std::mem::offset_of!(io_uring_params, features) - 20usize];
["Offset of field: io_uring_params::wq_fd"]
[::std::mem::offset_of!(io_uring_params, wq_fd) - 24usize];
["Offset of field: io_uring_params::resv"]
[::std::mem::offset_of!(io_uring_params, resv) - 28usize];
["Offset of field: io_uring_params::sq_off"]
[::std::mem::offset_of!(io_uring_params, sq_off) - 40usize];
["Offset of field: io_uring_params::cq_off"]
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/io_uring/operation/sqe.rs | src/vmm/src/io_uring/operation/sqe.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fmt::{self};
use crate::io_uring::generated::io_uring_sqe;
use crate::vstate::memory::ByteValued;
// SAFETY: Struct is POD and contains no references or niches.
unsafe impl ByteValued for io_uring_sqe {}
/// Newtype wrapper over a raw sqe.
pub(crate) struct Sqe(pub(crate) io_uring_sqe);
impl fmt::Debug for Sqe {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Sqe").finish()
}
}
impl Sqe {
/// Construct a new sqe.
pub(crate) fn new(inner: io_uring_sqe) -> Self {
Self(inner)
}
/// Return the key to the `user_data` stored in slab.
pub(crate) fn user_data(&self) -> u64 {
self.0.user_data
}
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use super::*;
#[test]
fn test_user_data() {
let user_data = 10_u64;
let mut inner: io_uring_sqe = unsafe { std::mem::zeroed() };
inner.user_data = user_data;
let sqe: Sqe = Sqe::new(inner);
assert_eq!(sqe.user_data(), 10);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/io_uring/operation/cqe.rs | src/vmm/src/io_uring/operation/cqe.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fmt::Debug;
use crate::io_uring::generated::io_uring_cqe;
use crate::vstate::memory::ByteValued;
// SAFETY: Struct is POD and contains no references or niches.
unsafe impl ByteValued for io_uring_cqe {}
/// Wrapper over a completed operation.
#[derive(Debug)]
pub struct Cqe<T> {
res: i32,
user_data: T,
}
impl<T: Debug> Cqe<T> {
/// Construct a Cqe object.
pub fn new(res: i32, user_data: T) -> Self {
Self { res, user_data }
}
/// Return the number of bytes successfully transferred by this operation.
pub fn count(&self) -> u32 {
u32::try_from(self.res).unwrap_or(0)
}
/// Return the result associated to the IO operation.
pub fn result(&self) -> Result<u32, std::io::Error> {
let res = self.res;
if res < 0 {
Err(std::io::Error::from_raw_os_error(res))
} else {
Ok(u32::try_from(self.res).unwrap())
}
}
/// Create a new Cqe, applying the passed function to the user_data.
pub fn map_user_data<U: Debug, F: FnOnce(T) -> U>(self, op: F) -> Cqe<U> {
Cqe {
res: self.res,
user_data: op(self.user_data()),
}
}
/// Consume the object and return the user_data.
pub fn user_data(self) -> T {
self.user_data
}
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use super::*;
#[test]
fn test_result() {
// Check that `result()` returns an `Error` when `res` is negative.
{
let user_data = 10_u8;
let cqe: Cqe<u8> = Cqe::new(-22, user_data);
assert_eq!(
cqe.result().unwrap_err().kind(),
std::io::Error::from_raw_os_error(-22).kind()
);
}
// Check that `result()` returns Ok() when `res` is positive.
{
let user_data = 10_u8;
let cqe: Cqe<u8> = Cqe::new(128, user_data);
assert_eq!(cqe.result().unwrap(), 128);
}
}
#[test]
fn test_user_data() {
let user_data = 10_u8;
let cqe: Cqe<u8> = Cqe::new(0, user_data);
assert_eq!(cqe.user_data(), 10);
}
#[test]
fn test_map_user_data() {
let user_data = 10_u8;
let cqe: Cqe<u8> = Cqe::new(0, user_data);
let cqe = cqe.map_user_data(|x| x + 1);
assert_eq!(cqe.user_data(), 11);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/io_uring/operation/mod.rs | src/vmm/src/io_uring/operation/mod.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Module exposing data structures for working with io_uring operations.
mod cqe;
mod sqe;
use std::convert::From;
use std::fmt::{self, Debug};
pub use cqe::Cqe;
pub(crate) use sqe::Sqe;
use crate::io_uring::generated::{io_uring_op, io_uring_sqe, io_uring_sqe_flags_bit};
/// The index of a registered fd.
pub type FixedFd = u32;
#[repr(u8)]
#[derive(Debug, Clone, Copy)]
// These constants are generated as u32, but we use u8; const try_from() is unstable
#[allow(clippy::cast_possible_truncation)]
/// Supported operation types.
pub enum OpCode {
/// Read operation.
Read = io_uring_op::IORING_OP_READ as u8,
/// Write operation.
Write = io_uring_op::IORING_OP_WRITE as u8,
/// Fsync operation.
Fsync = io_uring_op::IORING_OP_FSYNC as u8,
}
// Useful for outputting errors.
impl From<OpCode> for &'static str {
fn from(opcode: OpCode) -> Self {
match opcode {
OpCode::Read => "read",
OpCode::Write => "write",
OpCode::Fsync => "fsync",
}
}
}
/// Operation type for populating the submission queue, parametrised with the `user_data` type `T`.
/// The `user_data` is used for identifying the operation once completed.
pub struct Operation<T> {
fd: FixedFd,
pub(crate) opcode: OpCode,
pub(crate) addr: Option<usize>,
pub(crate) len: Option<u32>,
flags: u8,
pub(crate) offset: Option<u64>,
pub(crate) user_data: T,
}
// Needed for proptesting.
impl<T> fmt::Debug for Operation<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"
Operation {{
opcode: {:?},
addr: {:?},
len: {:?},
offset: {:?},
}}
",
self.opcode, self.addr, self.len, self.offset
)
}
}
#[allow(clippy::len_without_is_empty)]
impl<T: Debug> Operation<T> {
/// Construct a read operation.
pub fn read(fd: FixedFd, addr: usize, len: u32, offset: u64, user_data: T) -> Self {
Self {
fd,
opcode: OpCode::Read,
addr: Some(addr),
len: Some(len),
flags: 0,
offset: Some(offset),
user_data,
}
}
/// Construct a write operation.
pub fn write(fd: FixedFd, addr: usize, len: u32, offset: u64, user_data: T) -> Self {
Self {
fd,
opcode: OpCode::Write,
addr: Some(addr),
len: Some(len),
flags: 0,
offset: Some(offset),
user_data,
}
}
/// Construct a fsync operation.
pub fn fsync(fd: FixedFd, user_data: T) -> Self {
Self {
fd,
opcode: OpCode::Fsync,
addr: None,
len: None,
flags: 0,
offset: None,
user_data,
}
}
pub(crate) fn fd(&self) -> FixedFd {
self.fd
}
// Needed for proptesting.
#[cfg(test)]
pub(crate) fn set_linked(&mut self) {
self.flags |= 1 << io_uring_sqe_flags_bit::IOSQE_IO_LINK_BIT;
}
/// Transform the operation into an `Sqe`.
/// Note: remember remove user_data from slab or it will leak.
pub(crate) fn into_sqe(self, slab: &mut slab::Slab<T>) -> Sqe {
// SAFETY:
// Safe because all-zero value is valid. The sqe is made up of integers and raw pointers.
let mut inner: io_uring_sqe = unsafe { std::mem::zeroed() };
inner.opcode = self.opcode as u8;
inner.fd = i32::try_from(self.fd).unwrap();
// Simplifying assumption that we only used pre-registered FDs.
inner.flags = self.flags | (1 << io_uring_sqe_flags_bit::IOSQE_FIXED_FILE_BIT);
if let Some(addr) = self.addr {
inner.__bindgen_anon_2.addr = addr as u64;
}
if let Some(len) = self.len {
inner.len = len;
}
if let Some(offset) = self.offset {
inner.__bindgen_anon_1.off = offset;
}
inner.user_data = slab.insert(self.user_data) as u64;
Sqe::new(inner)
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/io_uring/queue/mmap.rs | src/vmm/src/io_uring/queue/mmap.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::io::Error as IOError;
use std::os::unix::io::RawFd;
use vm_memory::mmap::MmapRegionError;
use crate::vstate::memory::MmapRegion;
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum MmapError {
/// Os: {0}
Os(IOError),
/// BuildMmapRegion: {0}
BuildMmapRegion(MmapRegionError),
}
pub(crate) fn mmap(size: usize, fd: RawFd, offset: i64) -> Result<MmapRegion, MmapError> {
let prot = libc::PROT_READ | libc::PROT_WRITE;
let flags = libc::MAP_SHARED | libc::MAP_POPULATE;
// SAFETY: Safe because values are valid and we check the return value.
let ptr = unsafe { libc::mmap(std::ptr::null_mut(), size, prot, flags, fd, offset) };
if (ptr as isize) < 0 {
return Err(MmapError::Os(IOError::last_os_error()));
}
// SAFETY: Safe because the mmap did not return error.
unsafe {
MmapRegion::build_raw(ptr.cast::<u8>(), size, prot, flags)
.map_err(MmapError::BuildMmapRegion)
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/io_uring/queue/completion.rs | src/vmm/src/io_uring/queue/completion.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fmt::Debug;
use std::num::Wrapping;
use std::os::unix::io::RawFd;
use std::sync::atomic::Ordering;
use vm_memory::{Bytes, VolatileMemory, VolatileMemoryError};
use super::mmap::{MmapError, mmap};
use crate::io_uring::generated;
use crate::io_uring::operation::Cqe;
use crate::vstate::memory::MmapRegion;
#[derive(Debug, thiserror::Error, displaydoc::Display)]
/// CQueue Error.
pub enum CQueueError {
/// Error mapping the ring: {0}
Mmap(#[from] MmapError),
/// Error reading/writing volatile memory: {0}
VolatileMemory(#[from] VolatileMemoryError),
/// Error in removing data from the slab
SlabRemoveFailed,
}
#[derive(Debug)]
pub(crate) struct CompletionQueue {
// Offsets.
head_off: usize,
tail_off: usize,
cqes_off: usize,
// Cached values.
unmasked_head: Wrapping<u32>,
count: u32,
ring_mask: u32,
// Mmap-ed cqes ring.
cqes: MmapRegion,
}
impl CompletionQueue {
pub(crate) fn new(
io_uring_fd: RawFd,
params: &generated::io_uring_params,
) -> Result<Self, CQueueError> {
let offsets = params.cq_off;
// Map the CQ_ring. The actual size of the ring is `num_entries * size_of(entry_type)`.
// To this we add an offset as per the io_uring specifications.
let ring_size = (params.cq_off.cqes as usize)
+ (params.cq_entries as usize) * std::mem::size_of::<generated::io_uring_cqe>();
let cqes = mmap(ring_size, io_uring_fd, generated::IORING_OFF_CQ_RING.into())?;
let ring = cqes.as_volatile_slice();
let ring_mask = ring.read_obj(offsets.ring_mask as usize)?;
Ok(Self {
// safe because it's an u32 offset
head_off: offsets.head as usize,
// safe because it's an u32 offset
tail_off: offsets.tail as usize,
// safe because it's an u32 offset
cqes_off: offsets.cqes as usize,
// We can init this to 0 and cache it because we are the only ones modifying it.
unmasked_head: Wrapping(0),
count: params.cq_entries,
ring_mask,
cqes,
})
}
pub(crate) fn count(&self) -> u32 {
self.count
}
pub(crate) fn pop<T: Debug>(
&mut self,
slab: &mut slab::Slab<T>,
) -> Result<Option<Cqe<T>>, CQueueError> {
let ring = self.cqes.as_volatile_slice();
// get the head & tail
let head = self.unmasked_head.0 & self.ring_mask;
let unmasked_tail = ring.load::<u32>(self.tail_off, Ordering::Acquire)?;
// validate that we have smth to fetch
if Wrapping(unmasked_tail) - self.unmasked_head > Wrapping(0) {
let cqe: generated::io_uring_cqe = ring.read_obj(
self.cqes_off + (head as usize) * std::mem::size_of::<generated::io_uring_cqe>(),
)?;
// increase the head
self.unmasked_head += Wrapping(1u32);
ring.store(self.unmasked_head.0, self.head_off, Ordering::Release)?;
let res = cqe.res;
#[allow(clippy::cast_possible_truncation)]
let index = cqe.user_data as usize;
match slab.try_remove(index) {
Some(user_data) => Ok(Some(Cqe::new(res, user_data))),
None => Err(CQueueError::SlabRemoveFailed),
}
} else {
Ok(None)
}
}
}
impl Drop for CompletionQueue {
fn drop(&mut self) {
// SAFETY: Safe because parameters are valid.
unsafe { libc::munmap(self.cqes.as_ptr().cast::<libc::c_void>(), self.cqes.size()) };
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/io_uring/queue/submission.rs | src/vmm/src/io_uring/queue/submission.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fmt::Debug;
use std::io::Error as IOError;
use std::mem;
use std::num::Wrapping;
use std::os::unix::io::RawFd;
use std::sync::atomic::Ordering;
use vm_memory::{VolatileMemory, VolatileMemoryError};
use vmm_sys_util::syscall::SyscallReturnCode;
use super::mmap::{MmapError, mmap};
use crate::io_uring::generated;
use crate::io_uring::operation::Sqe;
use crate::vstate::memory::{Bytes, MmapRegion};
#[derive(Debug, thiserror::Error, displaydoc::Display)]
/// SQueue Error.
pub enum SQueueError {
/// The queue is full.
FullQueue,
/// Error mapping the ring: {0}
Mmap(#[from] MmapError),
/// Error reading/writing volatile memory: {0}
VolatileMemory(#[from] VolatileMemoryError),
/// Error returned by `io_uring_enter`: {0}
Submit(#[from] IOError),
}
#[derive(Debug)]
pub(crate) struct SubmissionQueue {
io_uring_fd: RawFd,
// Offsets.
head_off: usize,
tail_off: usize,
// Cached values.
ring_mask: u32,
count: u32,
unmasked_tail: Wrapping<u32>,
// Mmap-ed ring.
ring: MmapRegion,
// Mmap-ed sqes.
sqes: MmapRegion,
// Number of ops yet to be submitted.
to_submit: u32,
}
impl SubmissionQueue {
pub(crate) fn new(
io_uring_fd: RawFd,
params: &generated::io_uring_params,
) -> Result<Self, SQueueError> {
let (ring, sqes) = Self::mmap(io_uring_fd, params)?;
let ring_slice = ring.as_volatile_slice();
// since we don't need the extra layer of indirection, we can simply map the index array
// to be array[i] = i;
let sq_array = ring_slice.offset(params.sq_off.array as usize)?;
for i in 0..params.sq_entries {
sq_array.write_obj(i, mem::size_of::<u32>() * (i as usize))?;
}
let ring_mask = ring_slice.read_obj(params.sq_off.ring_mask as usize)?;
Ok(Self {
io_uring_fd,
head_off: params.sq_off.head as usize,
tail_off: params.sq_off.tail as usize,
ring_mask,
count: params.sq_entries,
// We can init this to 0 and cache it because we are the only ones modifying it.
unmasked_tail: Wrapping(0),
ring,
sqes,
to_submit: 0,
})
}
pub(crate) fn push(&mut self, sqe: Sqe) -> Result<(), (SQueueError, u64)> {
let ring_slice = self.ring.as_volatile_slice();
// get the sqe tail
let tail = self.unmasked_tail.0 & self.ring_mask;
// get the pending sqes
let pending = match self.pending() {
Ok(n) => n,
Err(err) => return Err((err, sqe.user_data())),
};
if pending >= self.count {
return Err((SQueueError::FullQueue, sqe.user_data()));
}
// retrieve and populate the sqe
if let Err(err) = self.sqes.as_volatile_slice().write_obj(
sqe.0,
(tail as usize) * mem::size_of::<generated::io_uring_sqe>(),
) {
return Err((SQueueError::VolatileMemory(err), sqe.user_data()));
}
// increment the sqe tail
self.unmasked_tail += Wrapping(1u32);
if let Err(err) = ring_slice.store(self.unmasked_tail.0, self.tail_off, Ordering::Release) {
return Err((SQueueError::VolatileMemory(err), sqe.user_data()));
}
// This is safe since we already checked if there is enough space in the queue;
self.to_submit += 1;
Ok(())
}
pub(crate) fn submit(&mut self, min_complete: u32) -> Result<u32, SQueueError> {
if self.to_submit == 0 && min_complete == 0 {
// Nothing to submit and nothing to wait for.
return Ok(0);
}
let mut flags = 0;
if min_complete > 0 {
flags |= generated::IORING_ENTER_GETEVENTS;
}
// SAFETY: Safe because values are valid and we check the return value.
let submitted = SyscallReturnCode(unsafe {
libc::syscall(
libc::SYS_io_uring_enter,
self.io_uring_fd,
self.to_submit,
min_complete,
flags,
std::ptr::null::<libc::sigset_t>(),
)
})
.into_result()?;
// It's safe to convert to u32 since the syscall didn't return an error.
let submitted = u32::try_from(submitted).unwrap();
// This is safe since submitted <= self.to_submit. However we use a saturating_sub
// for extra safety.
self.to_submit = self.to_submit.saturating_sub(submitted);
Ok(submitted)
}
fn mmap(
io_uring_fd: RawFd,
params: &generated::io_uring_params,
) -> Result<(MmapRegion, MmapRegion), SQueueError> {
// map the SQ_ring. The actual size of the ring is `num_entries * size_of(entry_type)`.
// To this we add an offset as per the io_uring specifications.
let sqe_ring_size =
(params.sq_off.array as usize) + (params.sq_entries as usize) * mem::size_of::<u32>();
let sqe_ring = mmap(
sqe_ring_size,
io_uring_fd,
generated::IORING_OFF_SQ_RING.into(),
)?;
// map the SQEs.
let sqes_array_size =
(params.sq_entries as usize) * mem::size_of::<generated::io_uring_sqe>();
let sqes = mmap(
sqes_array_size,
io_uring_fd,
generated::IORING_OFF_SQES.into(),
)?;
Ok((sqe_ring, sqes))
}
pub(crate) fn pending(&self) -> Result<u32, SQueueError> {
let ring_slice = self.ring.as_volatile_slice();
// get the sqe head
let unmasked_head = ring_slice.load::<u32>(self.head_off, Ordering::Acquire)?;
Ok((self.unmasked_tail - Wrapping(unmasked_head)).0)
}
}
impl Drop for SubmissionQueue {
fn drop(&mut self) {
// SAFETY: Safe because parameters are valid.
unsafe { libc::munmap(self.ring.as_ptr().cast::<libc::c_void>(), self.ring.size()) };
// SAFETY: Safe because parameters are valid.
unsafe { libc::munmap(self.sqes.as_ptr().cast::<libc::c_void>(), self.sqes.size()) };
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/io_uring/queue/mod.rs | src/vmm/src/io_uring/queue/mod.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
pub mod completion;
mod mmap;
pub mod submission;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/dumbo/mod.rs | src/vmm/src/dumbo/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Provides helper logic for parsing and writing protocol data units, and minimalist
//! implementations of a TCP listener, a TCP connection, and an HTTP/1.1 server.
pub mod pdu;
pub mod tcp;
use std::ops::Index;
pub use crate::dumbo::pdu::arp::{ETH_IPV4_FRAME_LEN, EthIPv4ArpFrame};
pub use crate::dumbo::pdu::ethernet::{
ETHERTYPE_ARP, ETHERTYPE_IPV4, EthernetFrame, PAYLOAD_OFFSET as ETHERNET_PAYLOAD_OFFSET,
};
pub use crate::dumbo::pdu::ipv4::{IPv4Packet, PROTOCOL_TCP, PROTOCOL_UDP};
pub use crate::dumbo::pdu::udp::{UDP_HEADER_SIZE, UdpDatagram};
use crate::utils::net::mac::MacAddr;
/// Represents a generalization of a borrowed `[u8]` slice.
#[allow(clippy::len_without_is_empty)]
pub trait ByteBuffer: Index<usize, Output = u8> {
/// Returns the length of the buffer.
fn len(&self) -> usize;
/// Reads `buf.len()` bytes from `self` into `buf`, starting at `offset`.
///
/// # Panics
///
/// Panics if `offset + buf.len()` > `self.len()`.
fn read_to_slice(&self, offset: usize, buf: &mut [u8]);
}
impl ByteBuffer for [u8] {
#[inline]
fn len(&self) -> usize {
self.len()
}
#[inline]
fn read_to_slice(&self, offset: usize, buf: &mut [u8]) {
let buf_len = buf.len();
buf.copy_from_slice(&self[offset..offset + buf_len]);
}
}
#[cfg(test)]
mod tests {
use std::fmt::Debug;
use super::*;
fn bb_len<T: ByteBuffer + ?Sized + Debug>(buf: &T) -> usize {
buf.len()
}
fn bb_is_empty<T: ByteBuffer + ?Sized + Debug>(buf: &T) -> bool {
buf.len() == 0
}
fn bb_read_from_1<T: ByteBuffer + ?Sized + Debug>(src: &T, dst: &mut [u8]) {
src.read_to_slice(1, dst);
}
#[test]
fn test_u8_byte_buffer() {
let a = [1u8, 2, 3];
let mut b = [0u8; 2];
assert_eq!(bb_len(a.as_ref()), a.len());
assert!(!bb_is_empty(a.as_ref()));
bb_read_from_1(a.as_ref(), b.as_mut());
assert_eq!(b, [2, 3]);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/dumbo/tcp/connection.rs | src/vmm/src/dumbo/tcp/connection.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! This module contains a minimalist TCP [`Connection`] implementation, which only supports
//! passive open scenarios, and some auxiliary logic and data structures.
//!
//! [`Connection`]: struct.Connection.html
use std::fmt::Debug;
use std::num::{NonZeroU16, NonZeroU64, NonZeroUsize, Wrapping};
use bitflags::bitflags;
use vmm_sys_util::rand::xor_pseudo_rng_u32;
use crate::dumbo::ByteBuffer;
use crate::dumbo::pdu::Incomplete;
use crate::dumbo::pdu::bytes::NetworkBytes;
use crate::dumbo::pdu::tcp::{Flags as TcpFlags, TcpError as TcpSegmentError, TcpSegment};
use crate::dumbo::tcp::{
MAX_WINDOW_SIZE, MSS_DEFAULT, NextSegmentStatus, RstConfig, seq_after, seq_at_or_after,
};
bitflags! {
// We use a set of flags, instead of a state machine, to represent the connection status. Some
// parts of the status information are reflected in other fields of the Connection struct, such
// as Connection::fin_received.
#[derive(Debug, Clone, PartialEq)]
struct ConnStatusFlags: u8 {
const SYN_RECEIVED = 1;
const SYNACK_SENT = 1 << 1;
const ESTABLISHED = 1 << 2;
// We signal the end of the TX half by setting Connection.send_fin to Some(sequence_number),
// and use this flag to record that at least one FIN segment has been sent.
const FIN_SENT = 1 << 3;
// The other endpoint has ACKed our FIN.
const FIN_ACKED = 1 << 4;
// The connection is reset, because we either sent, or received a RST segment.
const RESET = 1 << 5;
}
}
bitflags! {
/// Represents any unusual conditions which may occur when receiving a TCP segment.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct RecvStatusFlags: u16 {
/// The acknowledgement number is invalid.
const INVALID_ACK = 1;
/// The connection received a duplicate ACK.
const DUP_ACK = 1 << 1;
/// The connection received a data segment which does not fall within the limits of the
/// current receive window.
const SEGMENT_BEYOND_RWND = 1 << 2;
/// The connection received a data segment, but the sequence number does not match the
/// next expected sequence number.
const UNEXPECTED_SEQ = 1 << 3;
/// The other endpoint advertised a receive window edge which has been moved to the left.
const REMOTE_RWND_EDGE = 1 << 4;
/// The other endpoint transmitted additional data after sending a `FIN`.
const DATA_BEYOND_FIN = 1 << 5;
/// The connection received a valid `RST` segment.
const RESET_RECEIVED = 1 << 6;
/// The connection received an invalid `RST` segment.
const INVALID_RST = 1 << 7;
/// The connection received an invalid segment for its current state.
const INVALID_SEGMENT = 1 << 8;
/// The connection is resetting, and will switch to being reset after getting the
/// chance to transmit a `RST` segment.
const CONN_RESETTING = 1 << 9;
/// The connection received a `FIN` whose sequence number does not match the next
/// expected sequence number.
const INVALID_FIN = 1 << 10;
}
}
/// Defines a segment payload source.
///
/// When not `None`, it contains a [`ByteBuffer`] which holds the actual data, and the sequence
/// number associated with the first byte from the buffer.
///
/// [`ByteBuffer`]: ../../trait.ByteBuffer.html
// R should have the trait bound R: ByteBuffer, but bounds are ignored on type aliases.
pub type PayloadSource<'a, R> = Option<(&'a R, Wrapping<u32>)>;
/// Describes errors which may occur during a passive open.
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum PassiveOpenError {
/// The incoming segment is not a valid `SYN`.
InvalidSyn,
/// The `SYN` segment carries an invalid `MSS` option.
MssOption,
}
/// Describes errors which may occur when an existing connection receives a TCP segment.
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum RecvError {
/// The payload length is larger than the receive buffer size.
BufferTooSmall,
/// The connection cannot receive the segment because it has been previously reset.
ConnectionReset,
}
/// Describes errors which may occur when a connection attempts to write a segment.
/// Needs `rustfmt::skip` to make multiline comments work
#[rustfmt::skip]
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum WriteNextError {
/// The connection cannot write the segment because it has been previously reset.
ConnectionReset,
/// The write sends additional data after a `FIN` has been transmitted.
DataAfterFin,
/** The remaining MSS (which can be reduced by IP and/or TCP options) is not large enough to \
write the segment. */
MssRemaining,
// The payload source specifies a buffer larger than [`MAX_WINDOW_SIZE`].
//
// [`MAX_WINDOW_SIZE`]: ../constant.MAX_WINDOW_SIZE.html
/// The payload source is too large.
PayloadBufTooLarge,
/// The payload source does not contain the first sequence number that should be sent.
PayloadMissingSeq,
/// An error occurred during the actual write to the buffer: {0}
TcpSegment(#[from] TcpSegmentError),
}
/// Contains the state information and implements the logic for a minimalist TCP connection.
///
/// One particular thing is that whenever the connection sends a `RST` segment, it will also stop
/// working itself. This is just a design decision for our envisioned use cases;
/// improvements/changes may happen in the future (this also goes for other aspects of the
/// current implementation).
///
/// A `Connection` object can only be created via passive open, and will not recognize/use any TCP
/// options except `MSS` during the handshake. The associated state machine is similar to how
/// TCP normally functions, but there are some differences:
///
/// * Since only passive opens are supported, a `Connection` can only be instantiated in response to
/// an incoming `SYN` segment. If the segment is valid, it will start directly in a state called
/// `SYN_RECEIVED`. The valid events at this point are receiving a retransmission of the previous
/// `SYN` (which does nothing), and getting the chance to write a `SYNACK`, which also moves the
/// connection to the `SYNACK_SENT` state. Any incoming segment which is not a copy of the
/// previous `SYN` will reset the connection.
/// * In the `SYNACK_SENT` state, the connection awaits an `ACK` for the `SYNACK`. A retransmission
/// of the original `SYN` moves the state back to `SYN_RECEIVED`. A valid `ACK` advances the state
/// to `ESTABLISHED`. Any unexpected/invalid segment resets the connection.
/// * While `ESTABLISHED`, the connection will only reset if it receives a `RST` or a `SYN`. Invalid
/// segments are simply ignored. `FIN` handling is simplifed: when [`close`] is invoked the
/// connection records the `FIN` sequence number, and starts setting the `FIN` flag (when
/// possible) on outgoing segments. A `FIN` from the other endpoint is only taken into
/// consideration if it has the next expected sequence number. When the connection has both sent
/// and received a `FIN`, it marks itself as being done. There's no equivalent for the `TIME_WAIT`
/// TCP state.
///
/// The current implementation does not do any kind of congestion control, expects segments to
/// arrive in order, triggers a retransmission after the first duplicate `ACK`, and relies on the
/// user to supply an opaque `u64` timestamp value when invoking send or receive functionality. The
/// timestamps must be non-decreasing, and are mainly used for retransmission timeouts.
///
/// See [mmds-design](https://github.com/firecracker-microvm/firecracker/blob/main/docs/mmds/mmds-design.md#dumbo)
/// for why we are able to make these simplifications. Specifically, we want to stress that no
/// traffic handled by dumbo ever leaves a microVM.
///
/// [`close`]: #method.close
#[derive(Debug, Clone)]
pub struct Connection {
// The sequence number to ACK at the next opportunity. This is 1 + the highest received
// in-order sequence number.
ack_to_send: Wrapping<u32>,
// The highest ACK we received from the other end of the connection.
highest_ack_received: Wrapping<u32>,
// The sequence number of the first byte which has NOT yet been sent to the other endpoint.
first_not_sent: Wrapping<u32>,
// The right edge of the local receive window. We shouldn't receive any data past this point.
local_rwnd_edge: Wrapping<u32>,
// The right edge of the remote receive window. We shouldn't send any data past this point.
remote_rwnd_edge: Wrapping<u32>,
// The last time we received an ACK which advanced the receive window. Only makes sense as
// long as we seq_after(first_not_sent, highest_ack_received), and if we sent something that
// takes up sequence number space.
rto_start: u64,
// How much time can pass after rto_start, without making progress in the ACK space, before a
// retransmission is triggered.
rto_period: u64,
// How many retransmissions triggered before receiving a valid ACK from the other endpoint.
rto_count: u16,
// When rto_count reaches this value, the next retransmission will actually reset the
// connection.
rto_count_max: u16,
// Set to the FIN sequence number received from the other endpoint.
fin_received: Option<Wrapping<u32>>,
// When set, it represents the sequence number of the FIN byte which closes our end of the
// connection. No data may be sent past that point.
send_fin: Option<Wrapping<u32>>,
// If some, send a RST segment with the specified sequence and ACK numbers, and mark the
// connection as reset afterwards. The second option determines whether we set the ACK flag
// on the RST segment.
send_rst: Option<RstConfig>,
// The MSS used when sending data segments.
mss: u16,
// If true, send an ACK segment at the first opportunity. ACKs can piggyback data segments, so
// we'll only send an empty ACK segment if we can't transmit any data.
pending_ack: bool,
// We've got a duplicate ACK, so we'll retransmit the highest ACKed sequence number at the
// first opportunity. Unlike regular TCP, we retransmit after the first duplicate ACK.
dup_ack: bool,
status_flags: ConnStatusFlags,
}
fn parse_mss_option<T: NetworkBytes + Debug>(
segment: &TcpSegment<T>,
) -> Result<u16, PassiveOpenError> {
match segment.parse_mss_option_unchecked(segment.header_len().into()) {
Ok(Some(value)) => Ok(value.get()),
Ok(None) => Ok(MSS_DEFAULT),
Err(_) => Err(PassiveOpenError::MssOption),
}
}
fn is_valid_syn<T: NetworkBytes + Debug>(segment: &TcpSegment<T>) -> bool {
segment.flags_after_ns() == TcpFlags::SYN && segment.payload_len() == 0
}
impl Connection {
/// Attempts to create a new `Connection` in response to an incoming `SYN` segment.
///
/// # Arguments
///
/// * `segment` - The incoming `SYN`.
/// * `local_rwnd_size` - Initial size of the local receive window.
/// * `rto_period` - How long the connection waits before a retransmission timeout fires for the
/// first segment which has not been acknowledged yet. This uses an opaque time unit.
/// * `rto_count_max` - How many consecutive timeout-based retransmission may occur before the
/// connection resets itself.
pub fn passive_open<T: NetworkBytes + Debug>(
segment: &TcpSegment<T>,
local_rwnd_size: u32,
rto_period: NonZeroU64,
rto_count_max: NonZeroU16,
) -> Result<Self, PassiveOpenError> {
// We don't accepting anything other than a SYN segment here.
if !is_valid_syn(segment) {
return Err(PassiveOpenError::InvalidSyn);
}
// TODO: If we ever implement window scaling, change the part that computes
// remote_rwnd_edge below.
// We only care about the MSS option for now.
let mss = parse_mss_option(segment)?;
// This is going to get sent on the SYNACK.
let ack_to_send = Wrapping(segment.sequence_number()) + Wrapping(1);
// Let's pick the initial sequence number.
let isn = Wrapping(xor_pseudo_rng_u32());
let first_not_sent = isn + Wrapping(1);
let remote_rwnd_edge = first_not_sent + Wrapping(u32::from(segment.window_size()));
Ok(Connection {
ack_to_send,
highest_ack_received: isn,
// The ISN is sent over the SYNACK, and this is the next sequence number.
first_not_sent,
local_rwnd_edge: ack_to_send + Wrapping(local_rwnd_size),
// We have no information about this yet. It will get updated as the connection reaches
// the ESTABLISHED state.
remote_rwnd_edge,
rto_start: 0,
rto_period: rto_period.get(),
rto_count: 0,
rto_count_max: rto_count_max.get(),
fin_received: None,
send_fin: None,
send_rst: None,
mss,
pending_ack: false,
dup_ack: false,
status_flags: ConnStatusFlags::SYN_RECEIVED,
})
}
fn flags_intersect(&self, flags: ConnStatusFlags) -> bool {
self.status_flags.intersects(flags)
}
fn set_flags(&mut self, flags: ConnStatusFlags) {
self.status_flags.insert(flags);
}
fn clear_flags(&mut self, flags: ConnStatusFlags) {
self.status_flags.remove(flags);
}
fn syn_received(&self) -> bool {
self.flags_intersect(ConnStatusFlags::SYN_RECEIVED)
}
fn synack_pending(&self) -> bool {
self.syn_received() && !self.synack_sent()
}
fn synack_sent(&self) -> bool {
self.flags_intersect(ConnStatusFlags::SYNACK_SENT)
}
fn is_reset(&self) -> bool {
self.flags_intersect(ConnStatusFlags::RESET)
}
fn fin_sent(&self) -> bool {
self.flags_intersect(ConnStatusFlags::FIN_SENT)
}
fn fin_acked(&self) -> bool {
self.flags_intersect(ConnStatusFlags::FIN_ACKED)
}
fn is_same_syn<T: NetworkBytes + Debug>(&self, segment: &TcpSegment<T>) -> bool {
// This only really makes sense before getting into ESTABLISHED, but that's fine
// because we only use it before that point.
if !is_valid_syn(segment) || self.ack_to_send.0 != segment.sequence_number().wrapping_add(1)
{
return false;
}
matches!(parse_mss_option(segment), Ok(mss) if mss == self.mss)
}
fn reset_for_segment<T: NetworkBytes + Debug>(&mut self, s: &TcpSegment<T>) {
if !self.rst_pending() {
self.send_rst = Some(RstConfig::new(s));
}
}
fn rst_pending(&self) -> bool {
self.send_rst.is_some()
}
fn rto_expired(&self, now: u64) -> bool {
now - self.rto_start >= self.rto_period
}
// We send a FIN control segment if every data byte up to the self.send_fin sequence number
// has been ACKed by the other endpoint, and no FIN has been previously sent.
fn can_send_first_fin(&self) -> bool {
!self.fin_sent()
&& matches!(self.send_fin, Some(fin_seq) if fin_seq == self.highest_ack_received)
}
// Returns the window size which should be written to an outgoing segment. This is going to be
// even more useful when we'll support window scaling.
fn local_rwnd(&self) -> u16 {
let rwnd = (self.local_rwnd_edge - self.ack_to_send).0;
u16::try_from(rwnd).unwrap_or(u16::MAX)
}
// Will actually become meaningful when/if we implement window scaling.
fn remote_window_size(&self, window_size: u16) -> u32 {
u32::from(window_size)
}
// Computes the remote rwnd edge given the ACK number and window size from an incoming segment.
fn compute_remote_rwnd_edge(&self, ack: Wrapping<u32>, window_size: u16) -> Wrapping<u32> {
ack + Wrapping(self.remote_window_size(window_size))
}
// Has this name just in case the pending_ack status will be more than just some boolean at
// some point in the future.
fn enqueue_ack(&mut self) {
self.pending_ack = true;
}
/// Closes this half of the connection.
///
/// Subsequent calls after the first one do not have any effect. The sequence number of the
/// `FIN` is the first sequence number not yet sent at this point.
#[inline]
pub fn close(&mut self) {
if self.send_fin.is_none() {
self.send_fin = Some(self.first_not_sent);
}
}
/// Returns a valid configuration for a `RST` segment, which can be sent to the other
/// endpoint to signal the connection should be reset.
#[inline]
pub fn make_rst_config(&self) -> RstConfig {
if self.is_established() {
RstConfig::Seq(self.first_not_sent.0)
} else {
RstConfig::Ack(self.ack_to_send.0)
}
}
/// Specifies that a `RST` segment should be sent to the other endpoint, and then the
/// connection should be destroyed.
#[inline]
pub fn reset(&mut self) {
if !self.rst_pending() {
self.send_rst = Some(self.make_rst_config());
}
}
    /// Returns `true` if the connection is past the `ESTABLISHED` point.
    ///
    /// The `ESTABLISHED` flag is set by `receive_segment()` once a valid `ACK` of our
    /// `SYNACK` arrives.
    #[inline]
    pub fn is_established(&self) -> bool {
        self.flags_intersect(ConnStatusFlags::ESTABLISHED)
    }
    /// Returns `true` if a `FIN` has been received.
    ///
    /// `self.fin_received` records the sequence number of the peer's `FIN`, so its mere
    /// presence signals reception.
    #[inline]
    pub fn fin_received(&self) -> bool {
        self.fin_received.is_some()
    }
// TODO: The description of this method is also a TODO in disguise.
/// Returns `true` if the connection is done communicating with the other endpoint.
///
/// Maybe it would be a good idea to return true only after our FIN has also been ACKed?
/// Otherwise, when using the TCP handler there's pretty much always going to be an ACK for the
/// FIN that's going to trigger a gratuitous RST (best case), or can even be considered valid if
/// a new connection is created meanwhile using the same tuple and we get very unlucky (worst
/// case, extremely unlikely though).
#[inline]
pub fn is_done(&self) -> bool {
self.is_reset() || (self.fin_received() && self.flags_intersect(ConnStatusFlags::FIN_SENT))
}
    /// Returns the first sequence number which has not been sent yet for the current window.
    // Plain read-only accessor over internal send-side state.
    #[inline]
    pub fn first_not_sent(&self) -> Wrapping<u32> {
        self.first_not_sent
    }
    /// Returns the highest acknowledgement number received for the current window.
    // Plain read-only accessor; the value is advanced by `receive_segment()` on valid ACKs.
    #[inline]
    pub fn highest_ack_received(&self) -> Wrapping<u32> {
        self.highest_ack_received
    }
/// Advances the right edge of the local receive window.
///
/// This is effectively allowing the other endpoint to send more data, because no byte can be
/// sent unless its sequence number falls into the receive window.
// TODO: return the actual advance value here
#[inline]
pub fn advance_local_rwnd_edge(&mut self, value: u32) {
let v = Wrapping(value);
let max_w = Wrapping(MAX_WINDOW_SIZE);
let current_w = self.local_rwnd_edge - self.ack_to_send;
// Enqueue an ACK if we have to let the other endpoint know the window is opening.
if current_w.0 == 0 {
self.enqueue_ack();
}
if v + current_w > max_w {
self.local_rwnd_edge = self.ack_to_send + max_w;
} else {
self.local_rwnd_edge += v;
}
}
    /// Returns the right edge of the receive window advertised by the other endpoint.
    ///
    /// Updated by `receive_segment()` from the ACK number and window size of valid
    /// incoming ACKs.
    #[inline]
    pub fn remote_rwnd_edge(&self) -> Wrapping<u32> {
        self.remote_rwnd_edge
    }
    /// Returns `true` if a retransmission caused by the reception of a duplicate `ACK` is pending.
    ///
    /// The flag is raised by `receive_segment()` when an `ACK` repeats the highest
    /// acknowledgement received without covering any new data.
    #[inline]
    pub fn dup_ack_pending(&self) -> bool {
        self.dup_ack
    }
/// Describes whether a control segment can be sent immediately, a retransmission is pending,
/// or there's nothing to transmit until more segments are received.
///
/// This function does not tell whether any data segments can/will be sent, because the
/// Connection itself does not control the send buffer. Thus the information returned here
/// only pertains to control segments and timeout expiry. Data segment related status will
/// be reported by higher level components, which also manage the contents of the send buffer.
#[inline]
pub fn control_segment_or_timeout_status(&self) -> NextSegmentStatus {
if self.synack_pending()
|| self.rst_pending()
|| self.can_send_first_fin()
|| self.pending_ack
{
NextSegmentStatus::Available
} else if self.highest_ack_received != self.first_not_sent {
NextSegmentStatus::Timeout(self.rto_start + self.rto_period)
} else {
NextSegmentStatus::Nothing
}
}
// We use this helper method to set up self.send_rst and prepare a return value in one go. It's
// only used by the receive_segment() method.
fn reset_for_segment_helper<T: NetworkBytes + Debug>(
&mut self,
s: &TcpSegment<T>,
flags: RecvStatusFlags,
) -> Result<(Option<NonZeroUsize>, RecvStatusFlags), RecvError> {
self.reset_for_segment(s);
Ok((None, RecvStatusFlags::CONN_RESETTING | flags))
}
/// Handles an incoming segment.
///
/// When no errors occur, returns a pair consisting of how many
/// bytes (if any) were received, and whether any unusual conditions arose while processing the
/// segment. Since a `Connection` does not have its own internal buffer, `buf` is required to
/// store any data carried by incoming segments.
///
/// # Arguments
///
/// * `s` - The incoming segment.
/// * `buf` - The receive buffer where payload data (if any) from `s` is going to be written.
/// * `now` - An opaque timestamp representing the current moment in time.
pub fn receive_segment<T: NetworkBytes + Debug>(
&mut self,
s: &TcpSegment<T>,
buf: &mut [u8],
now: u64,
) -> Result<(Option<NonZeroUsize>, RecvStatusFlags), RecvError> {
if self.rst_pending() || self.is_reset() {
return Err(RecvError::ConnectionReset);
}
// TODO: The following logic fully makes sense only for a passive open (which is what we
// currently support). Things must change a bit if/when we also implement active opens.
let segment_flags = s.flags_after_ns();
if segment_flags.intersects(TcpFlags::RST) {
let seq = Wrapping(s.sequence_number());
// We accept the RST only if it carries an in-window sequence number.
// TODO: If/when we support active opens, we'll also have to accept RST/SYN segments,
// which must acknowledge our SYN to be valid.
if seq_at_or_after(seq, self.ack_to_send) && seq_after(self.local_rwnd_edge, seq) {
self.set_flags(ConnStatusFlags::RESET);
return Ok((None, RecvStatusFlags::RESET_RECEIVED));
} else {
return Ok((None, RecvStatusFlags::INVALID_RST));
}
}
let payload_len = s.len() - u16::from(s.header_len());
let mut recv_status_flags = RecvStatusFlags::empty();
if !self.synack_sent() {
// We received another segment before getting the chance to send a SYNACK. It's either
// a retransmitted SYN, or something that does not make sense.
if self.is_same_syn(s) {
return Ok((None, recv_status_flags));
} else {
return self.reset_for_segment_helper(s, RecvStatusFlags::INVALID_SEGMENT);
}
} else if !self.is_established() {
// So at this point we've sent at least one SYNACK, but the connection is not
// ESTABLISHED yet. We only accept SYN retransmissions and ACKs. I'm not sure that
// it's completely forbidden to sent an ACK + data in response to a SYNACK, so we don't
// complain about non-pure ACKs (or even data + ACK + FIN segments).
if self.is_same_syn(s) {
// Maybe our previous SYNACK got lost or smt, so clear SYN_ACK_SENT to resend it.
self.clear_flags(ConnStatusFlags::SYNACK_SENT);
return Ok((None, recv_status_flags));
} else if segment_flags.intersects(TcpFlags::SYN) {
// So we basically freak out over SYN segments which are not valid SYN
// retransmission.
return self.reset_for_segment_helper(s, RecvStatusFlags::INVALID_SEGMENT);
}
} else {
// Reaching this branch means the connection is ESTABLISHED. The only thing we want to
// do right now is reset if we get segments which carry the SYN flag, because they are
// obviously invalid, and something must be really wrong.
// TODO: Is it an overreaction to reset here?
if s.flags_after_ns().intersects(TcpFlags::SYN) {
return self.reset_for_segment_helper(s, RecvStatusFlags::INVALID_SEGMENT);
}
}
// The ACK number can only be valid when ACK flag is set. The following logic applies to
// pretty much all connection states which can reach this point.
if segment_flags.intersects(TcpFlags::ACK) {
let ack = Wrapping(s.ack_number());
if seq_at_or_after(ack, self.highest_ack_received)
&& seq_at_or_after(self.first_not_sent, ack)
{
// This is a valid ACK. Reset rto_count, since this means the other side is still
// alive and kicking (or ACking).
self.rto_count = 0;
if ack == self.highest_ack_received && ack != self.first_not_sent {
if !self.is_established() {
// Just kidding, a DUPACK is not valid before the connection is ESTABLISHED.
return self.reset_for_segment_helper(s, RecvStatusFlags::INVALID_ACK);
}
// Duplicate ACKs can only increase in sequence number, so there's no need
// to check if this one is older than self.dup_ack.
self.dup_ack = true;
recv_status_flags |= RecvStatusFlags::DUP_ACK;
} else {
// We're making progress. We should also reset rto_start in this case.
self.highest_ack_received = ack;
self.rto_start = now;
if !self.is_established() && self.synack_sent() {
// The connection becomes ESTABLISHED.
self.set_flags(ConnStatusFlags::ESTABLISHED);
}
if self.fin_sent() && ack == self.first_not_sent {
self.set_flags(ConnStatusFlags::FIN_ACKED);
}
}
// Look for remote remote rwnd updates.
if self.is_established() {
let edge = self.compute_remote_rwnd_edge(ack, s.window_size());
if seq_after(edge, self.remote_rwnd_edge) {
self.remote_rwnd_edge = edge;
} else if edge != self.remote_rwnd_edge {
// The right edge of the remote receive window has been moved to the left,
// or has been set to an invalid value. Both cases represent erroneous TCP
// behaviour.
recv_status_flags |= RecvStatusFlags::REMOTE_RWND_EDGE;
}
}
} else {
recv_status_flags |= RecvStatusFlags::INVALID_ACK;
if !self.is_established() {
// Reset the connection if we receive an invalid ACK before reaching the
// ESTABLISHED state.
return self.reset_for_segment_helper(s, recv_status_flags);
}
}
}
// We start looking at the payload and/or FIN next. This makes sense only if the
// connection is established.
if !self.is_established() {
return Ok((None, recv_status_flags));
}
let seq = Wrapping(s.sequence_number());
let wrapping_payload_len = Wrapping(u32::from(payload_len));
if usize::from(payload_len) > buf.len() {
return Err(RecvError::BufferTooSmall);
}
let mut enqueue_ack = if payload_len > 0 {
let data_end_seq = seq + wrapping_payload_len;
if let Some(fin_seq) = self.fin_received
&& !seq_at_or_after(fin_seq, data_end_seq)
{
// TODO: This is a strange situation, because the other endpoint is sending data
// after it initially closed its half of the connection. We simply ignore the
// segment for now.
return Ok((None, recv_status_flags | RecvStatusFlags::DATA_BEYOND_FIN));
}
if !seq_at_or_after(self.local_rwnd_edge, data_end_seq) {
// TODO: This is another strange (and potentially dangerous) situation, because
// either we or the other endpoint broke receive window semantics. We simply ignore
// the segment for now.
return Ok((
None,
recv_status_flags | RecvStatusFlags::SEGMENT_BEYOND_RWND,
));
}
// We currently assume segments are seldom lost or reordered, and only accept those with
// the exact next sequence number we're waiting for.
if seq != self.ack_to_send {
// TODO: Maybe we should enqueue multiple ACKs here (after making such a thing
// possible in the first place), just so we're more likely to trigger a
// retransmission.
self.enqueue_ack();
return Ok((None, recv_status_flags | RecvStatusFlags::UNEXPECTED_SEQ));
}
self.ack_to_send = data_end_seq;
true
} else {
false
};
// We assume the sequence number of the FIN does not change via conflicting FIN carrying
// segments (as it should be the case during TCP normal operation). It the other endpoint
// breaks this convention, it will have to deal with potentially hanging (until timing out)
// connections and/or RST segments.
if segment_flags.intersects(TcpFlags::FIN) && !self.fin_received() {
let fin_seq = seq + wrapping_payload_len;
// In order to avoid some complexity on our side, we only accept an incoming FIN if its
// sequence number matches that of the first byte yet to be received (this is similar to
// what we do for data segments right now).
if fin_seq == self.ack_to_send {
self.fin_received = Some(fin_seq);
// Increase this to also ACK the FIN.
self.ack_to_send += Wrapping(1);
enqueue_ack = true;
} else {
recv_status_flags |= RecvStatusFlags::INVALID_FIN;
}
}
if enqueue_ack {
self.enqueue_ack();
// We check this here because if a valid payload has been received, then we must have
// set enqueue_ack = true earlier.
if let Some(payload_len) = NonZeroUsize::new(payload_len.into()) {
buf[..payload_len.into()].copy_from_slice(s.payload());
return Ok((Some(payload_len), recv_status_flags));
}
}
Ok((None, recv_status_flags))
}
// The write helper functions return incomplete segments because &self does not have information
// regarding the identity of the endpoints, such as source and destination ports, or source and
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/dumbo/tcp/mod.rs | src/vmm/src/dumbo/tcp/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Provides functionality for handling incoming TCP connections.
pub mod connection;
mod endpoint;
pub mod handler;
use std::fmt::Debug;
use std::num::Wrapping;
use crate::dumbo::pdu::bytes::NetworkBytes;
use crate::dumbo::pdu::tcp::{Flags as TcpFlags, TcpSegment};
/// The largest possible window size (requires the window scaling option).
// 1_073_725_440 == 65_535 << 14: the 16-bit window field scaled by the maximum shift
// count allowed by the window scaling option.
pub const MAX_WINDOW_SIZE: u32 = 1_073_725_440;
/// The default maximum segment size (MSS) value, used when no MSS information is carried
/// over the initial handshake.
pub const MSS_DEFAULT: u16 = 536;
/// Describes whether a particular entity (a [`Connection`] for example) has segments to send.
///
/// [`Connection`]: connection/struct.Connection.html
#[derive(Debug, PartialEq, Eq)]
pub enum NextSegmentStatus {
    /// At least one segment is available immediately.
    Available,
    /// There's nothing to send.
    Nothing,
    /// A retransmission timeout (RTO) will trigger after the specified point in time
    /// (an opaque timestamp in the same units as the caller-supplied clock).
    Timeout(u64),
}
/// Represents the configuration of the sequence number and `ACK` number fields for outgoing
/// `RST` segments.
// The two variants mirror the two situations in RstConfig::new(): a reply to a segment
// that carried an ACK number versus a reply to one that did not.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RstConfig {
    /// The `RST` segment will carry the specified sequence number, and will not have
    /// the `ACK` flag set.
    Seq(u32),
    /// The `RST` segment will carry 0 as the sequence number, will have the `ACK` flag enabled,
    /// and the `ACK` number will be set to the specified value.
    Ack(u32),
}
impl RstConfig {
    /// Creates a `RstConfig` suitable as a reply to the given segment.
    pub fn new<T: NetworkBytes + Debug>(s: &TcpSegment<T>) -> Self {
        let carries_ack = s.flags_after_ns().intersects(TcpFlags::ACK);
        if carries_ack {
            // The segment's ACK number becomes the sequence number of the RST.
            RstConfig::Seq(s.ack_number())
        } else {
            // No ACK number to mirror, so guess a plausible ACK value for the RST instead.
            RstConfig::Ack(s.sequence_number().wrapping_add(s.payload_len().into()))
        }
    }
    /// Returns the sequence number, acknowledgement number, and TCP flags (not counting `NS`) that
    /// must be set on the outgoing `RST` segment.
    pub fn seq_ack_tcp_flags(self) -> (u32, u32, TcpFlags) {
        match self {
            RstConfig::Ack(ack) => (0, ack, TcpFlags::RST | TcpFlags::ACK),
            RstConfig::Seq(seq) => (seq, 0, TcpFlags::RST),
        }
    }
}
/// Returns true if `a` comes after `b` in the sequence number space, relative to the maximum
/// possible window size.
///
/// Please note this is not a connex binary relation: given two sequence numbers, it is
/// sometimes possible that both `seq_after(a, b)` and `seq_after(b, a)` are `false`. This is
/// why `seq_after(a, b)` can't be defined as simply `!seq_at_or_after(b, a)`.
#[inline]
pub fn seq_after(a: Wrapping<u32>, b: Wrapping<u32>) -> bool {
    // The wrapping distance from b to a must be non-zero and within a window's reach.
    let delta = (a - b).0;
    delta != 0 && delta < MAX_WINDOW_SIZE
}
/// Returns true if `a` comes after, or is at `b` in the sequence number space, relative to
/// the maximum possible window size.
///
/// Please note this is not a connex binary relation: given two sequence numbers, it is
/// sometimes possible that both `seq_at_or_after(a, b)` and `seq_at_or_after(b, a)` are
/// `false`. This is why `seq_at_or_after(a, b)` can't be defined as simply
/// `!seq_after(b, a)`.
// (The previous doc comment was copy-pasted from `seq_after` and stated the wrong
// non-definability claim for this function.)
#[inline]
pub fn seq_at_or_after(a: Wrapping<u32>, b: Wrapping<u32>) -> bool {
    // The wrapping distance from b to a must be within a window's reach (0 means equal).
    let Wrapping(delta) = a - b;
    delta < MAX_WINDOW_SIZE
}
#[cfg(test)]
mod tests {
    use micro_http::{Request, Response, StatusCode, Version};
    use super::*;
    // In tcp tests, some of the functions require a callback parameter. Since we do not care,
    // for the purpose of those tests, what that callback does, we need to provide a dummy one.
    pub fn mock_callback(_request: Request) -> Response {
        Response::new(Version::Http11, StatusCode::OK)
    }
    // Exercises both RstConfig::new() branches (with and without the ACK flag on the
    // incoming segment) and the corresponding seq_ack_tcp_flags() outputs.
    #[test]
    fn test_rst_config() {
        let mut buf = [0u8; 100];
        let seq = 1234;
        let ack = 5678;
        let mut s = TcpSegment::write_segment::<[u8]>(
            buf.as_mut(),
            0,
            0,
            seq,
            ack,
            TcpFlags::empty(),
            0,
            None,
            100,
            None,
            None,
        )
        .unwrap();
        // The ACK flag isn't set, and the payload length is 0.
        let cfg = RstConfig::new(&s);
        assert_eq!(cfg, RstConfig::Ack(seq));
        assert_eq!(
            cfg.seq_ack_tcp_flags(),
            (0, seq, TcpFlags::RST | TcpFlags::ACK)
        );
        // Let's set the ACK flag.
        s.set_flags_after_ns(TcpFlags::ACK);
        let cfg = RstConfig::new(&s);
        assert_eq!(cfg, RstConfig::Seq(ack));
        assert_eq!(cfg.seq_ack_tcp_flags(), (ack, 0, TcpFlags::RST));
    }
    // Checks the ordering relations at the boundaries: equality, an in-window offset,
    // and an offset exactly MAX_WINDOW_SIZE away (out of reach).
    #[test]
    fn test_seq_at_or_after() {
        let a = Wrapping(123);
        let b = a + Wrapping(100);
        let c = a + Wrapping(MAX_WINDOW_SIZE);
        assert!(seq_at_or_after(a, a));
        assert!(!seq_after(a, a));
        assert!(seq_at_or_after(b, a));
        assert!(seq_after(b, a));
        assert!(!seq_at_or_after(a, b));
        assert!(!seq_after(a, b));
        assert!(!seq_at_or_after(c, a));
        assert!(!seq_after(c, a));
        assert!(seq_at_or_after(c, b));
        assert!(seq_after(c, b));
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/dumbo/tcp/handler.rs | src/vmm/src/dumbo/tcp/handler.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Exposes simple TCP over IPv4 listener functionality via the [`TcpIPv4Handler`] structure.
//!
//! [`TcpIPv4Handler`]: struct.TcpIPv4Handler.html
use std::collections::{HashMap, HashSet};
use std::fmt::Debug;
use std::net::Ipv4Addr;
use std::num::NonZeroUsize;
use micro_http::{Request, Response};
use crate::dumbo::pdu::bytes::NetworkBytes;
use crate::dumbo::pdu::ipv4::{IPv4Packet, Ipv4Error as IPv4PacketError, PROTOCOL_TCP};
use crate::dumbo::pdu::tcp::{Flags as TcpFlags, TcpError as TcpSegmentError, TcpSegment};
use crate::dumbo::tcp::endpoint::Endpoint;
use crate::dumbo::tcp::{NextSegmentStatus, RstConfig};
// TODO: This is currently IPv4 specific. Maybe change it to a more generic implementation.
/// Describes events which may occur when the handler receives packets.
#[derive(Debug, PartialEq, Eq)]
pub enum RecvEvent {
    /// The local endpoint is done communicating, and has been removed.
    EndpointDone,
    /// An error occurred while trying to create a new `Endpoint` object, based on an incoming
    /// `SYN` segment.
    FailedNewConnection,
    /// A new local `Endpoint` has been successfully created.
    NewConnectionSuccessful,
    /// Failed to add a local `Endpoint` because the handler is already at the maximum number of
    /// concurrent connections, and there are no evictable Endpoints.
    NewConnectionDropped,
    /// A new local `Endpoint` has been successfully created, but the handler had to make room by
    /// evicting an older `Endpoint`.
    NewConnectionReplacing,
    /// Nothing interesting happened regarding the state of the handler.
    Nothing,
    /// The handler received a non-`SYN` segment which does not belong to any existing
    /// connection.
    UnexpectedSegment,
}
/// Describes events which may occur when the handler writes packets.
// Returned by write_next_packet() alongside the number of bytes written.
#[derive(Debug, PartialEq, Eq)]
pub enum WriteEvent {
    /// The local `Endpoint` transitioned to being done after this segment was written.
    EndpointDone,
    /// Nothing interesting happened.
    Nothing,
}
/// Describes errors which may be encountered by the [`receive_packet`] method from
/// [`TcpIPv4Handler`].
///
/// [`receive_packet`]: struct.TcpIPv4Handler.html#method.receive_packet
/// [`TcpIPv4Handler`]: struct.TcpIPv4Handler.html
// NOTE: the variant doc comments double as the Display strings via `displaydoc::Display`;
// editing them changes the user-visible error messages.
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum RecvError {
    /// The inner segment has an invalid destination port.
    InvalidPort,
    /// The handler encountered an error while parsing the inner TCP segment: {0}
    TcpSegment(#[from] TcpSegmentError),
}
/// Describes errors which may be encountered by the [`write_next_packet`] method from
/// [`TcpIPv4Handler`].
///
/// [`write_next_packet`]: struct.TcpIPv4Handler.html#method.write_next_packet
/// [`TcpIPv4Handler`]: struct.TcpIPv4Handler.html
// NOTE: the variant doc comments double as the Display strings via `displaydoc::Display`;
// editing them changes the user-visible error messages.
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum WriteNextError {
    /// There was an error while writing the contents of the IPv4 packet: {0}
    IPv4Packet(#[from] IPv4PacketError),
    /// There was an error while writing the contents of the inner TCP segment: {0}
    TcpSegment(#[from] TcpSegmentError),
}
// Generally speaking, a TCP/IPv4 connection is identified using the four-tuple (src_addr, src_port,
// dst_addr, dst_port). However, the IPv4 address and TCP port of the MMDS endpoint are fixed, so
// we can get away with uniquely identifying connections using just the remote address and port.
// Hash + Eq make this usable as the key type of the handler's connection map.
#[derive(Debug, Clone, Copy, Eq, Hash, PartialEq)]
struct ConnectionTuple {
    remote_addr: Ipv4Addr,
    remote_port: u16,
}
impl ConnectionTuple {
fn new(remote_addr: Ipv4Addr, remote_port: u16) -> Self {
ConnectionTuple {
remote_addr,
remote_port,
}
}
}
/// Implements a minimalist TCP over IPv4 listener.
///
/// Forwards incoming TCP segments to the appropriate connection object, based on the associated
/// tuple, or attempts to establish new connections (when receiving `SYN` segments). Aside from
/// constructors, the handler operation is based on three methods:
///
/// * [`receive_packet`] examines an incoming IPv4 packet. It checks whether the destination
///   address is correct, then attempts to examine the inner TCP segment, making sure the
///   destination port number is also correct. Then, it steers valid segments towards existing
///   connections, creates new connections for incoming `SYN` segments, and enqueues `RST` replies
///   in response to any segments which cannot be associated with a connection (except other `RST`
///   segments). On success, also describes any internal status changes triggered by the reception
///   of the packet.
/// * [`write_next_packet`] writes the next IPv4 packet (if available) that would be sent by the
///   handler itself (right now it can only mean an enqueued `RST`), or one of the existing
///   connections. On success, also describes any internal status changes triggered as the packet
///   gets transmitted.
/// * [`next_segment_status`] describes whether the handler can send a packet immediately, or after
///   some retransmission timeout associated with a connection fires, or if there's nothing to send
///   for the moment. This is used to determine whether it's appropriate to call
///   [`write_next_packet`].
///
/// [`receive_packet`]: ../handler/struct.TcpIPv4Handler.html#method.receive_packet
/// [`write_next_packet`]: ../handler/struct.TcpIPv4Handler.html#method.write_next_packet
/// [`next_segment_status`]: ../handler/struct.TcpIPv4Handler.html#method.next_segment_status
#[derive(Debug)]
pub struct TcpIPv4Handler {
    // Handler IPv4 address used for every connection.
    local_ipv4_addr: Ipv4Addr,
    // Handler TCP port used for every connection.
    local_port: u16,
    // This map holds the currently active endpoints, identified by their connection tuple.
    connections: HashMap<ConnectionTuple, Endpoint>,
    // Maximum number of concurrent connections we are willing to handle.
    max_connections: NonZeroUsize,
    // Holds connections which are able to send segments immediately.
    active_connections: HashSet<ConnectionTuple>,
    // Remembers the closest timestamp into the future when one of the connections has to deal
    // with an RTO trigger.
    next_timeout: Option<(u64, ConnectionTuple)>,
    // RST segments awaiting to be sent.
    rst_queue: Vec<(ConnectionTuple, RstConfig)>,
    // Maximum size of the RST queue.
    max_pending_resets: NonZeroUsize,
}
// Only used locally, in the receive_packet method, to differentiate between different outcomes
// associated with processing incoming packets.
#[derive(Debug)]
enum RecvSegmentOutcome {
    // The endpoint handling the segment finished communicating and must be removed.
    EndpointDone,
    // The endpoint handled the segment; carries its updated next-segment status.
    EndpointRunning(NextSegmentStatus),
    // The segment is a SYN which does not match an existing connection.
    NewConnection,
    // Unexpected segment; the bool says whether a RST should be enqueued in response.
    UnexpectedSegment(bool),
}
impl TcpIPv4Handler {
/// Creates a new `TcpIPv4Handler`.
///
/// The handler acts as if bound to `local_addr`:`local_port`, and will accept at most
/// `max_connections` concurrent connections. `RST` segments generated by unexpected incoming
/// segments are placed in a queue which is at most `max_pending_resets` long.
#[inline]
pub fn new(
local_ipv4_addr: Ipv4Addr,
local_port: u16,
max_connections: NonZeroUsize,
max_pending_resets: NonZeroUsize,
) -> Self {
TcpIPv4Handler {
local_ipv4_addr,
local_port,
connections: HashMap::with_capacity(max_connections.get()),
max_connections,
active_connections: HashSet::with_capacity(max_connections.get()),
next_timeout: None,
rst_queue: Vec::with_capacity(max_pending_resets.get()),
max_pending_resets,
}
}
/// Setter for the local IPv4 address of this TCP handler.
pub fn set_local_ipv4_addr(&mut self, ipv4_addr: Ipv4Addr) {
self.local_ipv4_addr = ipv4_addr;
}
/// Returns the local IPv4 address of this TCP handler.
pub fn local_ipv4_addr(&self) -> Ipv4Addr {
self.local_ipv4_addr
}
/// Returns the local port of this TCP handler.
pub fn local_port(&self) -> u16 {
self.local_port
}
/// Returns the max connections of this TCP handler.
pub fn max_connections(&self) -> NonZeroUsize {
self.max_connections
}
/// Returns the max pending resets of this TCP handler.
pub fn max_pending_resets(&self) -> NonZeroUsize {
self.max_pending_resets
}
/// Contains logic for handling incoming segments.
///
/// Any changes to the state of the handler are communicated through an `Ok(RecvEvent)`.
pub fn receive_packet<T: NetworkBytes + Debug, F: FnOnce(Request) -> Response>(
&mut self,
packet: &IPv4Packet<T>,
callback: F,
) -> Result<RecvEvent, RecvError> {
// TODO: We skip verifying the checksum, just in case the device model relies on offloading
// checksum computation from the guest to some other entity. Clear this up at some point!
// (Issue #520)
let segment = TcpSegment::from_bytes(packet.payload(), None)?;
if segment.destination_port() != self.local_port {
return Err(RecvError::InvalidPort);
}
let tuple = ConnectionTuple::new(packet.source_address(), segment.source_port());
let outcome = if let Some(endpoint) = self.connections.get_mut(&tuple) {
endpoint.receive_segment(&segment, callback);
if endpoint.is_done() {
RecvSegmentOutcome::EndpointDone
} else {
RecvSegmentOutcome::EndpointRunning(endpoint.next_segment_status())
}
} else if segment.flags_after_ns() == TcpFlags::SYN {
RecvSegmentOutcome::NewConnection
} else {
// We should send a RST for every non-RST unexpected segment we receive.
RecvSegmentOutcome::UnexpectedSegment(
!segment.flags_after_ns().intersects(TcpFlags::RST),
)
};
match outcome {
RecvSegmentOutcome::EndpointDone => {
self.remove_connection(tuple);
Ok(RecvEvent::EndpointDone)
}
RecvSegmentOutcome::EndpointRunning(status) => {
if !self.check_next_segment_status(tuple, status) {
// The connection may not have been a member of active_connection, but it's
// more straightforward to cover both cases this way.
self.active_connections.remove(&tuple);
}
Ok(RecvEvent::Nothing)
}
RecvSegmentOutcome::NewConnection => {
let endpoint = match Endpoint::new_with_defaults(&segment) {
Ok(endpoint) => endpoint,
Err(_) => return Ok(RecvEvent::FailedNewConnection),
};
if self.connections.len() >= self.max_connections.get() {
if let Some(evict_tuple) = self.find_evictable_connection() {
let rst_config = self.connections[&evict_tuple]
.connection()
.make_rst_config();
self.enqueue_rst_config(evict_tuple, rst_config);
self.remove_connection(evict_tuple);
self.add_connection(tuple, endpoint);
Ok(RecvEvent::NewConnectionReplacing)
} else {
// No room to accept the new connection. Try to enqueue a RST, and forget
// about it.
self.enqueue_rst(tuple, &segment);
Ok(RecvEvent::NewConnectionDropped)
}
} else {
self.add_connection(tuple, endpoint);
Ok(RecvEvent::NewConnectionSuccessful)
}
}
RecvSegmentOutcome::UnexpectedSegment(enqueue_rst) => {
if enqueue_rst {
self.enqueue_rst(tuple, &segment);
}
Ok(RecvEvent::UnexpectedSegment)
}
}
}
fn check_timeout(&mut self, value: u64, tuple: ConnectionTuple) {
match self.next_timeout {
Some((t, _)) if t > value => self.next_timeout = Some((value, tuple)),
None => self.next_timeout = Some((value, tuple)),
_ => (),
};
}
fn find_next_timeout(&mut self) {
let mut next_timeout = None;
for (tuple, endpoint) in self.connections.iter() {
if let NextSegmentStatus::Timeout(value) = endpoint.next_segment_status() {
if let Some((t, _)) = next_timeout {
if t > value {
next_timeout = Some((value, *tuple));
}
} else {
next_timeout = Some((value, *tuple));
}
}
}
self.next_timeout = next_timeout;
}
// Returns true if the endpoint has been added to the set of active connections (it may have
// been there already).
fn check_next_segment_status(
&mut self,
tuple: ConnectionTuple,
status: NextSegmentStatus,
) -> bool {
if let Some((_, timeout_tuple)) = self.next_timeout
&& tuple == timeout_tuple
{
self.find_next_timeout();
}
match status {
NextSegmentStatus::Available => {
self.active_connections.insert(tuple);
return true;
}
NextSegmentStatus::Timeout(value) => self.check_timeout(value, tuple),
NextSegmentStatus::Nothing => (),
};
false
}
fn add_connection(&mut self, tuple: ConnectionTuple, endpoint: Endpoint) {
self.check_next_segment_status(tuple, endpoint.next_segment_status());
self.connections.insert(tuple, endpoint);
}
fn remove_connection(&mut self, tuple: ConnectionTuple) {
// Just in case it's in there somewhere.
self.active_connections.remove(&tuple);
self.connections.remove(&tuple);
if let Some((_, timeout_tuple)) = self.next_timeout
&& timeout_tuple == tuple
{
self.find_next_timeout();
}
}
// TODO: I guess this should be refactored at some point to also remove the endpoint if found.
fn find_evictable_connection(&self) -> Option<ConnectionTuple> {
for (tuple, endpoint) in self.connections.iter() {
if endpoint.is_evictable() {
return Some(*tuple);
}
}
None
}
fn enqueue_rst_config(&mut self, tuple: ConnectionTuple, cfg: RstConfig) {
// We simply forgo sending any RSTs if the queue is already full.
if self.rst_queue.len() < self.max_pending_resets.get() {
self.rst_queue.push((tuple, cfg));
}
}
fn enqueue_rst<T: NetworkBytes + Debug>(&mut self, tuple: ConnectionTuple, s: &TcpSegment<T>) {
self.enqueue_rst_config(tuple, RstConfig::new(s));
}
/// Attempts to write one packet, from either the `RST` queue or one of the existing endpoints,
/// to `buf`.
///
/// On success, the function returns a pair containing an `Option<NonZeroUsize>` and a
/// `WriteEvent`. The option represents how many bytes have been written to `buf`, or
/// that no packet can be sent presently (when equal to `None`). The `WriteEvent` describes
/// whether any noteworthy state changes are associated with the write.
pub fn write_next_packet(
&mut self,
buf: &mut [u8],
) -> Result<(Option<NonZeroUsize>, WriteEvent), WriteNextError> {
let mut len = None;
let mut writer_status = None;
let mut event = WriteEvent::Nothing;
// Write an incomplete Ipv4 packet and complete it afterwards with missing information.
// The LOCALHOST placeholders are overwritten below once the destination is known.
let mut packet =
IPv4Packet::write_header(buf, PROTOCOL_TCP, Ipv4Addr::LOCALHOST, Ipv4Addr::LOCALHOST)?;
// We set mss_used to 0, because we don't add any IP options.
// TODO: Maybe get this nicely from packet at some point.
let mss_reserved = 0;
// We prioritize sending RSTs for now. The 10000 value for window size is just an arbitrary
// number, and using mss_remaining = 0 is perfectly fine in this case, because we don't add
// any TCP options, or a payload.
if let Some((tuple, rst_cfg)) = self.rst_queue.pop() {
let (seq, ack, flags_after_ns) = rst_cfg.seq_ack_tcp_flags();
let segment_len = TcpSegment::write_incomplete_segment::<[u8]>(
packet.inner_mut().payload_mut(),
seq,
ack,
flags_after_ns,
10000,
None,
0,
None,
)?
.finalize(
self.local_port,
tuple.remote_port,
Some((self.local_ipv4_addr, tuple.remote_addr)),
)
.len();
packet
.inner_mut()
.set_source_address(self.local_ipv4_addr)
.set_destination_address(tuple.remote_addr);
let packet_len = packet.with_payload_len_unchecked(segment_len, true).len();
// The unwrap() is safe because packet_len > 0.
return Ok((
Some(NonZeroUsize::new(packet_len).unwrap()),
WriteEvent::Nothing,
));
}
// No RST pending: poll the active endpoints (plus the endpoint owning the nearest
// timeout, if any) until one of them produces a segment.
for tuple in self
.active_connections
.iter()
.chain(self.next_timeout.as_ref().map(|(_, x)| x))
{
// Tuples in self.active_connection or self.next_timeout should also appear as keys
// in self.connections.
let endpoint = self.connections.get_mut(tuple).unwrap();
// We need this block to clearly delimit the lifetime of the mutable borrow started by
// the following packet.inner_mut().
let segment_len = {
let maybe_segment =
endpoint.write_next_segment(packet.inner_mut().payload_mut(), mss_reserved);
match maybe_segment {
Some(segment) => segment
.finalize(
self.local_port,
tuple.remote_port,
Some((self.local_ipv4_addr, tuple.remote_addr)),
)
.len(),
None => continue,
}
};
packet
.inner_mut()
.set_source_address(self.local_ipv4_addr)
.set_destination_address(tuple.remote_addr);
let ip_len = packet.with_payload_len_unchecked(segment_len, true).len();
// The unwrap is safe because ip_len > 0.
len = Some(NonZeroUsize::new(ip_len).unwrap());
writer_status = Some((*tuple, endpoint.is_done()));
break;
}
// Bookkeeping is deferred until after the loop because active_connections/connections
// cannot be mutated while they are borrowed by the iteration above.
if let Some((tuple, is_done)) = writer_status {
if is_done {
self.remove_connection(tuple);
event = WriteEvent::EndpointDone;
} else {
// The unwrap is safe because tuple is present as a key in self.connections if we
// got here.
let status = self.connections[&tuple].next_segment_status();
if !self.check_next_segment_status(tuple, status) {
self.active_connections.remove(&tuple);
}
}
}
Ok((len, event))
}
/// Describes the status of the next segment to be sent by the handler.
#[inline]
pub fn next_segment_status(&self) -> NextSegmentStatus {
    // A queued RST or any endpoint with pending output means a segment is ready now.
    let ready_now = !self.active_connections.is_empty() || !self.rst_queue.is_empty();
    if ready_now {
        NextSegmentStatus::Available
    } else if let Some((value, _)) = self.next_timeout {
        // Otherwise the earliest retransmission timeout (if any) says when to come back.
        NextSegmentStatus::Timeout(value)
    } else {
        NextSegmentStatus::Nothing
    }
}
}
#[cfg(test)]
mod tests {
use std::fmt::Debug;
use super::*;
use crate::dumbo::pdu::bytes::NetworkBytesMut;
use crate::dumbo::tcp::tests::mock_callback;
/// Reinterprets the payload of `p` as a mutable TCP segment (checksum not verified).
fn inner_tcp_mut<'a, T: NetworkBytesMut + Debug>(
    p: &'a mut IPv4Packet<'_, T>,
) -> TcpSegment<'a, &'a mut [u8]> {
    let payload = p.payload_mut();
    TcpSegment::from_bytes(payload, None).unwrap()
}
#[allow(clippy::type_complexity)]
/// Wraps `write_next_packet`, parsing whatever was written back into an `IPv4Packet`.
/// The second tuple element is the `WriteEvent` reported by the handler.
fn write_next<'a>(
    h: &mut TcpIPv4Handler,
    buf: &'a mut [u8],
) -> Result<(Option<IPv4Packet<'a, &'a mut [u8]>>, WriteEvent), WriteNextError> {
    let (maybe_len, event) = h.write_next_packet(buf)?;
    let packet = maybe_len.map(move |len| {
        let (written, _) = buf.split_at_mut(len.get());
        IPv4Packet::from_bytes(written, true).unwrap()
    });
    Ok((packet, event))
}
/// Drives one `write_next_packet` call, asserts the reported event matches
/// `expected_event`, and returns the TCP segment carried by the written IPv4 packet.
fn next_written_segment<'a>(
    h: &mut TcpIPv4Handler,
    buf: &'a mut [u8],
    expected_event: WriteEvent,
) -> TcpSegment<'a, &'a mut [u8]> {
    // Inner scope ends the borrow of buf held by the parsed packet before re-slicing it.
    let bounds = {
        let (o, event) = write_next(h, buf).unwrap();
        assert_eq!(event, expected_event);
        let p = o.unwrap();
        (p.header_len(), p.len())
    };
    let (segment_start, segment_end) = bounds;
    TcpSegment::from_bytes(&mut buf[segment_start.into()..segment_end], None).unwrap()
}
// Calls write_next_packet until either an error occurs, or there's nothing left to send.
// When successful, returns how many packets were written. The remote_addr argument is used
// to check the packets are sent to the appropriate destination.
fn drain_packets(
    h: &mut TcpIPv4Handler,
    src_addr: Ipv4Addr,
    remote_addr: Ipv4Addr,
) -> Result<usize, WriteNextError> {
    let mut buf = [0u8; 2000];
    let mut count: usize = 0;
    loop {
        match write_next(h, buf.as_mut())? {
            (Some(packet), _) => {
                count += 1;
                assert_eq!(packet.source_address(), src_addr);
                assert_eq!(packet.destination_address(), remote_addr);
            }
            // Nothing left to transmit: report how many packets went out.
            (None, _) => return Ok(count),
        }
    }
}
#[test]
#[allow(clippy::cognitive_complexity)]
fn test_handler() {
let mut buf = [0u8; 100];
let mut buf2 = [0u8; 2000];
let wrong_local_addr = Ipv4Addr::new(123, 123, 123, 123);
let local_addr = Ipv4Addr::new(169, 254, 169, 254);
let local_port = 80;
let remote_addr = Ipv4Addr::new(10, 0, 0, 1);
let remote_port = 1012;
let max_connections = 2;
let max_pending_resets = 2;
let mut h = TcpIPv4Handler::new(
local_addr,
local_port,
NonZeroUsize::new(max_connections).unwrap(),
NonZeroUsize::new(max_pending_resets).unwrap(),
);
// We start with a wrong destination address and destination port to check those error
// conditions first.
let mut p =
IPv4Packet::write_header(buf.as_mut(), PROTOCOL_TCP, remote_addr, wrong_local_addr)
.unwrap();
let seq_number = 123;
let s_len = {
// We're going to use this simple segment to test stuff.
let s = TcpSegment::write_segment::<[u8]>(
p.inner_mut().payload_mut(),
remote_port,
// We use the wrong port here initially, to trigger an error.
local_port + 1,
seq_number,
456,
TcpFlags::empty(),
10000,
None,
100,
None,
None,
)
.unwrap();
s.len()
};
// The handler should have nothing to send at this point.
assert_eq!(h.next_segment_status(), NextSegmentStatus::Nothing);
assert_eq!(drain_packets(&mut h, local_addr, remote_addr), Ok(0));
let mut p = p.with_payload_len_unchecked(s_len, false);
p.set_destination_address(local_addr);
assert_eq!(
h.receive_packet(&p, mock_callback).unwrap_err(),
RecvError::InvalidPort
);
// Let's fix the port. However, the segment is not a valid SYN, so we should get an
// UnexpectedSegment status, and the handler should write a RST.
assert_eq!(h.rst_queue.len(), 0);
inner_tcp_mut(&mut p).set_destination_port(local_port);
assert_eq!(
h.receive_packet(&p, mock_callback),
Ok(RecvEvent::UnexpectedSegment)
);
assert_eq!(h.rst_queue.len(), 1);
assert_eq!(h.next_segment_status(), NextSegmentStatus::Available);
{
let s = next_written_segment(&mut h, buf2.as_mut(), WriteEvent::Nothing);
assert!(s.flags_after_ns().intersects(TcpFlags::RST));
assert_eq!(s.source_port(), local_port);
assert_eq!(s.destination_port(), remote_port);
}
assert_eq!(h.rst_queue.len(), 0);
assert_eq!(h.next_segment_status(), NextSegmentStatus::Nothing);
// Let's check we can only enqueue max_pending_resets resets.
assert_eq!(
h.receive_packet(&p, mock_callback),
Ok(RecvEvent::UnexpectedSegment)
);
assert_eq!(h.rst_queue.len(), 1);
assert_eq!(
h.receive_packet(&p, mock_callback),
Ok(RecvEvent::UnexpectedSegment)
);
assert_eq!(h.rst_queue.len(), 2);
assert_eq!(
h.receive_packet(&p, mock_callback),
Ok(RecvEvent::UnexpectedSegment)
);
assert_eq!(h.rst_queue.len(), 2);
// Drain the resets.
assert_eq!(h.next_segment_status(), NextSegmentStatus::Available);
assert_eq!(drain_packets(&mut h, local_addr, remote_addr), Ok(2));
assert_eq!(h.next_segment_status(), NextSegmentStatus::Nothing);
// Ok now let's send a valid SYN.
assert_eq!(h.connections.len(), 0);
inner_tcp_mut(&mut p).set_flags_after_ns(TcpFlags::SYN);
assert_eq!(
h.receive_packet(&p, mock_callback),
Ok(RecvEvent::NewConnectionSuccessful)
);
assert_eq!(h.connections.len(), 1);
assert_eq!(h.active_connections.len(), 1);
// Let's immediately send a RST to the newly initiated connection. This should
// terminate it.
inner_tcp_mut(&mut p)
.set_flags_after_ns(TcpFlags::RST)
.set_sequence_number(seq_number.wrapping_add(1));
assert_eq!(
h.receive_packet(&p, mock_callback),
Ok(RecvEvent::EndpointDone)
);
assert_eq!(h.connections.len(), 0);
assert_eq!(h.active_connections.len(), 0);
// Now, let's restore the previous SYN, and resend it to initiate a connection.
inner_tcp_mut(&mut p)
.set_flags_after_ns(TcpFlags::SYN)
.set_sequence_number(seq_number);
assert_eq!(
h.receive_packet(&p, mock_callback),
Ok(RecvEvent::NewConnectionSuccessful)
);
assert_eq!(h.connections.len(), 1);
assert_eq!(h.active_connections.len(), 1);
// There will be a SYNACK in response.
assert_eq!(h.next_segment_status(), NextSegmentStatus::Available);
assert_eq!(drain_packets(&mut h, local_addr, remote_addr), Ok(1));
let remote_tuple = ConnectionTuple::new(remote_addr, remote_port);
let remote_tuple2 = ConnectionTuple::new(remote_addr, remote_port + 1);
// Also, there should be a retransmission timer associated with the previous SYNACK now.
assert_eq!(h.active_connections.len(), 0);
let old_timeout_value = if let Some((t, tuple)) = h.next_timeout {
assert_eq!(tuple, remote_tuple);
t
} else {
panic!("missing first expected timeout");
};
// Using the same SYN again will route the packet to the previous connection, and not
// create a new one.
assert_eq!(h.receive_packet(&p, mock_callback), Ok(RecvEvent::Nothing));
assert_eq!(h.connections.len(), 1);
// SYNACK retransmission.
assert_eq!(drain_packets(&mut h, local_addr, remote_addr), Ok(1));
// The timeout value should've gotten updated.
assert_eq!(h.active_connections.len(), 0);
if let Some((t, tuple)) = h.next_timeout {
assert_eq!(tuple, remote_tuple);
// The current Endpoint implementation gets timestamps using timestamp_cycles(), which
// increases VERY fast so the following inequality is guaranteed to be true. If the
// timestamp source gets coarser at some point, we might need an explicit wait before
// the previous h.receive_packet() :-s
assert!(t > old_timeout_value);
} else {
panic!("missing second expected timeout");
};
// Let's ACK the SYNACK.
{
let seq = h.connections[&remote_tuple].connection().first_not_sent().0;
inner_tcp_mut(&mut p)
.set_flags_after_ns(TcpFlags::ACK)
.set_ack_number(seq);
assert_eq!(h.receive_packet(&p, mock_callback), Ok(RecvEvent::Nothing));
}
// There should be no more active connections now, and also no pending timeout.
assert_eq!(h.active_connections.len(), 0);
assert_eq!(h.next_timeout, None);
// Make p a SYN packet again.
inner_tcp_mut(&mut p).set_flags_after_ns(TcpFlags::SYN);
// Create a new connection, from a different remote_port.
inner_tcp_mut(&mut p).set_source_port(remote_port + 1);
assert_eq!(
h.receive_packet(&p, mock_callback),
Ok(RecvEvent::NewConnectionSuccessful)
);
assert_eq!(h.connections.len(), 2);
assert_eq!(h.active_connections.len(), 1);
// SYNACK
assert_eq!(drain_packets(&mut h, local_addr, remote_addr), Ok(1));
// The timeout associated with the SYNACK of the second connection should be next.
assert_eq!(h.active_connections.len(), 0);
if let Some((_, tuple)) = h.next_timeout {
assert_ne!(tuple, ConnectionTuple::new(remote_addr, remote_port));
} else {
panic!("missing third expected timeout");
}
// No more room for another one.
{
let port = remote_port + 2;
inner_tcp_mut(&mut p).set_source_port(port);
assert_eq!(
h.receive_packet(&p, mock_callback),
Ok(RecvEvent::NewConnectionDropped)
);
assert_eq!(h.connections.len(), 2);
// We should get a RST.
assert_eq!(h.rst_queue.len(), 1);
let s = next_written_segment(&mut h, buf2.as_mut(), WriteEvent::Nothing);
assert!(s.flags_after_ns().intersects(TcpFlags::RST));
assert_eq!(s.destination_port(), port);
}
// Let's make the second endpoint evictable.
h.connections
.get_mut(&remote_tuple2)
.unwrap()
.set_eviction_threshold(0);
// The new connection will replace the old one.
assert_eq!(
h.receive_packet(&p, mock_callback),
Ok(RecvEvent::NewConnectionReplacing)
);
assert_eq!(h.connections.len(), 2);
assert_eq!(h.active_connections.len(), 1);
// One SYNACK for the new connection, and one RST for the old one.
assert_eq!(h.rst_queue.len(), 1);
assert_eq!(drain_packets(&mut h, local_addr, remote_addr), Ok(2));
assert_eq!(h.rst_queue.len(), 0);
assert_eq!(h.active_connections.len(), 0);
// Let's send another SYN to the first connection. This should make it reappear among the
// active connections (because it will have a RST to send), and then cause it to be removed
// altogether after sending the RST (because is_done() will be true).
inner_tcp_mut(&mut p).set_source_port(remote_port);
assert_eq!(h.receive_packet(&p, mock_callback), Ok(RecvEvent::Nothing));
assert_eq!(h.active_connections.len(), 1);
assert_eq!(drain_packets(&mut h, local_addr, remote_addr), Ok(1));
assert_eq!(h.connections.len(), 1);
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/dumbo/tcp/endpoint.rs | src/vmm/src/dumbo/tcp/endpoint.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// When designing the MMDS, we thought about a split in functionality, where we have some basic
// building blocks (such as the simplified TCP implementation, and the micro HTTP server) which can
// even be exported as libraries at some point, and then we have things built on top of those.
// That's why the Connection struct (and the upcoming TcpHandler) do not log things, or increase
// metrics, but rather express status via return values. The Endpoint struct implements our HTTP
// based interaction with the MMDS, making use of the aforementioned building blocks, and is
// totally specific to Firecracker. Ideally, the current crate should only contain the generic
// components, but since the separation/interface is not very well defined yet, we keep the
// Endpoint in here too for the time being.
use std::fmt::Debug;
use std::num::{NonZeroU16, NonZeroU64, Wrapping};
use micro_http::{Body, Request, RequestError, Response, StatusCode, Version};
use utils::time::timestamp_cycles;
use crate::dumbo::pdu::Incomplete;
use crate::dumbo::pdu::bytes::NetworkBytes;
use crate::dumbo::pdu::tcp::TcpSegment;
use crate::dumbo::tcp::connection::{Connection, PassiveOpenError, RecvStatusFlags};
use crate::dumbo::tcp::{MAX_WINDOW_SIZE, NextSegmentStatus, seq_after};
use crate::logger::{IncMetric, METRICS};
// TODO: These are currently expressed in cycles. Normally, they would be the equivalent of a
// certain duration, depending on the frequency of the CPU, but we still have a bit to go until
// that functionality is available, so we just use some conservative-ish values. Even on a fast
// 4GHz CPU, the first is roughly equal to 10 seconds, and the other is ~300 ms.
// Idle time (in cycles) since the last received segment after which an Endpoint is evictable.
const EVICTION_THRESHOLD: u64 = 40_000_000_000;
// Retransmission timeout period (in cycles) handed to the inner Connection.
const CONNECTION_RTO_PERIOD: u64 = 1_200_000_000;
// Maximum consecutive timeout-based retransmissions before the Connection resets itself.
const CONNECTION_RTO_COUNT_MAX: u16 = 15;
// This is one plus the size of the largest bytestream carrying an HTTP request we are willing to
// accept. It's limited in order to have a bound on memory usage. This value should be plenty for
// imaginable regular MMDS requests.
// TODO: Maybe at some point include this in the checks we do when populating the MMDS via the API,
// since it effectively limits the size of the keys (URIs) we're willing to use.
const RCV_BUF_MAX_SIZE: u32 = 2500;
/// Represents the local endpoint of an HTTP over TCP connection which carries GET requests
/// to the MMDS.
#[derive(Debug)]
pub struct Endpoint {
/// A fixed size buffer used to store bytes received via TCP. If the current request does not
/// fit within, we reset the connection, since we see this as a hard memory bound.
receive_buf: [u8; RCV_BUF_MAX_SIZE as usize],
/// Represents the next available position in the buffer.
receive_buf_left: usize,
/// This is filled with the HTTP response bytes after we parse a request and generate the
/// reply.
response_buf: Vec<u8>,
/// Initial response sequence, used to track if the entire `response_buf` was sent.
initial_response_seq: Wrapping<u32>,
/// Represents the sequence number associated with the first byte from response_buf.
response_seq: Wrapping<u32>,
/// The TCP connection that does all the receiving/sending work.
connection: Connection,
/// Timestamp (in cycles) associated with the most recent reception of a segment.
last_segment_received_timestamp: u64,
/// These many time units have to pass since receiving the last segment to make the current
/// Endpoint evictable.
eviction_threshold: u64,
/// We ignore incoming segments when this is set, and that happens when we decide to reset
/// the connection (or it decides to reset itself).
stop_receiving: bool,
}
// The "contract" for the Endpoint (if it implemented a trait or something) is something along
// these lines:
// - Incoming segments are passed by calling receive_segment().
// - To check whether the Endpoint has something to transmit, we must call write_next_segment()
// (the buf parameter should point to where the TCP segment begins). This function will return
// None if there's nothing to write (or there was an error writing, in which case it also
// increases a metric).
// - After calling either of the previous functions, the user should also call is_done() to see
// if the Endpoint is finished.
// - The is_evictable() function returns true if the Endpoint can be destroyed as far as its
// internal logic is concerned. It's going to be used by the connection handler when trying to
// find a new slot for incoming connections if none are free (when replacing an existing connection
// is the only option).
impl Endpoint {
/// Creates a new Endpoint from a [`crate::tcp::connection::Connection`]
/// ## Arguments:
/// - `segment`: The incoming `SYN`.
/// - `eviction_threshold`: CPU cycles that must elapse before this Endpoint is evictable
/// - `connection_rto_period`: How long the connection waits before a retransmission timeout
///   fires for the first segment which has not been acknowledged yet. This uses an opaque time
///   unit.
/// - `connection_rto_count_max`: How many consecutive timeout-based retransmissions may occur
///   before the connection resets itself.
/// ## Panics:
/// - `assert!(RCV_BUF_MAX_SIZE <= MAX_WINDOW_SIZE as usize);`
pub fn new<T: NetworkBytes + Debug>(
    segment: &TcpSegment<T>,
    eviction_threshold: NonZeroU64,
    connection_rto_period: NonZeroU64,
    connection_rto_count_max: NonZeroU16,
) -> Result<Self, PassiveOpenError> {
    // This simplifies things, and is a very reasonable assumption.
    #[allow(clippy::assertions_on_constants)]
    {
        assert!(RCV_BUF_MAX_SIZE <= MAX_WINDOW_SIZE);
    }
    let connection = Connection::passive_open(
        segment,
        RCV_BUF_MAX_SIZE,
        connection_rto_period,
        connection_rto_count_max,
    )?;
    // TODO: Using first_not_sent() makes sense here because a connection is currently
    // created via passive open only, so this points to the sequence number right after
    // the SYNACK. It might stop working like that if/when the implementation changes.
    let response_isn = connection.first_not_sent();
    Ok(Endpoint {
        receive_buf: [0u8; RCV_BUF_MAX_SIZE as usize],
        receive_buf_left: 0,
        response_buf: Vec::new(),
        response_seq: response_isn,
        initial_response_seq: response_isn,
        connection,
        last_segment_received_timestamp: timestamp_cycles(),
        eviction_threshold: eviction_threshold.get(),
        stop_receiving: false,
    })
}
/// Builds an `Endpoint` using the module-level default timing constants.
pub fn new_with_defaults<T: NetworkBytes + Debug>(
    segment: &TcpSegment<T>,
) -> Result<Self, PassiveOpenError> {
    // The unwraps are safe because the constants are greater than 0.
    let eviction = NonZeroU64::new(EVICTION_THRESHOLD).unwrap();
    let rto_period = NonZeroU64::new(CONNECTION_RTO_PERIOD).unwrap();
    let rto_count = NonZeroU16::new(CONNECTION_RTO_COUNT_MAX).unwrap();
    Self::new(segment, eviction, rto_period, rto_count)
}
/// Feeds the incoming segment `s` to the inner connection, buffers received request bytes,
/// and, once a complete HTTP request is detected in the buffer, builds the response via
/// `callback`.
///
/// Segments are silently ignored once `stop_receiving` is set; receive errors and unusual
/// statuses only bump MMDS metrics.
pub fn receive_segment<T: NetworkBytes + Debug, F: FnOnce(Request) -> Response>(
&mut self,
s: &TcpSegment<T>,
callback: F,
) {
if self.stop_receiving {
return;
}
let now = timestamp_cycles();
self.last_segment_received_timestamp = now;
// As long as new segments arrive, we save data in the buffer. We don't have to worry
// about writing out of bounds because we set the receive window of the connection to
// match the size of the buffer. When space frees up, we'll advance the window
// accordingly.
let (value, status) = match self.connection.receive_segment(
s,
&mut self.receive_buf[self.receive_buf_left..],
now,
) {
Ok(pair) => pair,
Err(_) => {
METRICS.mmds.rx_accepted_err.inc();
return;
}
};
if !status.is_empty() {
METRICS.mmds.rx_accepted_unusual.inc();
// Once the connection starts resetting, nothing further can be received from it.
if status.intersects(RecvStatusFlags::CONN_RESETTING) {
self.stop_receiving = true;
return;
}
}
// Advance receive_buf_left by how many bytes were actually written.
if let Some(len) = value {
self.receive_buf_left += len.get();
};
// The unwrap here should be safe because we assert the size whenever we append to
// response_buf.
if !self.response_buf.is_empty()
&& self.connection.highest_ack_received()
== self.initial_response_seq
+ Wrapping(u32::try_from(self.response_buf.len()).unwrap())
{
// If we got here, then we still have some response bytes to send (which are
// stored in self.response_buf).
// It seems we just received the last ACK we were waiting for, so the entire
// response has been successfully received. Set the new response_seq and clear
// the response_buf.
self.response_seq = self.connection.highest_ack_received();
self.initial_response_seq = self.response_seq;
self.response_buf.clear();
}
if self.response_buf.is_empty() {
// There's no pending response currently, so we're back to waiting for a request to be
// available in self.receive_buf.
// The following is some ugly but workable code that attempts to find the end of an
// HTTP 1.x request in receive_buf. We need to do this for now because
// parse_request_bytes() expects the entire request contents as parameter.
if self.receive_buf_left > 2 {
let b = self.receive_buf.as_mut();
for i in 0..self.receive_buf_left - 1 {
// We're basically looking for a double new line, which can only appear at the
// end of a valid request. Both "\n\n" and "\n\r\n" terminators are accepted.
if b[i] == b'\n' {
let end = if b[i + 1] == b'\n' {
i + 2
} else if i + 3 <= self.receive_buf_left && &b[i + 1..i + 3] == b"\r\n" {
i + 3
} else {
continue;
};
// We found a potential request, let's parse it.
let response = parse_request_bytes(&b[..end], callback);
// The unwrap is safe because a Vec will allocate more space until all the
// writes succeed.
response.write_all(&mut self.response_buf).unwrap();
// Sanity check because the current logic operates under this assumption.
assert!(self.response_buf.len() < u32::MAX as usize);
// We have to remove the bytes up to end from receive_buf, by shifting the
// others to the beginning of the buffer, and updating receive_buf_left.
// Also, advance the rwnd edge of the inner connection.
b.copy_within(end.., 0);
self.receive_buf_left -= end;
// Safe to unwrap because we assert that the response buffer is small
// enough.
self.connection
.advance_local_rwnd_edge(u32::try_from(end).unwrap());
break;
}
}
}
if self.receive_buf_left == self.receive_buf.len() {
// If we get here the buffer is full, but we still couldn't identify the end of a
// request, so we reset because we are over the maximum request size.
self.connection.reset();
self.stop_receiving = true;
return;
}
}
// We close the connection after receiving a FIN, and making sure there are no more
// responses to send.
if self.connection.fin_received() && self.response_buf.is_empty() {
self.connection.close();
}
}
/// Asks the inner connection to emit its next segment into `buf`, offering the unsent
/// tail of the pending response (if any) as the payload source.
///
/// Returns `None` when there is nothing to transmit or when the write failed (in which
/// case the `mmds.tx_errors` metric is incremented).
pub fn write_next_segment<'a>(
    &mut self,
    buf: &'a mut [u8],
    mss_reserved: u16,
) -> Option<Incomplete<TcpSegment<'a, &'a mut [u8]>>> {
    // Offer the not-yet-sent portion of response_buf (if any) as payload.
    let tcp_payload_src = if self.response_buf.is_empty() {
        None
    } else {
        let offset = (self.response_seq - self.initial_response_seq).0 as usize;
        Some((&self.response_buf[offset..], self.response_seq))
    };
    let write_result = self.connection.write_next_segment(
        buf,
        mss_reserved,
        tcp_payload_src,
        timestamp_cycles(),
    );
    match write_result {
        Err(_) => {
            METRICS.mmds.tx_errors.inc();
            None
        }
        Ok(maybe_segment) => {
            if let Some(segment) = maybe_segment.as_ref() {
                // Account for the payload bytes just handed to the connection.
                self.response_seq += Wrapping(u32::from(segment.inner().payload_len()));
            }
            maybe_segment
        }
    }
}
/// Returns whether the inner connection has finished (nothing left to receive or send).
#[inline]
pub fn is_done(&self) -> bool {
self.connection.is_done()
}
/// Returns whether enough cycles have elapsed since the last received segment for this
/// Endpoint to be replaced; wrapping_sub keeps this well-defined if the cycle counter wraps.
#[inline]
pub fn is_evictable(&self) -> bool {
timestamp_cycles().wrapping_sub(self.last_segment_received_timestamp)
> self.eviction_threshold
}
/// Describes when this endpoint will have its next segment ready to transmit.
pub fn next_segment_status(&self) -> NextSegmentStatus {
    let have_unsent_response = !self.response_buf.is_empty();
    // New data may only go out while the peer's receive window extends past what we have
    // already sent (short-circuited so the window check runs only when data is pending).
    if have_unsent_response
        && seq_after(
            self.connection.remote_rwnd_edge(),
            self.connection.first_not_sent(),
        )
    {
        return NextSegmentStatus::Available;
    }
    if self.connection.dup_ack_pending() {
        return NextSegmentStatus::Available;
    }
    self.connection.control_segment_or_timeout_status()
}
/// Read-only access to the underlying TCP connection.
#[inline]
pub fn connection(&self) -> &Connection {
&self.connection
}
}
/// Convenience helper: a `Response` with the default HTTP version, the given status code,
/// and `body` attached.
fn build_response(status_code: StatusCode, body: Body) -> Response {
    let mut r = Response::new(Version::default(), status_code);
    r.set_body(body);
    r
}
/// Parses the request bytes and builds a `micro_http::Response` by the given callback function.
///
/// Parse failures map to an error response: malformed requests become 400, deliberately
/// unsupported versions/methods become 501, and oversized requests become 413.
fn parse_request_bytes<F: FnOnce(Request) -> Response>(
    byte_stream: &[u8],
    callback: F,
) -> Response {
    match Request::try_from(byte_stream, None) {
        Ok(request) => callback(request),
        Err(err) => match err {
            RequestError::BodyWithoutPendingRequest
            | RequestError::HeadersWithoutPendingRequest
            | RequestError::Overflow
            | RequestError::Underflow => {
                build_response(StatusCode::BadRequest, Body::new(err.to_string()))
            }
            RequestError::InvalidUri(msg) => {
                build_response(StatusCode::BadRequest, Body::new(msg.to_string()))
            }
            RequestError::HeaderError(msg) => {
                build_response(StatusCode::BadRequest, Body::new(msg.to_string()))
            }
            RequestError::InvalidRequest => build_response(
                StatusCode::BadRequest,
                Body::new("Invalid request.".to_string()),
            ),
            RequestError::InvalidHttpVersion(msg) | RequestError::InvalidHttpMethod(msg) => {
                build_response(StatusCode::NotImplemented, Body::new(msg.to_string()))
            }
            RequestError::SizeLimitExceeded(_, _) => {
                build_response(StatusCode::PayloadTooLarge, Body::new(err.to_string()))
            }
        },
    }
}
#[cfg(test)]
mod tests {
use std::str::from_utf8;
use super::*;
use crate::dumbo::pdu::tcp::Flags as TcpFlags;
use crate::dumbo::tcp::connection::tests::ConnectionTester;
use crate::dumbo::tcp::tests::mock_callback;
impl Endpoint {
// Test-only hook: overrides the eviction threshold (in cycles) so tests can force
// is_evictable() to return true without actually waiting.
pub fn set_eviction_threshold(&mut self, value: u64) {
self.eviction_threshold = value;
}
}
#[test]
#[allow(clippy::cognitive_complexity)]
// End-to-end exercise of the Endpoint state machine: handshake, request/response exchange,
// buffer reuse across many requests, eviction, and reset on request-buffer overflow.
fn test_endpoint() {
let mut buf1 = [0u8; 500];
let mut buf2 = [0u8; 500];
let mut write_buf = [0u8; RCV_BUF_MAX_SIZE as usize + 100];
let mut t = ConnectionTester::new();
let mut syn = t.write_syn(buf1.as_mut());
// Put another flag on the SYN so it becomes invalid.
syn.set_flags_after_ns(TcpFlags::ACK);
assert_eq!(
Endpoint::new_with_defaults(&syn).unwrap_err(),
PassiveOpenError::InvalidSyn
);
// Fix the SYN and create an endpoint.
syn.set_flags_after_ns(TcpFlags::SYN);
let remote_isn = syn.sequence_number();
let mut endpoint = Endpoint::new_with_defaults(&syn).unwrap();
// Let's complete the three-way handshake. The next segment sent by the endpoint should
// be a SYNACK.
assert_eq!(endpoint.next_segment_status(), NextSegmentStatus::Available);
let endpoint_isn = {
// We need this block to delimit the mut borrow of write_buf.
let s = endpoint
.write_next_segment(write_buf.as_mut(), t.mss_reserved)
.unwrap();
assert_eq!(s.inner().flags_after_ns(), TcpFlags::SYN | TcpFlags::ACK);
s.inner().sequence_number()
};
// A RTO should be pending until the SYNACK is ACKed.
if let NextSegmentStatus::Timeout(_) = endpoint.next_segment_status() {
assert_eq!(
endpoint.next_segment_status(),
endpoint.connection().control_segment_or_timeout_status()
);
} else {
panic!("missing expected timeout.");
}
// And now we ACK the SYNACK.
let mut ctrl = t.write_ctrl(buf2.as_mut());
ctrl.set_flags_after_ns(TcpFlags::ACK);
ctrl.set_ack_number(endpoint_isn.wrapping_add(1));
assert!(!endpoint.connection.is_established());
endpoint.receive_segment(&ctrl, mock_callback);
assert!(endpoint.connection.is_established());
// Also, there should be nothing to send now anymore, nor any timeout pending.
assert_eq!(endpoint.next_segment_status(), NextSegmentStatus::Nothing);
// Incomplete because it's missing the newlines at the end.
let incomplete_request = b"GET http://169.254.169.255/asdfghjkl HTTP/1.1";
{
let mut data = t.write_data(write_buf.as_mut(), incomplete_request.as_ref());
data.set_flags_after_ns(TcpFlags::ACK);
data.set_sequence_number(remote_isn.wrapping_add(1));
data.set_ack_number(endpoint_isn.wrapping_add(1));
endpoint.receive_segment(&data, mock_callback);
}
assert_eq!(endpoint.receive_buf_left, incomplete_request.len());
// 1 for the SYN.
let mut remote_first_not_sent =
remote_isn.wrapping_add(1 + u32::try_from(incomplete_request.len()).unwrap());
// The endpoint should write an ACK at this point.
{
assert_eq!(endpoint.next_segment_status(), NextSegmentStatus::Available);
let s = endpoint
.write_next_segment(write_buf.as_mut(), t.mss_reserved)
.unwrap();
assert_eq!(s.inner().flags_after_ns(), TcpFlags::ACK);
assert_eq!(s.inner().ack_number(), remote_first_not_sent);
}
// There should be nothing else to send.
assert_eq!(endpoint.next_segment_status(), NextSegmentStatus::Nothing);
let rest_of_the_request = b"\r\n\r\n";
// Let's also send the newlines.
{
let mut data = t.write_data(write_buf.as_mut(), rest_of_the_request.as_ref());
data.set_flags_after_ns(TcpFlags::ACK);
data.set_sequence_number(remote_first_not_sent);
data.set_ack_number(endpoint_isn + 1);
endpoint.receive_segment(&data, mock_callback);
}
remote_first_not_sent =
remote_first_not_sent.wrapping_add(rest_of_the_request.len().try_into().unwrap());
let mut endpoint_first_not_sent;
// We should get a data segment that also ACKs the latest bytes received.
{
assert_eq!(endpoint.next_segment_status(), NextSegmentStatus::Available);
let s = endpoint
.write_next_segment(write_buf.as_mut(), t.mss_reserved)
.unwrap();
assert_eq!(s.inner().flags_after_ns(), TcpFlags::ACK);
assert_eq!(s.inner().ack_number(), remote_first_not_sent);
let response = from_utf8(s.inner().payload()).unwrap();
// The response should contain "200" because the HTTP request is correct.
assert!(response.contains("200"));
endpoint_first_not_sent = s
.inner()
.sequence_number()
.wrapping_add(u32::from(s.inner().payload_len()));
}
// Cool, now let's check that even though receive_buf is limited to some value, we can
// respond to any number of requests, as long as each fits individually inside the buffer.
// We're going to use the simple approach where we send the same request over and over
// again, for a relatively large number of iterations.
let complete_request = b"GET http://169.254.169.255/asdfghjkl HTTP/1.1\r\n\r\n";
let last_request = b"GET http://169.254.169.255/asdfghjkl HTTP/1.1\r\n\r\n123";
// Send one request for each byte in receive_buf, just to be sure.
let max_iter = endpoint.receive_buf.len();
for i in 1..=max_iter {
// We want to use last_request for the last request.
let request = if i == max_iter {
last_request.as_ref()
} else {
complete_request.as_ref()
};
// Send request.
{
let mut data = t.write_data(write_buf.as_mut(), request);
data.set_flags_after_ns(TcpFlags::ACK);
data.set_sequence_number(remote_first_not_sent);
data.set_ack_number(endpoint_first_not_sent);
endpoint.receive_segment(&data, mock_callback);
}
remote_first_not_sent =
remote_first_not_sent.wrapping_add(request.len().try_into().unwrap());
// Check response.
{
let s = endpoint
.write_next_segment(write_buf.as_mut(), t.mss_reserved)
.unwrap();
assert_eq!(s.inner().flags_after_ns(), TcpFlags::ACK);
assert_eq!(s.inner().ack_number(), remote_first_not_sent);
let response = from_utf8(s.inner().payload()).unwrap();
assert!(response.contains("200"));
endpoint_first_not_sent =
endpoint_first_not_sent.wrapping_add(u32::from(s.inner().payload_len()));
}
}
// The value of receive_buf_left should be 3 right now, because of the trailing chars from
// last_request.
assert_eq!(endpoint.receive_buf_left, 3);
// Unless the machine running the tests is super slow for some reason, we should be nowhere
// near the expiry of the eviction timer.
assert!(!endpoint.is_evictable());
// Let's hack this a bit and change the eviction_threshold to 0.
endpoint.set_eviction_threshold(0);
// The endpoint should be evictable now.
assert!(endpoint.is_evictable());
// Finally, let's fill self.receive_buf with the following request, and see if we get the
// reset we expect on the next segment.
let request_to_fill = vec![0u8; RCV_BUF_MAX_SIZE as usize - endpoint.receive_buf_left];
{
// Hack: have to artificially increase t.mss to create this segment which is 2k+.
t.mss = RCV_BUF_MAX_SIZE.try_into().unwrap();
let mut data = t.write_data(write_buf.as_mut(), request_to_fill.as_ref());
data.set_flags_after_ns(TcpFlags::ACK);
data.set_sequence_number(remote_first_not_sent);
data.set_ack_number(endpoint_first_not_sent);
endpoint.receive_segment(&data, mock_callback);
}
{
let s = endpoint
.write_next_segment(write_buf.as_mut(), t.mss_reserved)
.unwrap();
assert_eq!(s.inner().flags_after_ns(), TcpFlags::RST);
}
}
#[test]
fn test_parse_request_bytes_error() {
// Test unsupported HTTP version.
let request_bytes = b"GET http://169.254.169.255/ HTTP/2.0\r\n\r\n";
let mut expected_response = Response::new(Version::Http11, StatusCode::NotImplemented);
expected_response.set_body(Body::new("Unsupported HTTP version.".to_string()));
let actual_response = parse_request_bytes(request_bytes, mock_callback);
assert_eq!(actual_response, expected_response);
// Test invalid URI (empty URI).
let request_bytes = b"GET HTTP/1.0\r\n\r\n";
let mut expected_response = Response::new(Version::Http11, StatusCode::BadRequest);
expected_response.set_body(Body::new("Empty URI not allowed.".to_string()));
let actual_response = parse_request_bytes(request_bytes, mock_callback);
assert_eq!(actual_response, expected_response);
// Test invalid HTTP methods.
let invalid_methods = ["POST", "HEAD", "DELETE", "CONNECT", "OPTIONS", "TRACE"];
for method in invalid_methods.iter() {
let request_bytes = format!("{} http://169.254.169.255/ HTTP/1.0\r\n\r\n", method);
let mut expected_response = Response::new(Version::Http11, StatusCode::NotImplemented);
expected_response.set_body(Body::new("Unsupported HTTP method.".to_string()));
let actual_response = parse_request_bytes(request_bytes.as_bytes(), mock_callback);
assert_eq!(actual_response, expected_response);
}
// Test valid methods.
let valid_methods = ["PUT", "PATCH", "GET"];
for method in valid_methods.iter() {
let request_bytes = format!("{} http://169.254.169.255/ HTTP/1.0\r\n\r\n", method);
let expected_response = Response::new(Version::Http11, StatusCode::OK);
let actual_response = parse_request_bytes(request_bytes.as_bytes(), mock_callback);
assert_eq!(actual_response, expected_response);
}
// Test invalid HTTP format.
let request_bytes = b"GET / HTTP/1.1\r\n";
let mut expected_response = Response::new(Version::Http11, StatusCode::BadRequest);
expected_response.set_body(Body::new("Invalid request.".to_string()));
let actual_response = parse_request_bytes(request_bytes, mock_callback);
assert_eq!(actual_response, expected_response);
// Test invalid HTTP headers.
let request_bytes = b"PATCH http://localhost/home HTTP/1.1\r\n\
Expect: 100-continue\r\n\
Transfer-Encoding: identity; q=0\r\n\
Content-Length: 26\r\n\r\nthis is not\n\r\na json \nbody";
assert!(
parse_request_bytes(request_bytes, mock_callback)
.body()
.is_none()
);
let request_bytes = b"PATCH http://localhost/home HTTP/1.1\r\n\
Expect: 100-continue\r\n\
Transfer-Encoding: identity; q=0\r\n\
Content-Length: alpha\r\n\r\nthis is not\n\r\na json \nbody";
let mut expected_response = Response::new(Version::Http11, StatusCode::BadRequest);
expected_response.set_body(Body::new(
"Invalid value. Key:Content-Length; Value: alpha".to_string(),
));
let actual_response = parse_request_bytes(request_bytes, mock_callback);
assert_eq!(actual_response, expected_response);
let request_bytes = b"PATCH http://localhost/home HTTP/1.1\r\n\
Expect: 100-continue\r\n\
Transfer-Encoding: identity; q=0\r\n\
Content-Length: 67\r\n\
Accept-Encoding: deflate, compress, *;q=0\r\n\r\nthis is not\n\r\na json \nbody";
let mut expected_response = Response::new(Version::Http11, StatusCode::BadRequest);
expected_response.set_body(Body::new(
"Invalid value. Key:Accept-Encoding; Value: *;q=0".to_string(),
));
let actual_response = parse_request_bytes(request_bytes, mock_callback);
assert_eq!(actual_response, expected_response);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/dumbo/pdu/bytes.rs | src/vmm/src/dumbo/pdu/bytes.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines traits which allow byte slices to be interpreted as sequences of bytes that stand for
//! different values packed together using network byte ordering (such as network packets).
//!
//! The main use of these traits is reading and writing numerical values at a given offset in the
//! underlying slice. Why are they needed? Given a byte slice, there are two approaches to
//! reading/writing packet data that come to mind:
//!
//! (1) Have structs which represent the potential contents of each packet type, unsafely cast the
//! bytes slice to a struct pointer/reference (after doing the required checks), and then use the
//! newly obtained pointer/reference to access the data.
//!
//! (2) Access fields by reading bytes at the appropriate offset from the original slice.
//!
//! The first solution looks more appealing at first, but it requires some unsafe code. Moreover,
//! de-referencing unaligned pointers or references is considered undefined behaviour in Rust, and
//! it's not clear whether this undermines the approach or not. Until any further developments,
//! the second option is used, based on the `NetworkBytes` implementation.
//!
//! What's with the `T: Deref<Target = [u8]>`? Is there really a need to be that generic?
//! Not really. The logic in this crate currently expects to work with byte slices (`&[u8]` and
//! `&mut [u8]`), but there's a significant inconvenience. Consider `NetworkBytes` is defined as:
//!
//! ```
//! struct NetworkBytes<'a> {
//! bytes: &'a [u8],
//! }
//! ```
//!
//! This is perfectly fine for reading values from immutable slices, but what about writing values?
//! Implementing methods such as `fn write_something(&mut self)`, is not really possible, because
//! even with a mutable reference to `self`, `self.bytes` is still an immutable slice. On the other
//! hand, `NetworkBytes` can be defined as:
//!
//! ```
//! struct NetworkBytes<'a> {
//! bytes: &'a mut [u8],
//! }
//! ```
//!
//! This allows both reads and writes, but requires a mutable reference at all times (and it looks
//! weird to use one for immutable operations). This is where one interesting feature of Rust
//! comes in handy; given a type `Something<T>`, it's possible to implement different features
//! depending on trait bounds on `T`. For `NetworkBytes`, if `T` implements `Deref<Target = [u8]>`
//! (which `&[u8]` does), read operations are possible to define. If `T` implements
//! `DerefMut<Target = [u8]>`, write operations are also a possibility. Since
//! `DerefMut<Target = [u8]>` implies `Deref<Target = [u8]>`, `NetworkBytes<&mut [u8]>` implements
//! both read and write operations.
//!
//! This can theoretically lead to code bloat when using both `&[u8]` and `&mut [u8]` (as opposed
//! to just `&mut [u8]`), but most calls should be inlined anyway, so it probably doesn't matter
//! in the end. `NetworkBytes` itself implements `Deref` (and `DerefMut` when `T: DerefMut`), so
//! this line of reasoning can be extended to structs which represent different kinds of protocol
//! data units (such as IPv4 packets, Ethernet frames, etc.).
//!
//! Finally, why `Deref` and not something like `AsRef`? The answer is `Deref` coercion, which in
//! this case means that a `NetworkBytes` value will automatically coerce to `&[u8]`
//! (or `&mut [u8]`), without having to go through an explicit `as_ref()` call, which makes the
//! code easier to work with.
//!
//! Method names have the **unchecked** suffix as a reminder they do not check whether the
//! read/write goes beyond the boundaries of a slice. Callers must take the necessary precautions
//! to avoid panics.
use std::fmt::Debug;
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
use crate::utils::byte_order;
/// Represents an immutable view into a sequence of bytes which stands for different values packed
/// together using network byte ordering.
pub trait NetworkBytes: Deref<Target = [u8]> {
    /// Reads an `u16` value from the specified offset, converting it to host byte ordering.
    ///
    /// # Panics
    ///
    /// This method will panic if `offset` is invalid.
    #[inline]
    fn ntohs_unchecked(&self, offset: usize) -> u16 {
        // The read panics when the offset is invalid, or there aren't enough bytes (2 in this
        // case) left until the end of the slice. The caller must ensure this doesn't happen (hence
        // the `unchecked` suffix).
        byte_order::read_be_u16(&self[offset..])
    }
    /// Reads an `u32` value from the specified offset, converting it to host byte ordering.
    ///
    /// # Panics
    ///
    /// This method will panic if `offset` is invalid.
    #[inline]
    fn ntohl_unchecked(&self, offset: usize) -> u32 {
        // Same contract as `ntohs_unchecked`, but 4 bytes must remain after `offset`.
        byte_order::read_be_u32(&self[offset..])
    }
    /// Shrinks the current slice to the given `len`.
    ///
    /// Does not check whether `len` is actually smaller than `self.len()`.
    ///
    /// # Panics
    ///
    /// This method will panic if `len` is greater than `self.len()`.
    fn shrink_unchecked(&mut self, len: usize);
}
/// Offers mutable access to a sequence of bytes which stands for different values packed
/// together using network byte ordering.
pub trait NetworkBytesMut: NetworkBytes + DerefMut<Target = [u8]> {
    /// Writes the given `u16` value at the specified `offset` using network byte ordering.
    ///
    /// # Panics
    ///
    /// If `value` cannot be written into `self` at the given `offset`
    /// (e.g. if `offset > self.len() - size_of::<u16>()`).
    #[inline]
    fn htons_unchecked(&mut self, offset: usize, value: u16) {
        // Phrased as an addition so the bound check cannot underflow when
        // `self.len() < size_of::<u16>()`: the previous `self.len() - size_of` form
        // would wrap around when overflow checks are disabled (release builds) and
        // let the assertion pass vacuously.
        assert!(offset + std::mem::size_of::<u16>() <= self.len());
        byte_order::write_be_u16(&mut self[offset..], value)
    }
    /// Writes the given `u32` value at the specified `offset` using network byte ordering.
    ///
    /// # Panics
    ///
    /// If `value` cannot be written into `self` at the given `offset`
    /// (e.g. if `offset > self.len() - size_of::<u32>()`).
    #[inline]
    fn htonl_unchecked(&mut self, offset: usize, value: u32) {
        // See `htons_unchecked` for why this is an addition rather than a subtraction.
        assert!(offset + std::mem::size_of::<u32>() <= self.len());
        byte_order::write_be_u32(&mut self[offset..], value)
    }
}
impl NetworkBytes for &[u8] {
    #[inline]
    fn shrink_unchecked(&mut self, len: usize) {
        // Shared slices are `Copy`, so the prefix reborrow keeps the original
        // lifetime and can simply be stored back. Panics if `len > self.len()`.
        let shorter: &[u8] = &self[..len];
        *self = shorter;
    }
}
impl NetworkBytes for &mut [u8] {
    #[inline]
    fn shrink_unchecked(&mut self, len: usize) {
        // Move the slice out (leaving an empty placeholder behind) so the
        // shortened front part keeps the original lifetime, then store it back.
        // Panics if `len > self.len()`.
        let (kept, _tail) = <[u8]>::split_at_mut(std::mem::take(self), len);
        *self = kept;
    }
}
// `&mut [u8]` gets all `NetworkBytesMut` write methods from their default implementations.
impl NetworkBytesMut for &mut [u8] {}
// This struct is used as a convenience for any type which contains a generic member implementing
// NetworkBytes with a lifetime, so we don't have to also add the PhantomData member each time. We
// use pub(super) here because we only want this to be usable by the child modules of `pdu`.
#[derive(Debug)]
pub(super) struct InnerBytes<'a, T: 'a> {
    // The wrapped byte-sequence-like value.
    bytes: T,
    // Ties the otherwise-unused lifetime `'a` to the struct without storing a reference.
    phantom: PhantomData<&'a T>,
}
impl<T: Debug> InnerBytes<'_, T> {
/// Creates a new instance as a wrapper around `bytes`.
#[inline]
pub fn new(bytes: T) -> Self {
InnerBytes {
bytes,
phantom: PhantomData,
}
}
}
impl<T: Deref<Target = [u8]> + Debug> Deref for InnerBytes<'_, T> {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        // Deref coercion turns `&T` into `&[u8]` (guaranteed by the trait bound).
        &self.bytes
    }
}
impl<T: DerefMut<Target = [u8]> + Debug> DerefMut for InnerBytes<'_, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut [u8] {
        // Mutable deref coercion turns `&mut T` into `&mut [u8]`.
        &mut self.bytes
    }
}
impl<T: NetworkBytes + Debug> NetworkBytes for InnerBytes<'_, T> {
    #[inline]
    fn shrink_unchecked(&mut self, len: usize) {
        // Forward to the wrapped value; the read accessors come from the trait's
        // default methods, which index `self` through the `Deref` impl.
        self.bytes.shrink_unchecked(len);
    }
}
// All `NetworkBytesMut` methods have default implementations, so delegation is automatic.
impl<T: NetworkBytesMut + Debug> NetworkBytesMut for InnerBytes<'_, T> {}
#[cfg(test)]
mod tests {
    use super::*;
    // Writing 2 bytes at offset 1 of a 2-byte buffer must trip the bounds assertion.
    #[test]
    #[should_panic]
    fn test_htons_unchecked() {
        let mut buf = [u8::default(); std::mem::size_of::<u16>()];
        let mut a = buf.as_mut();
        a.htons_unchecked(1, u16::default());
    }
    // Writing 4 bytes at offset 1 of a 4-byte buffer must trip the bounds assertion.
    #[test]
    #[should_panic]
    fn test_htonl_unchecked() {
        let mut buf = [u8::default(); std::mem::size_of::<u32>()];
        let mut a = buf.as_mut();
        a.htonl_unchecked(1, u32::default());
    }
    #[test]
    fn test_network_bytes() {
        let mut buf = [0u8; 1000];
        // Round-trip reads/writes through the `&mut [u8]` implementation, then
        // check that shrinking preserves the data still in range.
        {
            let mut a = buf.as_mut();
            a.htons_unchecked(1, 123);
            a.htonl_unchecked(100, 1234);
            assert_eq!(a.ntohs_unchecked(1), 123);
            assert_eq!(a.ntohl_unchecked(100), 1234);
            a.shrink_unchecked(500);
            assert_eq!(a.len(), 500);
            assert_eq!(a.ntohs_unchecked(1), 123);
            assert_eq!(a.ntohl_unchecked(100), 1234);
        }
        // The `&[u8]` implementation sees the same bytes and shrinks the same way.
        {
            let mut b = buf.as_ref();
            b.shrink_unchecked(500);
            assert_eq!(b.len(), 500);
            assert_eq!(b.ntohs_unchecked(1), 123);
            assert_eq!(b.ntohl_unchecked(100), 1234);
        }
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/dumbo/pdu/ipv4.rs | src/vmm/src/dumbo/pdu/ipv4.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Contains support for parsing and writing IPv4 packets.
//!
//! A picture of the IPv4 packet header can be found [here] (watch out for the MSB 0 bit numbering).
//!
//! [here]: https://en.wikipedia.org/wiki/IPv4#Packet_structure
use std::convert::From;
use std::fmt::Debug;
use std::net::Ipv4Addr;
use crate::dumbo::pdu::bytes::{InnerBytes, NetworkBytes, NetworkBytesMut};
use crate::dumbo::pdu::{Incomplete, ethernet};
// Byte offsets of the fields in the fixed part of the IPv4 header (see the
// packet layout linked in the module docs).
const VERSION_AND_IHL_OFFSET: usize = 0;
const DSCP_AND_ECN_OFFSET: usize = 1;
const TOTAL_LEN_OFFSET: usize = 2;
const IDENTIFICATION_OFFSET: usize = 4;
const FLAGS_AND_FRAGMENTOFF_OFFSET: usize = 6;
const TTL_OFFSET: usize = 8;
const PROTOCOL_OFFSET: usize = 9;
const HEADER_CHECKSUM_OFFSET: usize = 10;
const SOURCE_ADDRESS_OFFSET: usize = 12;
const DESTINATION_ADDRESS_OFFSET: usize = 16;
// The fixed header is 20 bytes long; options (if any) would start here.
const OPTIONS_OFFSET: u8 = 20;
/// Indicates version 4 of the IP protocol
pub const IPV4_VERSION: u8 = 0x04;
/// Default TTL value
// NOTE(review): a TTL of 1 suggests packets are only meant to reach a directly
// attached peer — confirm against the callers before relying on this.
pub const DEFAULT_TTL: u8 = 1;
/// The IP protocol number associated with TCP.
pub const PROTOCOL_TCP: u8 = 0x06;
/// The IP protocol number associated with UDP.
pub const PROTOCOL_UDP: u8 = 0x11;
/// Describes the errors which may occur while handling IPv4 packets.
// `displaydoc::Display` derives the `Display` impl from the per-variant doc comments.
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum Ipv4Error {
    /// The header checksum is invalid.
    Checksum,
    /// The header length is invalid.
    HeaderLen,
    /// The total length of the packet is invalid.
    InvalidTotalLen,
    /// The length of the given slice does not match the length of the packet.
    SliceExactLen,
    /// The length of the given slice is less than the IPv4 header length.
    SliceTooShort,
    /// The version header field is invalid.
    Version,
}
/// Interprets the inner bytes as an IPv4 packet.
#[derive(Debug)]
pub struct IPv4Packet<'a, T: 'a> {
    // Backing byte sequence; `InnerBytes` carries the lifetime parameter for us.
    bytes: InnerBytes<'a, T>,
}
#[allow(clippy::len_without_is_empty)]
impl<T: NetworkBytes + Debug> IPv4Packet<'_, T> {
    /// Interpret `bytes` as an IPv4Packet without checking the validity of the header fields, and
    /// the length of the inner byte sequence.
    ///
    /// # Panics
    ///
    /// This method does not panic, but further method calls on the resulting object may panic if
    /// `bytes` contains invalid input.
    #[inline]
    pub fn from_bytes_unchecked(bytes: T) -> Self {
        IPv4Packet {
            bytes: InnerBytes::new(bytes),
        }
    }
    /// Attempts to interpret `bytes` as an IPv4 packet, checking the validity of the header fields
    /// and the length of the inner byte sequence.
    pub fn from_bytes(bytes: T, verify_checksum: bool) -> Result<Self, Ipv4Error> {
        let bytes_len = bytes.len();
        // At least a full fixed-size (20 byte) header must be present.
        if bytes_len < usize::from(OPTIONS_OFFSET) {
            return Err(Ipv4Error::SliceTooShort);
        }
        let packet = IPv4Packet::from_bytes_unchecked(bytes);
        let (version, header_len) = packet.version_and_header_len();
        if version != IPV4_VERSION {
            return Err(Ipv4Error::Version);
        }
        let total_len = packet.total_len() as usize;
        // The header must fit in the advertised total length, and the slice must
        // contain exactly the advertised number of bytes.
        if total_len < header_len.into() {
            return Err(Ipv4Error::InvalidTotalLen);
        }
        if total_len != bytes_len {
            return Err(Ipv4Error::SliceExactLen);
        }
        // 20 bytes (ihl == 5) is the smallest valid IPv4 header.
        if header_len < OPTIONS_OFFSET {
            return Err(Ipv4Error::HeaderLen);
        }
        // We ignore the TTL field since only routers should care about it. An end host has no
        // reason really to discard an otherwise valid packet.
        // A valid header (checksum field included) sums to 0 in one's complement.
        if verify_checksum && packet.compute_checksum_unchecked(header_len.into()) != 0 {
            return Err(Ipv4Error::Checksum);
        }
        Ok(packet)
    }
    /// Returns the value of the `version` header field, and the header length.
    ///
    /// This method returns the actual length (in bytes) of the header, and not the value of the
    /// `ihl` header field).
    #[inline]
    pub fn version_and_header_len(&self) -> (u8, u8) {
        let x = self.bytes[VERSION_AND_IHL_OFFSET];
        let ihl = x & 0x0f;
        // `ihl` counts 32-bit words, so the byte length is `ihl * 4`.
        let header_len = ihl << 2;
        (x >> 4, header_len)
    }
    /// Returns the packet header length (in bytes).
    #[inline]
    pub fn header_len(&self) -> u8 {
        let (_, header_len) = self.version_and_header_len();
        header_len
    }
    /// Returns the values of the `dscp` and `ecn` header fields.
    #[inline]
    pub fn dscp_and_ecn(&self) -> (u8, u8) {
        let x = self.bytes[DSCP_AND_ECN_OFFSET];
        // dscp is the upper 6 bits; ecn the lower 2.
        (x >> 2, x & 0b11)
    }
    /// Returns the value of the 'total length' header field.
    #[inline]
    pub fn total_len(&self) -> u16 {
        self.bytes.ntohs_unchecked(TOTAL_LEN_OFFSET)
    }
    /// Returns the value of the `identification` header field.
    #[inline]
    pub fn identification(&self) -> u16 {
        self.bytes.ntohs_unchecked(IDENTIFICATION_OFFSET)
    }
    /// Returns the values of the `flags` and `fragment offset` header fields.
    #[inline]
    pub fn flags_and_fragment_offset(&self) -> (u8, u16) {
        let x = self.bytes.ntohs_unchecked(FLAGS_AND_FRAGMENTOFF_OFFSET);
        // flags are the top 3 bits; the fragment offset the remaining 13.
        ((x >> 13) as u8, x & 0x1fff)
    }
    /// Returns the value of the `ttl` header field.
    #[inline]
    pub fn ttl(&self) -> u8 {
        self.bytes[TTL_OFFSET]
    }
    /// Returns the value of the `protocol` header field.
    #[inline]
    pub fn protocol(&self) -> u8 {
        self.bytes[PROTOCOL_OFFSET]
    }
    /// Returns the value of the `header checksum` header field.
    #[inline]
    pub fn header_checksum(&self) -> u16 {
        self.bytes.ntohs_unchecked(HEADER_CHECKSUM_OFFSET)
    }
    /// Returns the source IPv4 address of the packet.
    #[inline]
    pub fn source_address(&self) -> Ipv4Addr {
        Ipv4Addr::from(self.bytes.ntohl_unchecked(SOURCE_ADDRESS_OFFSET))
    }
    /// Returns the destination IPv4 address of the packet.
    #[inline]
    pub fn destination_address(&self) -> Ipv4Addr {
        Ipv4Addr::from(self.bytes.ntohl_unchecked(DESTINATION_ADDRESS_OFFSET))
    }
    /// Returns a byte slice containing the payload, using the given header length value to compute
    /// the payload offset.
    ///
    /// # Panics
    ///
    /// This method may panic if the value of `header_len` is invalid.
    #[inline]
    pub fn payload_unchecked(&self, header_len: usize) -> &[u8] {
        self.bytes.split_at(header_len).1
    }
    /// Returns a byte slice that contains the payload of the packet.
    #[inline]
    pub fn payload(&self) -> &[u8] {
        self.payload_unchecked(self.header_len().into())
    }
    /// Returns the length of the inner byte sequence.
    ///
    /// This is equal to the output of the `total_len()` method for properly constructed instances
    /// of `IPv4Packet`.
    #[inline]
    pub fn len(&self) -> usize {
        self.bytes.len()
    }
    /// Computes and returns the packet header checksum using the provided header length.
    ///
    /// A nice description of how this works can be found [here]. May panic for invalid values of
    /// `header_len`.
    ///
    /// # Panics
    ///
    /// This method may panic if the value of `header_len` is invalid.
    ///
    /// [here]: https://en.wikipedia.org/wiki/IPv4_header_checksum
    pub fn compute_checksum_unchecked(&self, header_len: usize) -> u16 {
        let mut sum = 0u32;
        // Sum the header as a sequence of big-endian 16-bit words.
        for i in 0..header_len / 2 {
            sum += u32::from(self.bytes.ntohs_unchecked(i * 2));
        }
        // Fold the carry bits back into the low 16 bits until none remain, then
        // take the one's complement below.
        while sum >> 16 != 0 {
            sum = (sum & 0xffff) + (sum >> 16);
        }
        // Safe to unwrap due to the while loop.
        !u16::try_from(sum).unwrap()
    }
    /// Computes and returns the packet header checksum.
    #[inline]
    pub fn compute_checksum(&self) -> u16 {
        self.compute_checksum_unchecked(self.header_len().into())
    }
}
impl<T: NetworkBytesMut + Debug> IPv4Packet<'_, T> {
    /// Attempts to write an IPv4 packet header to `buf`, making sure there is enough space.
    ///
    /// This method returns an incomplete packet, because the size of the payload might be unknown
    /// at this point. IP options are not allowed, which means `header_len == OPTIONS_OFFSET`. The
    /// `dscp`, `ecn`, `identification`, `flags`, and `fragment_offset` fields are set to 0. The
    /// `ttl` is set to a default value. The `total_len` and `checksum` fields will be set when
    /// the length of the incomplete packet is determined.
    pub fn write_header(
        buf: T,
        protocol: u8,
        src_addr: Ipv4Addr,
        dst_addr: Ipv4Addr,
    ) -> Result<Incomplete<Self>, Ipv4Error> {
        if buf.len() < usize::from(OPTIONS_OFFSET) {
            return Err(Ipv4Error::SliceTooShort);
        }
        let mut packet = IPv4Packet::from_bytes_unchecked(buf);
        packet
            .set_version_and_header_len(IPV4_VERSION, OPTIONS_OFFSET)
            .set_dscp_and_ecn(0, 0)
            .set_identification(0)
            .set_flags_and_fragment_offset(0, 0)
            .set_ttl(DEFAULT_TTL)
            .set_protocol(protocol)
            .set_source_address(src_addr)
            .set_destination_address(dst_addr);
        Ok(Incomplete::new(packet))
    }
    /// Sets the values of the `version` and `ihl` header fields (the latter is computed from the
    /// value of `header_len`).
    #[inline]
    pub fn set_version_and_header_len(&mut self, version: u8, header_len: u8) -> &mut Self {
        let version = version << 4;
        // Only the low 4 bits of `header_len / 4` fit in the ihl field; oversized
        // values wrap (see the tests in this file).
        let ihl = (header_len >> 2) & 0xf;
        self.bytes[VERSION_AND_IHL_OFFSET] = version | ihl;
        self
    }
    /// Sets the values of the `dscp` and `ecn` header fields.
    #[inline]
    pub fn set_dscp_and_ecn(&mut self, dscp: u8, ecn: u8) -> &mut Self {
        self.bytes[DSCP_AND_ECN_OFFSET] = (dscp << 2) | ecn;
        self
    }
    /// Sets the value of the `total length` header field.
    #[inline]
    pub fn set_total_len(&mut self, value: u16) -> &mut Self {
        self.bytes.htons_unchecked(TOTAL_LEN_OFFSET, value);
        self
    }
    /// Sets the value of the `identification` header field.
    #[inline]
    pub fn set_identification(&mut self, value: u16) -> &mut Self {
        self.bytes.htons_unchecked(IDENTIFICATION_OFFSET, value);
        self
    }
    /// Sets the values of the `flags` and `fragment offset` header fields.
    #[inline]
    pub fn set_flags_and_fragment_offset(&mut self, flags: u8, fragment_offset: u16) -> &mut Self {
        // 3 flag bits on top of a 13-bit fragment offset.
        let value = (u16::from(flags) << 13) | fragment_offset;
        self.bytes
            .htons_unchecked(FLAGS_AND_FRAGMENTOFF_OFFSET, value);
        self
    }
    /// Sets the value of the `ttl` header field.
    #[inline]
    pub fn set_ttl(&mut self, value: u8) -> &mut Self {
        self.bytes[TTL_OFFSET] = value;
        self
    }
    /// Sets the value of the `protocol` header field.
    #[inline]
    pub fn set_protocol(&mut self, value: u8) -> &mut Self {
        self.bytes[PROTOCOL_OFFSET] = value;
        self
    }
    /// Sets the value of the `header checksum` header field.
    #[inline]
    pub fn set_header_checksum(&mut self, value: u16) -> &mut Self {
        self.bytes.htons_unchecked(HEADER_CHECKSUM_OFFSET, value);
        self
    }
    /// Sets the source address of the packet.
    #[inline]
    pub fn set_source_address(&mut self, addr: Ipv4Addr) -> &mut Self {
        self.bytes
            .htonl_unchecked(SOURCE_ADDRESS_OFFSET, u32::from(addr));
        self
    }
    /// Sets the destination address of the packet.
    #[inline]
    pub fn set_destination_address(&mut self, addr: Ipv4Addr) -> &mut Self {
        self.bytes
            .htonl_unchecked(DESTINATION_ADDRESS_OFFSET, u32::from(addr));
        self
    }
    /// Returns a mutable byte slice representing the payload of the packet, using the provided
    /// header length to compute the payload offset.
    ///
    /// # Panics
    ///
    /// This method may panic if the value of `header_len` is invalid.
    #[inline]
    pub fn payload_mut_unchecked(&mut self, header_len: usize) -> &mut [u8] {
        self.bytes.split_at_mut(header_len).1
    }
    /// Returns a mutable byte slice representing the payload of the packet.
    #[inline]
    pub fn payload_mut(&mut self) -> &mut [u8] {
        // Can't use self.header_len() as a fn parameter on the following line, because
        // the borrow checker complains. This may change when it becomes smarter.
        let header_len = self.header_len();
        self.payload_mut_unchecked(header_len.into())
    }
}
/// An incomplete packet is one where the payload length has not been determined yet.
///
/// It can be transformed into an `IPv4Packet` by specifying the size of the payload, and
/// shrinking the inner byte sequence to be as large as the packet itself (this includes setting
/// the `total length` header field).
impl<'a, T: NetworkBytesMut + Debug> Incomplete<IPv4Packet<'a, T>> {
    /// Transforms `self` into an `IPv4Packet` based on the supplied header and payload length. May
    /// panic for invalid values of the input parameters.
    ///
    /// # Panics
    ///
    /// This method may panic if the combination of `header_len` and `payload_len` is invalid,
    /// or any of the individual values are invalid.
    #[inline]
    pub fn with_header_and_payload_len_unchecked(
        mut self,
        header_len: u8,
        payload_len: u16,
        compute_checksum: bool,
    ) -> IPv4Packet<'a, T> {
        // NOTE(review): this sum can exceed u16::MAX (panic in debug, wrap in
        // release); per the `unchecked` contract the caller must guarantee that
        // the combination fits.
        let total_len = u16::from(header_len) + payload_len;
        {
            let packet = &mut self.inner;
            // This unchecked is fine as long as total_len is smaller than the length of the
            // original slice, which should be the case if our code is not wrong.
            packet.bytes.shrink_unchecked(total_len.into());
            // Set the total_len.
            packet.set_total_len(total_len);
            if compute_checksum {
                // Ensure this is set to 0 first.
                packet.set_header_checksum(0);
                // Now compute the actual checksum.
                let checksum = packet.compute_checksum_unchecked(header_len.into());
                packet.set_header_checksum(checksum);
            }
        }
        self.inner
    }
    /// Transforms `self` into an `IPv4Packet` based on the supplied options and payload length.
    ///
    /// # Panics
    ///
    /// This method may panic if the combination of `options_len` and `payload_len` is invalid,
    /// or any of the individual values are invalid.
    #[inline]
    pub fn with_options_and_payload_len_unchecked(
        self,
        options_len: u8,
        payload_len: u16,
        compute_checksum: bool,
    ) -> IPv4Packet<'a, T> {
        // Options immediately follow the 20-byte fixed header.
        let header_len = OPTIONS_OFFSET + options_len;
        self.with_header_and_payload_len_unchecked(header_len, payload_len, compute_checksum)
    }
    /// Transforms `self` into an `IPv4Packet` based on the supplied payload length. May panic for
    /// invalid values of the input parameters.
    ///
    /// # Panics
    ///
    /// This method may panic if the value of `header_len` is invalid.
    #[inline]
    pub fn with_payload_len_unchecked(
        self,
        payload_len: u16,
        compute_checksum: bool,
    ) -> IPv4Packet<'a, T> {
        // Reuse whatever header length was already written to the buffer.
        let header_len = self.inner().header_len();
        self.with_header_and_payload_len_unchecked(header_len, payload_len, compute_checksum)
    }
}
/// This function checks if `buf` may hold an IPv4Packet heading towards the given address. Cannot
/// produce false negatives.
#[inline]
pub fn test_speculative_dst_addr(buf: &[u8], addr: Ipv4Addr) -> bool {
    // A frame shorter than an Ethernet header plus a minimal (20 byte) IPv4
    // header cannot contain a destination address field at all.
    let min_len = ethernet::PAYLOAD_OFFSET + usize::from(OPTIONS_OFFSET);
    if buf.len() < min_len {
        return false;
    }
    // The length check above makes the unchecked accessors safe here.
    let packet = IPv4Packet::from_bytes_unchecked(&buf[ethernet::PAYLOAD_OFFSET..]);
    packet.destination_address() == addr
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::dumbo::MacAddr;
    // Largest header length expressible by the 4-bit ihl field: 15 * 4 bytes.
    const MAX_HEADER_LEN: u8 = 60;
    #[test]
    fn test_set_get() {
        // Round-trip every header field through its setter/getter pair.
        let mut a = [0u8; 100];
        let mut p = IPv4Packet::from_bytes_unchecked(a.as_mut());
        assert_eq!(p.version_and_header_len(), (0, 0));
        p.set_version_and_header_len(IPV4_VERSION, 24);
        assert_eq!(p.version_and_header_len(), (IPV4_VERSION, 24));
        assert_eq!(p.dscp_and_ecn(), (0, 0));
        p.set_dscp_and_ecn(3, 2);
        assert_eq!(p.dscp_and_ecn(), (3, 2));
        assert_eq!(p.total_len(), 0);
        p.set_total_len(123);
        assert_eq!(p.total_len(), 123);
        assert_eq!(p.identification(), 0);
        p.set_identification(1112);
        assert_eq!(p.identification(), 1112);
        assert_eq!(p.flags_and_fragment_offset(), (0, 0));
        p.set_flags_and_fragment_offset(7, 1000);
        assert_eq!(p.flags_and_fragment_offset(), (7, 1000));
        assert_eq!(p.ttl(), 0);
        p.set_ttl(123);
        assert_eq!(p.ttl(), 123);
        assert_eq!(p.protocol(), 0);
        p.set_protocol(114);
        assert_eq!(p.protocol(), 114);
        assert_eq!(p.header_checksum(), 0);
        p.set_header_checksum(1234);
        assert_eq!(p.header_checksum(), 1234);
        let addr = Ipv4Addr::new(10, 11, 12, 13);
        assert_eq!(p.source_address(), Ipv4Addr::from(0));
        p.set_source_address(addr);
        assert_eq!(p.source_address(), addr);
        assert_eq!(p.destination_address(), Ipv4Addr::from(0));
        p.set_destination_address(addr);
        assert_eq!(p.destination_address(), addr);
    }
    #[test]
    fn test_constructors() {
        // We fill this with 1 to notice if the appropriate values get zeroed out.
        let mut buf = [1u8; 100];
        let src = Ipv4Addr::new(10, 100, 11, 21);
        let dst = Ipv4Addr::new(192, 168, 121, 35);
        let buf_len = u16::try_from(buf.len()).unwrap();
        // No IPv4 option support for now.
        let header_len = OPTIONS_OFFSET;
        let payload_len = buf_len - u16::from(OPTIONS_OFFSET);
        {
            let mut p = IPv4Packet::write_header(buf.as_mut(), PROTOCOL_TCP, src, dst)
                .unwrap()
                .with_header_and_payload_len_unchecked(header_len, payload_len, true);
            assert_eq!(p.version_and_header_len(), (IPV4_VERSION, header_len));
            assert_eq!(p.dscp_and_ecn(), (0, 0));
            assert_eq!(p.total_len(), buf_len);
            assert_eq!(p.identification(), 0);
            assert_eq!(p.flags_and_fragment_offset(), (0, 0));
            assert_eq!(p.ttl(), DEFAULT_TTL);
            assert_eq!(p.protocol(), PROTOCOL_TCP);
            // With the checksum field zeroed, the computed checksum must equal the
            // stored one; with it in place, the full header must sum to zero.
            let checksum = p.header_checksum();
            p.set_header_checksum(0);
            let computed_checksum = p.compute_checksum();
            assert_eq!(computed_checksum, checksum);
            p.set_header_checksum(computed_checksum);
            assert_eq!(p.compute_checksum(), 0);
            assert_eq!(p.source_address(), src);
            assert_eq!(p.destination_address(), dst);
            // The mutable borrow of buf will end here.
        }
        IPv4Packet::from_bytes(buf.as_ref(), true).unwrap();
        // Now let's check some error conditions.
        // Using a helper function here instead of a closure because it's hard (impossible?) to
        // specify lifetime bounds for closure arguments.
        fn p(buf: &mut [u8]) -> IPv4Packet<'_, &mut [u8]> {
            IPv4Packet::from_bytes_unchecked(buf)
        }
        // Just a helper closure.
        let look_for_error = |buf: &[u8], err: Ipv4Error| {
            assert_eq!(IPv4Packet::from_bytes(buf, true).unwrap_err(), err);
        };
        // Invalid version.
        p(buf.as_mut()).set_version_and_header_len(IPV4_VERSION + 1, header_len);
        look_for_error(buf.as_ref(), Ipv4Error::Version);
        // Short header length.
        p(buf.as_mut()).set_version_and_header_len(IPV4_VERSION, OPTIONS_OFFSET - 1);
        look_for_error(buf.as_ref(), Ipv4Error::HeaderLen);
        // Header length too large. We have to add at least 4 here, because the setter converts
        // header_len into the ihl field via division by 4, so anything less will lead to a valid
        // result (the ihl corresponding to IPV4_MAX_HEADER_LEN). When decoding the header_len back
        // from the packet, we'll get a smaller value than OPTIONS_OFFSET, because it wraps around
        // modulo 60, since the ihl field is only four bits wide, and then gets multiplied with 4.
        p(buf.as_mut()).set_version_and_header_len(IPV4_VERSION, MAX_HEADER_LEN + 4);
        look_for_error(buf.as_ref(), Ipv4Error::HeaderLen);
        // Total length smaller than header length.
        p(buf.as_mut())
            .set_version_and_header_len(IPV4_VERSION, OPTIONS_OFFSET)
            .set_total_len(u16::from(OPTIONS_OFFSET) - 1);
        look_for_error(buf.as_ref(), Ipv4Error::InvalidTotalLen);
        // Total len not matching slice length.
        p(buf.as_mut()).set_total_len(buf_len - 1);
        look_for_error(buf.as_ref(), Ipv4Error::SliceExactLen);
        // The original packet header should contain a valid checksum.
        assert_eq!(p(buf.as_mut()).set_total_len(buf_len).compute_checksum(), 0);
        // Let's make it invalid.
        let checksum = p(buf.as_mut()).header_checksum();
        p(buf.as_mut()).set_header_checksum(checksum.wrapping_add(1));
        look_for_error(buf.as_ref(), Ipv4Error::Checksum);
        // Finally, a couple of tests for a small buffer.
        let mut small_buf = [0u8; 1];
        look_for_error(small_buf.as_ref(), Ipv4Error::SliceTooShort);
        assert_eq!(
            IPv4Packet::write_header(small_buf.as_mut(), PROTOCOL_TCP, src, dst).unwrap_err(),
            Ipv4Error::SliceTooShort
        );
    }
    #[test]
    fn test_incomplete() {
        let mut buf = [0u8; 100];
        let src = Ipv4Addr::new(10, 100, 11, 21);
        let dst = Ipv4Addr::new(192, 168, 121, 35);
        let payload_len = 30;
        // This is kinda mandatory, since we don't implement options support yet.
        let options_len = 0;
        let header_len = OPTIONS_OFFSET + options_len;
        // Both completion paths must produce a packet with a valid checksum whose
        // total_len matches the shrunk slice.
        {
            let p = IPv4Packet::write_header(buf.as_mut(), PROTOCOL_TCP, src, dst)
                .unwrap()
                .with_payload_len_unchecked(payload_len, true);
            assert_eq!(p.compute_checksum(), 0);
            assert_eq!(p.total_len() as usize, p.len());
            assert_eq!(p.len(), usize::from(header_len) + usize::from(payload_len));
        }
        {
            let p = IPv4Packet::write_header(buf.as_mut(), PROTOCOL_TCP, src, dst)
                .unwrap()
                .with_options_and_payload_len_unchecked(options_len, payload_len, true);
            assert_eq!(p.compute_checksum(), 0);
            assert_eq!(p.total_len() as usize, p.len());
            assert_eq!(p.len(), usize::from(header_len) + usize::from(payload_len));
        }
    }
    #[test]
    fn test_speculative() {
        let mut buf = [0u8; 1000];
        let mac = MacAddr::from_bytes_unchecked(&[0; 6]);
        let ip = Ipv4Addr::new(1, 2, 3, 4);
        let other_ip = Ipv4Addr::new(5, 6, 7, 8);
        // Matching destination address behind an Ethernet header.
        {
            let mut eth = crate::dumbo::pdu::ethernet::EthernetFrame::write_incomplete(
                buf.as_mut(),
                mac,
                mac,
                0,
            )
            .unwrap();
            IPv4Packet::from_bytes_unchecked(eth.inner_mut().payload_mut())
                .set_destination_address(ip);
        }
        assert!(test_speculative_dst_addr(buf.as_ref(), ip));
        // Non-matching destination address.
        {
            let mut eth = crate::dumbo::pdu::ethernet::EthernetFrame::write_incomplete(
                buf.as_mut(),
                mac,
                mac,
                0,
            )
            .unwrap();
            IPv4Packet::from_bytes_unchecked(eth.inner_mut().payload_mut())
                .set_destination_address(other_ip);
        }
        assert!(!test_speculative_dst_addr(buf.as_ref(), ip));
        // Buffers too short to hold the headers never match.
        let small = [0u8; 1];
        assert!(!test_speculative_dst_addr(small.as_ref(), ip));
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/dumbo/pdu/ethernet.rs | src/vmm/src/dumbo/pdu/ethernet.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Contains support for parsing and writing Ethernet frames. Does not currently offer support for
//! 802.1Q tags.
use std::fmt::Debug;
use super::Incomplete;
use super::bytes::{InnerBytes, NetworkBytes, NetworkBytesMut};
use crate::dumbo::MacAddr;
const DST_MAC_OFFSET: usize = 0;
const SRC_MAC_OFFSET: usize = 6;
const ETHERTYPE_OFFSET: usize = 12;
// We don't support 802.1Q tags.
// TODO: support 802.1Q tags?! If so, don't forget to change the speculative_test_* functions
// for ARP and IPv4.
/// Payload offset in an ethernet frame
pub const PAYLOAD_OFFSET: usize = 14;
/// Ethertype value for ARP frames.
pub const ETHERTYPE_ARP: u16 = 0x0806;
/// Ethertype value for IPv4 packets.
pub const ETHERTYPE_IPV4: u16 = 0x0800;
/// Describes the errors which may occur when handling Ethernet frames.
// NOTE: `displaydoc::Display` turns each variant's doc comment into its
// `Display` message, so the variant docs below are user-visible strings.
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum EthernetError {
    /// The specified byte sequence is shorter than the Ethernet header length.
    SliceTooShort,
}
/// Interprets the inner bytes as an Ethernet frame.
#[derive(Debug)]
pub struct EthernetFrame<'a, T: 'a> {
    // Underlying byte buffer; read-only methods require `T: NetworkBytes`,
    // mutating methods additionally require `T: NetworkBytesMut`.
    bytes: InnerBytes<'a, T>,
}
#[allow(clippy::len_without_is_empty)]
impl<T: NetworkBytes + Debug> EthernetFrame<'_, T> {
    /// Interprets `bytes` as an Ethernet frame without any validity checks.
    ///
    /// # Panics
    ///
    /// This method does not panic, but further method calls on the resulting object may panic if
    /// `bytes` contains invalid input.
    #[inline]
    pub fn from_bytes_unchecked(bytes: T) -> Self {
        EthernetFrame {
            bytes: InnerBytes::new(bytes),
        }
    }
    /// Checks whether the specified byte sequence can be interpreted as an Ethernet frame.
    // The only structural requirement is room for the 14-byte header; header
    // field values (e.g. the ethertype) are not validated here.
    #[inline]
    pub fn from_bytes(bytes: T) -> Result<Self, EthernetError> {
        if bytes.len() < PAYLOAD_OFFSET {
            return Err(EthernetError::SliceTooShort);
        }
        Ok(EthernetFrame::from_bytes_unchecked(bytes))
    }
    /// Returns the destination MAC address.
    // Header bytes [0, 6).
    #[inline]
    pub fn dst_mac(&self) -> MacAddr {
        MacAddr::from_bytes_unchecked(&self.bytes[DST_MAC_OFFSET..SRC_MAC_OFFSET])
    }
    /// Returns the source MAC address.
    // Header bytes [6, 12).
    #[inline]
    pub fn src_mac(&self) -> MacAddr {
        MacAddr::from_bytes_unchecked(&self.bytes[SRC_MAC_OFFSET..ETHERTYPE_OFFSET])
    }
    /// Returns the ethertype of the frame.
    // Header bytes [12, 14), stored in network (big-endian) order.
    #[inline]
    pub fn ethertype(&self) -> u16 {
        self.bytes.ntohs_unchecked(ETHERTYPE_OFFSET)
    }
    /// Returns the offset of the payload within the frame.
    #[inline]
    pub fn payload_offset(&self) -> usize {
        PAYLOAD_OFFSET
    }
    /// Returns the payload of the frame as a `&[u8]` slice.
    #[inline]
    pub fn payload(&self) -> &[u8] {
        self.bytes.split_at(self.payload_offset()).1
    }
    /// Returns the length of the frame.
    #[inline]
    pub fn len(&self) -> usize {
        self.bytes.len()
    }
}
impl<T: NetworkBytesMut + Debug> EthernetFrame<'_, T> {
    /// Attempts to write an Ethernet frame using the given header fields to `buf`.
    ///
    /// Returns `EthernetError::SliceTooShort` if `buf` cannot hold the 14-byte header.
    fn new_with_header(
        buf: T,
        dst_mac: MacAddr,
        src_mac: MacAddr,
        ethertype: u16,
    ) -> Result<Self, EthernetError> {
        if buf.len() < PAYLOAD_OFFSET {
            return Err(EthernetError::SliceTooShort);
        }
        let mut frame = EthernetFrame::from_bytes_unchecked(buf);
        frame
            .set_dst_mac(dst_mac)
            .set_src_mac(src_mac)
            .set_ethertype(ethertype);
        Ok(frame)
    }
    /// Attempts to write an incomplete Ethernet frame (whose length is currently unknown) to `buf`,
    /// using the specified header fields.
    #[inline]
    pub fn write_incomplete(
        buf: T,
        dst_mac: MacAddr,
        src_mac: MacAddr,
        ethertype: u16,
    ) -> Result<Incomplete<Self>, EthernetError> {
        Ok(Incomplete::new(Self::new_with_header(
            buf, dst_mac, src_mac, ethertype,
        )?))
    }
    /// Sets the destination MAC address.
    #[inline]
    pub fn set_dst_mac(&mut self, addr: MacAddr) -> &mut Self {
        self.bytes[DST_MAC_OFFSET..SRC_MAC_OFFSET].copy_from_slice(addr.get_bytes());
        self
    }
    /// Sets the source MAC address.
    #[inline]
    pub fn set_src_mac(&mut self, addr: MacAddr) -> &mut Self {
        self.bytes[SRC_MAC_OFFSET..ETHERTYPE_OFFSET].copy_from_slice(addr.get_bytes());
        self
    }
    /// Sets the ethertype of the frame.
    // Written in network (big-endian) order.
    #[inline]
    pub fn set_ethertype(&mut self, value: u16) -> &mut Self {
        self.bytes.htons_unchecked(ETHERTYPE_OFFSET, value);
        self
    }
    /// Returns the payload of the frame as a `&mut [u8]` slice.
    #[inline]
    pub fn payload_mut(&mut self) -> &mut [u8] {
        // We need this let to avoid confusing the borrow checker.
        let offset = self.payload_offset();
        self.bytes.split_at_mut(offset).1
    }
}
impl<'a, T: NetworkBytes + Debug> Incomplete<EthernetFrame<'a, T>> {
    /// Completes the inner frame by shrinking it to its actual length.
    ///
    /// # Panics
    ///
    /// This method may panic if the header length plus `payload_len` exceeds the length of the
    /// inner byte sequence.
    #[inline]
    pub fn with_payload_len_unchecked(mut self, payload_len: usize) -> EthernetFrame<'a, T> {
        let payload_offset = self.inner.payload_offset();
        self.inner
            .bytes
            .shrink_unchecked(payload_offset + payload_len);
        self.inner
    }
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use super::*;
#[test]
fn test_ethernet_frame() {
let mut a = [0u8; 10000];
let mut bad_array = [0u8; 1];
let dst_mac = MacAddr::from_str("01:23:45:67:89:ab").unwrap();
let src_mac = MacAddr::from_str("cd:ef:01:23:45:67").unwrap();
let ethertype = 1289;
assert_eq!(
EthernetFrame::from_bytes(bad_array.as_ref()).unwrap_err(),
EthernetError::SliceTooShort
);
assert_eq!(
EthernetFrame::new_with_header(bad_array.as_mut(), dst_mac, src_mac, ethertype)
.unwrap_err(),
EthernetError::SliceTooShort
);
{
let mut f1 =
EthernetFrame::new_with_header(a.as_mut(), dst_mac, src_mac, ethertype).unwrap();
assert_eq!(f1.dst_mac(), dst_mac);
assert_eq!(f1.src_mac(), src_mac);
assert_eq!(f1.ethertype(), ethertype);
f1.payload_mut()[1] = 132;
}
{
let f2 = EthernetFrame::from_bytes(a.as_ref()).unwrap();
assert_eq!(f2.dst_mac(), dst_mac);
assert_eq!(f2.src_mac(), src_mac);
assert_eq!(f2.ethertype(), ethertype);
assert_eq!(f2.payload()[1], 132);
assert_eq!(f2.len(), f2.bytes.len());
}
{
let f3 =
EthernetFrame::write_incomplete(a.as_mut(), dst_mac, src_mac, ethertype).unwrap();
let f3_complete = f3.with_payload_len_unchecked(123);
assert_eq!(f3_complete.len(), f3_complete.payload_offset() + 123);
}
}
}
#[cfg(kani)]
#[allow(dead_code)] // Avoid warning when using stubs.
mod kani_proofs {
use super::*;
use crate::utils::net::mac::MAC_ADDR_LEN;
// See the Virtual I/O Device (VIRTIO) specification, Sec. 5.1.6.2.
// https://docs.oasis-open.org/virtio/virtio/v1.2/csd01/virtio-v1.2-csd01.pdf
pub const MAX_FRAME_SIZE: usize = 1514;
const MAC_ADDR_LEN_USIZE: usize = MAC_ADDR_LEN as usize;
    impl<'a, T: NetworkBytesMut + Debug> EthernetFrame<'a, T> {
        // A frame is considered valid iff the buffer can hold the 14-byte header.
        fn is_valid(&self) -> bool {
            self.len() >= PAYLOAD_OFFSET
        }
    }
// We consider the MMDS Network Stack spec for all postconditions in the harnesses.
// See https://github.com/firecracker-microvm/firecracker/blob/main/docs/mmds/mmds-design.md#mmds-network-stack
    // Proof: for any buffer, len() reports the full slice and a valid frame's
    // payload is everything past the header.
    #[kani::proof]
    fn verify_from_bytes_unchecked() {
        // Create non-deterministic stream of bytes up to MAX_FRAME_SIZE
        let mut bytes: [u8; MAX_FRAME_SIZE] = kani::Arbitrary::any_array::<MAX_FRAME_SIZE>();
        let slice_length = bytes.len();
        // Verify from_bytes_unchecked
        let ethernet = EthernetFrame::from_bytes_unchecked(bytes.as_mut());
        // Check for post-conditions
        assert_eq!(ethernet.len(), slice_length);
        assert!(
            !(ethernet.is_valid()) || (ethernet.payload().len() == slice_length - PAYLOAD_OFFSET)
        );
    }
    // Proof: from_bytes() succeeds exactly when the buffer can hold the header.
    #[kani::proof]
    fn verify_from_bytes() {
        // Create non-deterministic stream of bytes up to MAX_FRAME_SIZE
        let mut bytes: [u8; MAX_FRAME_SIZE] = kani::Arbitrary::any_array::<MAX_FRAME_SIZE>();
        let slice_length = bytes.len();
        // Verify from_bytes
        let ethernet = EthernetFrame::from_bytes(bytes.as_mut());
        // Check for post-conditions
        if slice_length >= PAYLOAD_OFFSET {
            let ethernet = ethernet.unwrap();
            assert!(ethernet.is_valid());
            assert_eq!(ethernet.len(), slice_length);
            assert_eq!(ethernet.payload().len(), slice_length - PAYLOAD_OFFSET);
        } else {
            ethernet.unwrap_err();
        }
    }
    // Proof: set_dst_mac()/dst_mac() round-trip every byte of the address.
    #[kani::proof]
    fn verify_dst_mac() {
        // Create non-deterministic stream of bytes up to MAX_FRAME_SIZE
        let mut bytes: [u8; MAX_FRAME_SIZE] = kani::Arbitrary::any_array::<MAX_FRAME_SIZE>();
        // Create valid non-deterministic ethernet
        let ethernet = EthernetFrame::from_bytes(bytes.as_mut());
        kani::assume(ethernet.is_ok());
        let mut ethernet = ethernet.unwrap();
        // Verify set_dst_mac
        let mac_bytes: [u8; MAC_ADDR_LEN as usize] = kani::any();
        let dst_mac = MacAddr::from(mac_bytes);
        ethernet.set_dst_mac(dst_mac);
        // Verify dst_mac
        let dst_addr = EthernetFrame::dst_mac(&ethernet);
        // Check for post-conditions
        // MAC addresses should always have 48 bits
        assert_eq!(dst_addr.get_bytes().len(), MAC_ADDR_LEN as usize);
        // Check duality between set_dst_mac and dst_mac operations
        let i: usize = kani::any();
        kani::assume(i < mac_bytes.len());
        assert_eq!(mac_bytes[i], dst_addr.get_bytes()[i]);
    }
    // Proof: set_src_mac()/src_mac() round-trip every byte of the address.
    #[kani::proof]
    fn verify_src_mac() {
        // Create non-deterministic stream of bytes up to MAX_FRAME_SIZE
        let mut bytes: [u8; MAX_FRAME_SIZE] = kani::Arbitrary::any_array::<MAX_FRAME_SIZE>();
        // Create valid non-deterministic ethernet
        let ethernet = EthernetFrame::from_bytes(bytes.as_mut());
        kani::assume(ethernet.is_ok());
        let mut ethernet = ethernet.unwrap();
        // Verify set_src_mac
        let mac_bytes: [u8; MAC_ADDR_LEN as usize] = kani::any();
        let src_mac = MacAddr::from(mac_bytes);
        ethernet.set_src_mac(src_mac);
        // Verify src_mac
        let src_addr = EthernetFrame::src_mac(&ethernet);
        // Check for post-conditions
        // MAC addresses should always have 48 bits
        assert_eq!(src_addr.get_bytes().len(), MAC_ADDR_LEN as usize);
        // Check duality between set_src_mac and src_mac operations
        let i: usize = kani::any();
        kani::assume(i < mac_bytes.len());
        assert_eq!(mac_bytes[i], src_addr.get_bytes()[i]);
    }
    // Proof: writing the dst MAC or the ethertype never clobbers the src MAC
    // (the three header fields occupy disjoint byte ranges).
    #[kani::proof]
    fn verify_src_mac_isolation() {
        // Create non-deterministic stream of bytes up to MAX_FRAME_SIZE
        let mut bytes: [u8; MAX_FRAME_SIZE] = kani::Arbitrary::any_array::<MAX_FRAME_SIZE>();
        // Create valid non-deterministic ethernet
        let ethernet = EthernetFrame::from_bytes(bytes.as_mut());
        kani::assume(ethernet.is_ok());
        let mut ethernet = ethernet.unwrap();
        // Verify set_src_mac
        let mac_bytes: [u8; MAC_ADDR_LEN as usize] = kani::any();
        let src_mac = MacAddr::from(mac_bytes);
        ethernet.set_src_mac(src_mac);
        let payload_offset = ethernet.payload_offset();
        if kani::any() {
            let dst_mac_bytes: [u8; MAC_ADDR_LEN as usize] = kani::any();
            let dst_mac = MacAddr::from(dst_mac_bytes);
            ethernet.set_dst_mac(dst_mac);
        }
        if kani::any() {
            let ethertype_in: u16 = kani::any();
            ethernet.set_ethertype(ethertype_in);
        }
        // Payload info doesn't change
        assert_eq!(ethernet.payload_offset(), payload_offset);
        // Verify src_mac
        let src_addr = EthernetFrame::src_mac(&ethernet);
        // Check for post-conditions
        // MAC addresses should always have 48 bits
        assert_eq!(src_addr.get_bytes().len(), MAC_ADDR_LEN as usize);
        // Check duality between set_src_mac and src_mac operations
        let i: usize = kani::any();
        kani::assume(i < mac_bytes.len());
        assert_eq!(mac_bytes[i], src_addr.get_bytes()[i]);
    }
    // Proof: set_ethertype()/ethertype() round-trip any u16 value.
    #[kani::proof]
    fn verify_ethertype() {
        // Create non-deterministic stream of bytes up to MAX_FRAME_SIZE
        let mut bytes: [u8; MAX_FRAME_SIZE] = kani::Arbitrary::any_array::<MAX_FRAME_SIZE>();
        // Create valid non-deterministic ethernet
        let ethernet = EthernetFrame::from_bytes(bytes.as_mut());
        kani::assume(ethernet.is_ok());
        let mut ethernet = ethernet.unwrap();
        // Verify set_ethertype
        let ethertype_in: u16 = kani::any();
        ethernet.set_ethertype(ethertype_in);
        // Verify ethertype
        let ethertype_out = ethernet.ethertype();
        // Check for post-conditions
        // Check duality between set_ethertype and ethertype operations
        assert_eq!(ethertype_in, ethertype_out);
    }
#[kani::proof]
#[kani::unwind(1515)]
fn verify_payload() {
// Create non-deterministic stream of bytes up to MAX_FRAME_SIZE
let mut bytes: [u8; MAX_FRAME_SIZE] = kani::Arbitrary::any_array::<MAX_FRAME_SIZE>();
// Create valid non-deterministic ethernet
let ethernet = EthernetFrame::from_bytes(bytes.as_mut());
kani::assume(ethernet.is_ok());
let ethernet = ethernet.unwrap();
// Verify payload_offset
let payload_offset = ethernet.payload_offset();
// Verify payload()
let payload = ethernet.payload();
// Verify payload_mut
let payload_mut = ethernet.payload();
// Check for post-conditions
// Check payload_offset value
assert_eq!(payload_offset, PAYLOAD_OFFSET);
// Check equivalence
assert_eq!(payload, payload_mut);
}
    // Proof: new_with_header() writes exactly the three header fields and leaves
    // the frame spanning the whole buffer.
    #[kani::proof]
    fn verify_new_with_header() {
        // Create non-deterministic stream of bytes up to MAX_FRAME_SIZE
        let mut bytes: [u8; MAX_FRAME_SIZE] = kani::Arbitrary::any_array::<MAX_FRAME_SIZE>();
        let bytes_length = bytes.len();
        // Create valid non-deterministic dst_mac
        let dst_mac_bytes: [u8; MAC_ADDR_LEN as usize] =
            kani::Arbitrary::any_array::<MAC_ADDR_LEN_USIZE>();
        let dst_mac = MacAddr::from(dst_mac_bytes);
        // Create valid non-deterministic src_mac
        let src_mac_bytes: [u8; MAC_ADDR_LEN as usize] =
            kani::Arbitrary::any_array::<MAC_ADDR_LEN_USIZE>();
        let src_mac = MacAddr::from(src_mac_bytes);
        // Create valid non-deterministic ethertype
        let ethertype: u16 = kani::any();
        // Verify new_with_header
        let frame =
            EthernetFrame::new_with_header(bytes.as_mut(), dst_mac, src_mac, ethertype).unwrap();
        // Check for post-conditions
        assert_eq!(frame.dst_mac(), dst_mac);
        assert_eq!(frame.src_mac(), src_mac);
        assert_eq!(frame.ethertype(), ethertype);
        assert_eq!(frame.len(), bytes_length);
        assert!(frame.is_valid() && (frame.payload().len() == bytes_length - PAYLOAD_OFFSET));
    }
    // Proof: write_incomplete() yields an inner frame with the requested header.
    #[kani::proof]
    fn verify_write_incomplete() {
        // Create non-deterministic stream of bytes up to MAX_FRAME_SIZE
        let mut bytes: [u8; MAX_FRAME_SIZE] = kani::Arbitrary::any_array::<MAX_FRAME_SIZE>();
        // Create valid non-deterministic dst_mac
        let dst_mac_bytes: [u8; MAC_ADDR_LEN as usize] =
            kani::Arbitrary::any_array::<MAC_ADDR_LEN_USIZE>();
        let dst_mac = MacAddr::from(dst_mac_bytes);
        // Create valid non-deterministic src_mac
        let src_mac_bytes: [u8; MAC_ADDR_LEN as usize] =
            kani::Arbitrary::any_array::<MAC_ADDR_LEN_USIZE>();
        let src_mac = MacAddr::from(src_mac_bytes);
        // Create valid non-deterministic ethertype
        let ethertype: u16 = kani::any();
        // Verify write_incomplete
        let incomplete_frame =
            EthernetFrame::write_incomplete(bytes.as_mut(), dst_mac, src_mac, ethertype).unwrap();
        // Check for post-conditions
        assert_eq!(incomplete_frame.inner.dst_mac(), dst_mac);
        assert_eq!(incomplete_frame.inner.src_mac(), src_mac);
        assert_eq!(incomplete_frame.inner.ethertype(), ethertype);
    }
    // Proof: completing an incomplete frame with any in-bounds payload length
    // yields a valid frame with an unchanged header.
    #[kani::proof]
    #[kani::solver(cadical)]
    fn verify_with_payload_len_unchecked() {
        // Create non-deterministic stream of bytes up to MAX_FRAME_SIZE
        let mut bytes: [u8; MAX_FRAME_SIZE] = kani::Arbitrary::any_array::<MAX_FRAME_SIZE>();
        // Create valid non-deterministic dst_mac
        let dst_mac_bytes: [u8; MAC_ADDR_LEN as usize] =
            kani::Arbitrary::any_array::<MAC_ADDR_LEN_USIZE>();
        let dst_mac = MacAddr::from(dst_mac_bytes);
        // Create valid non-deterministic src_mac
        let src_mac_bytes: [u8; MAC_ADDR_LEN as usize] =
            kani::Arbitrary::any_array::<MAC_ADDR_LEN_USIZE>();
        let src_mac = MacAddr::from(src_mac_bytes);
        // Create valid non-deterministic ethertype
        let ethertype: u16 = kani::any();
        // Create a non-deterministic incomplete frame
        let incomplete_frame =
            EthernetFrame::write_incomplete(bytes.as_mut(), dst_mac, src_mac, ethertype).unwrap();
        let incomplete_frame_payload_offset = incomplete_frame.inner.payload_offset();
        let incomplete_frame_len = incomplete_frame.inner.len();
        // Create a non-deterministic payload_len
        let payload_len: usize = kani::any();
        kani::assume(payload_len <= incomplete_frame_len - incomplete_frame_payload_offset);
        // Verify with_payload_len_unchecked
        let unchecked_frame = incomplete_frame.with_payload_len_unchecked(payload_len);
        // Check for post-conditions
        assert!(unchecked_frame.is_valid());
        assert_eq!(unchecked_frame.dst_mac(), dst_mac);
        assert_eq!(unchecked_frame.src_mac(), src_mac);
        assert_eq!(unchecked_frame.ethertype(), ethertype);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/dumbo/pdu/udp.rs | src/vmm/src/dumbo/pdu/udp.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Contains support for parsing and writing User Datagram Protocol (UDP) packets,
//! with no support for jumbograms.
//!
//! Details of the UDP packet specification can be found at [1] [2].
//!
//! [1]: https://tools.ietf.org/html/rfc768
//! [2]: https://tools.ietf.org/html/rfc5405
use std::fmt::Debug;
use std::net::Ipv4Addr;
use super::bytes::{InnerBytes, NetworkBytes};
use crate::dumbo::pdu::bytes::NetworkBytesMut;
use crate::dumbo::pdu::{ChecksumProto, Incomplete};
const SOURCE_PORT_OFFSET: usize = 0;
const DESTINATION_PORT_OFFSET: usize = 2;
const LENGTH_OFFSET: usize = 4;
const CHECKSUM_OFFSET: usize = 6;
const PAYLOAD_OFFSET: usize = 8;
/// The header length is 8 octets (bytes).
pub const UDP_HEADER_SIZE: usize = 8;
// A UDP datagram is carried in a single IP packet and is hence limited
// to a maximum payload of 65,507 bytes for IPv4 and 65,527 bytes for IPv6 [2]
const IPV4_MAX_UDP_PACKET_SIZE: u16 = 65507;
/// Represents errors which may occur while parsing or writing a datagram.
// NOTE: `displaydoc::Display` turns each variant's doc comment into its
// `Display` message, so the variant docs below are user-visible strings.
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum UdpError {
    /// Invalid checksum.
    Checksum,
    // BUGFIX: the message previously said "Ethernet header length" — a
    // copy-paste from ethernet.rs. This is a UDP parsing error.
    /// The specified byte sequence is shorter than the UDP header length.
    DatagramTooShort,
    /// The payload to be added to the UDP packet exceeds the size allowed by the used IP version.
    PayloadTooBig,
}
/// Interprets the inner bytes as a UDP datagram.
#[derive(Debug)]
pub struct UdpDatagram<'a, T: 'a> {
    // Underlying byte buffer; read-only methods require `T: NetworkBytes`,
    // mutating methods additionally require `T: NetworkBytesMut`.
    bytes: InnerBytes<'a, T>,
}
#[allow(clippy::len_without_is_empty)]
impl<T: NetworkBytes + Debug> UdpDatagram<'_, T> {
    /// Interprets `bytes` as a UDP datagram without any validity checks.
    ///
    /// # Panics
    ///
    /// This method does not panic, but further method calls on the resulting object may panic if
    /// `bytes` contains invalid input.
    #[inline]
    pub fn from_bytes_unchecked(bytes: T) -> Self {
        UdpDatagram {
            bytes: InnerBytes::new(bytes),
        }
    }
    /// Interprets `bytes` as a UDP datagram if possible or returns
    /// the reason for failing to do so.
    ///
    /// When `verify_checksum` carries the pseudo-header addresses, a non-zero
    /// stored checksum is validated; a stored checksum of 0 means "no checksum"
    /// and is accepted as-is.
    #[inline]
    pub fn from_bytes(
        bytes: T,
        verify_checksum: Option<(Ipv4Addr, Ipv4Addr)>,
    ) -> Result<Self, UdpError> {
        if bytes.len() < UDP_HEADER_SIZE {
            return Err(UdpError::DatagramTooShort);
        }
        let datagram = UdpDatagram::from_bytes_unchecked(bytes);
        if let Some((src_addr, dst_addr)) = verify_checksum {
            // Since compute_checksum is shared between TCP and UDP and the UDP's RFC
            // requires that a computed checksum of 0 is transmitted as all ones value, we're
            // checking against 0xffff not 0
            if datagram.checksum() != 0 && datagram.compute_checksum(src_addr, dst_addr) != 0xffff {
                return Err(UdpError::Checksum);
            }
        }
        Ok(datagram)
    }
    /// Returns the source port of the UDP datagram.
    #[inline]
    pub fn source_port(&self) -> u16 {
        self.bytes.ntohs_unchecked(SOURCE_PORT_OFFSET)
    }
    /// Returns the destination port of the UDP datagram.
    #[inline]
    pub fn destination_port(&self) -> u16 {
        self.bytes.ntohs_unchecked(DESTINATION_PORT_OFFSET)
    }
    /// Returns the length of the datagram from its header.
    // Note this is the header *field*, not the length of the backing slice.
    #[inline]
    pub fn len(&self) -> u16 {
        self.bytes.ntohs_unchecked(LENGTH_OFFSET)
    }
    /// Returns the checksum value of the packet.
    #[inline]
    pub fn checksum(&self) -> u16 {
        self.bytes.ntohs_unchecked(CHECKSUM_OFFSET)
    }
    /// Returns the payload of the UDP datagram as a `&[u8]` slice.
    #[inline]
    pub fn payload(&self) -> &[u8] {
        // Payload offset is header len.
        self.bytes.split_at(PAYLOAD_OFFSET).1
    }
    /// Computes the checksum of a UDP datagram.
    // Uses the shared TCP/UDP one's-complement checksum over the pseudo-header
    // plus the datagram bytes.
    #[inline]
    pub fn compute_checksum(&self, src_addr: Ipv4Addr, dst_addr: Ipv4Addr) -> u16 {
        crate::dumbo::pdu::compute_checksum(&self.bytes, src_addr, dst_addr, ChecksumProto::Udp)
    }
}
impl<T: NetworkBytesMut + Debug> UdpDatagram<'_, T> {
    /// Writes an incomplete UDP datagram, which is missing the `checksum`, `src_port` and
    /// `dst_port` fields.
    ///
    /// # Arguments
    ///
    /// * `buf` - A buffer containing `NetworkBytesMut` representing a datagram.
    /// * `payload` - Datagram payload.
    // NOTE(review): assumes `buf` is at least `UDP_HEADER_SIZE + payload.len()`
    // bytes long; shrink_unchecked/copy_from_slice below will panic otherwise —
    // TODO confirm callers guarantee this.
    #[inline]
    pub fn write_incomplete_datagram(buf: T, payload: &[u8]) -> Result<Incomplete<Self>, UdpError> {
        let mut packet = UdpDatagram::from_bytes(buf, None)?;
        let len = payload.len() + UDP_HEADER_SIZE;
        // The total length (header included) must fit both in a u16 and within
        // the IPv4 UDP maximum.
        let len = match u16::try_from(len) {
            Ok(len) if len <= IPV4_MAX_UDP_PACKET_SIZE => len,
            _ => return Err(UdpError::PayloadTooBig),
        };
        packet.bytes.shrink_unchecked(len.into());
        packet.payload_mut().copy_from_slice(payload);
        packet.set_len(len);
        Ok(Incomplete::new(packet))
    }
    /// Sets the source port of the UDP datagram.
    #[inline]
    pub fn set_source_port(&mut self, src_port: u16) -> &mut Self {
        self.bytes.htons_unchecked(SOURCE_PORT_OFFSET, src_port);
        self
    }
    /// Sets the destination port of the UDP datagram.
    #[inline]
    pub fn set_destination_port(&mut self, dst_port: u16) -> &mut Self {
        self.bytes
            .htons_unchecked(DESTINATION_PORT_OFFSET, dst_port);
        self
    }
    /// Returns a mutable slice covering the payload of the UDP datagram.
    #[inline]
    pub fn payload_mut(&mut self) -> &mut [u8] {
        &mut self.bytes[PAYLOAD_OFFSET..]
    }
    /// Sets the length field in the UDP datagram header.
    #[inline]
    pub fn set_len(&mut self, len: u16) -> &mut Self {
        self.bytes.htons_unchecked(LENGTH_OFFSET, len);
        self
    }
    /// Sets the checksum of a UDP datagram.
    #[inline]
    pub fn set_checksum(&mut self, checksum: u16) -> &mut Self {
        self.bytes.htons_unchecked(CHECKSUM_OFFSET, checksum);
        self
    }
}
impl<'a, T: NetworkBytesMut + Debug> Incomplete<UdpDatagram<'a, T>> {
    /// Transforms `self` into a `UdpDatagram<T>` by specifying values for the `source port`,
    /// `destination port`, and (optionally) the information required to compute the checksum.
    #[inline]
    pub fn finalize(
        mut self,
        src_port: u16,
        dst_port: u16,
        compute_checksum: Option<(Ipv4Addr, Ipv4Addr)>,
    ) -> UdpDatagram<'a, T> {
        // Fill in the ports and zero the checksum field; zero is also what ends
        // up on the wire when no checksum computation is requested.
        self.inner
            .set_source_port(src_port)
            .set_destination_port(dst_port)
            .set_checksum(0);
        // With the pseudo-header addresses available, overwrite the zeroed field
        // with the real checksum.
        if let Some((src_addr, dst_addr)) = compute_checksum {
            let checksum = self.inner.compute_checksum(src_addr, dst_addr);
            self.inner.set_checksum(checksum);
        }
        self.inner
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::dumbo::pdu::udp::UdpDatagram;
#[test]
#[allow(clippy::len_zero)]
fn test_set_get() {
let mut raw = [0u8; 30];
let total_len = raw.len();
let mut p: UdpDatagram<&mut [u8]> = UdpDatagram::from_bytes_unchecked(raw.as_mut());
assert_eq!(p.source_port(), 0);
let src_port: u16 = 213;
p.set_source_port(src_port);
assert_eq!(p.source_port(), src_port);
assert_eq!(p.destination_port(), 0);
let dst_port: u16 = 64193;
p.set_destination_port(dst_port);
assert_eq!(p.destination_port(), dst_port);
assert_eq!(p.len(), 0);
let len = 12;
p.set_len(len);
assert_eq!(p.len(), len);
assert_eq!(p.checksum(), 0);
let checksum: u16 = 32;
p.set_checksum(32);
assert_eq!(p.checksum(), checksum);
let payload_length = total_len - UDP_HEADER_SIZE;
assert_eq!(p.payload().len(), payload_length);
let payload: Vec<u8> = (0..u8::try_from(payload_length).unwrap()).collect();
p.payload_mut().copy_from_slice(&payload);
assert_eq!(*p.payload(), payload[..]);
}
#[test]
fn test_failing_construction() {
let mut raw = [0u8; 8];
let huge_payload = [0u8; IPV4_MAX_UDP_PACKET_SIZE as usize];
assert_eq!(
UdpDatagram::write_incomplete_datagram(raw.as_mut(), &huge_payload).unwrap_err(),
UdpError::PayloadTooBig
);
let mut short_header = [0u8; UDP_HEADER_SIZE - 1];
assert_eq!(
UdpDatagram::from_bytes(short_header.as_mut(), None).unwrap_err(),
UdpError::DatagramTooShort
)
}
#[test]
fn test_construction() {
let mut packet = [0u8; 32 + UDP_HEADER_SIZE]; // 32-byte payload
let payload: Vec<u8> = (0..32).collect();
let src_port = 32133;
let dst_port = 22113;
let src_addr = Ipv4Addr::new(10, 100, 11, 21);
let dst_addr = Ipv4Addr::new(192, 168, 121, 35);
let p = UdpDatagram::write_incomplete_datagram(packet.as_mut(), &payload[..]).unwrap();
let mut p = p.finalize(src_port, dst_port, Some((src_addr, dst_addr)));
let checksum = p.checksum();
let c = p.compute_checksum(src_addr, dst_addr);
assert_eq!(c, 0xffff);
p.set_checksum(0);
let computed_checksum = p.compute_checksum(src_addr, dst_addr);
assert_eq!(checksum, computed_checksum);
let mut a = [1u8; 128];
let checksum = UdpDatagram::from_bytes_unchecked(a.as_mut()).checksum();
// Modify bytes in a by making a fake packet,
// to allow us to modify the checksum manually
let _ =
UdpDatagram::from_bytes_unchecked(a.as_mut()).set_checksum(checksum.wrapping_add(1));
let p_err = UdpDatagram::from_bytes(a.as_mut(), Some((src_addr, dst_addr))).unwrap_err();
assert_eq!(p_err, UdpError::Checksum);
}
#[test]
fn test_checksum() {
let mut bytes = [0u8; 2 + UDP_HEADER_SIZE]; // 2-byte payload
let correct_checksum: u16 = 0x14de;
let payload_bytes = b"bb";
let src_ip = Ipv4Addr::new(152, 1, 51, 27);
let dst_ip = Ipv4Addr::new(152, 14, 94, 75);
let p = UdpDatagram::write_incomplete_datagram(bytes.as_mut(), payload_bytes).unwrap();
let p = p.finalize(41103, 9876, Some((src_ip, dst_ip)));
assert_eq!(p.checksum(), correct_checksum);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/dumbo/pdu/tcp.rs | src/vmm/src/dumbo/pdu/tcp.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Contains support for parsing and writing TCP segments.
//!
//! [Here]'s a useful depiction of the TCP header layout (watch out for the MSB 0 bit numbering.)
//!
//! [Here]: https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_segment_structure
use std::cmp::min;
use std::fmt::Debug;
use std::net::Ipv4Addr;
use std::num::NonZeroU16;
use bitflags::bitflags;
use super::Incomplete;
use super::bytes::{InnerBytes, NetworkBytes, NetworkBytesMut};
use crate::dumbo::ByteBuffer;
use crate::dumbo::pdu::ChecksumProto;
const SOURCE_PORT_OFFSET: usize = 0;
const DESTINATION_PORT_OFFSET: usize = 2;
const SEQ_NUMBER_OFFSET: usize = 4;
const ACK_NUMBER_OFFSET: usize = 8;
const DATAOFF_RSVD_NS_OFFSET: usize = 12;
const FLAGS_AFTER_NS_OFFSET: usize = 13;
const WINDOW_SIZE_OFFSET: usize = 14;
const CHECKSUM_OFFSET: usize = 16;
const URG_POINTER_OFFSET: usize = 18;
const OPTIONS_OFFSET: u8 = 20;
const MAX_HEADER_LEN: u8 = 60;
const OPTION_KIND_EOL: u8 = 0x00;
const OPTION_KIND_NOP: u8 = 0x01;
const OPTION_KIND_MSS: u8 = 0x02;
const OPTION_LEN_MSS: u8 = 0x04;
// An arbitrarily chosen value, used for sanity checks.
const MSS_MIN: u16 = 100;
bitflags! {
    /// Represents the TCP header flags, with the exception of `NS`.
    ///
    /// These values are only valid in conjunction with the [`flags_after_ns()`] method (and its
    /// associated setter method), which operates on the header byte containing every other flag
    /// besides `NS`.
    ///
    /// [`flags_after_ns()`]: struct.TcpSegment.html#method.flags_after_ns
    // Bit positions match byte 13 of the TCP header (FLAGS_AFTER_NS_OFFSET),
    // i.e. the byte after the one holding the data offset / reserved / NS bits.
    #[derive(Debug, Copy, Clone, PartialEq)]
    pub struct Flags: u8 {
        /// Congestion window reduced.
        const CWR = 1 << 7;
        /// ECN-echo.
        const ECE = 1 << 6;
        /// Urgent pointer.
        const URG = 1 << 5;
        /// The acknowledgement number field is valid.
        const ACK = 1 << 4;
        /// Push flag.
        const PSH = 1 << 3;
        /// Reset the connection.
        const RST = 1 << 2;
        /// SYN flag.
        const SYN = 1 << 1;
        /// FIN flag.
        const FIN = 1;
    }
}
/// Describes the errors which may occur while handling TCP segments.
// NOTE: `displaydoc::Display` turns each variant's doc comment into its
// `Display` message, so the variant docs below are user-visible strings.
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum TcpError {
    /// Invalid checksum.
    Checksum,
    /// A payload has been specified for the segment, but the maximum readable length is 0.
    EmptyPayload,
    /// Invalid header length.
    HeaderLen,
    /// The MSS option contains an invalid value.
    MssOption,
    /// The remaining segment length cannot accommodate the MSS option.
    MssRemaining,
    /// The specified slice is shorter than the header length.
    SliceTooShort,
}
// TODO: The implementation of TcpSegment is IPv4 specific in regard to checksum computation. Maybe
// make it more generic at some point.
/// Interprets the inner bytes as a TCP segment.
#[derive(Debug)]
pub struct TcpSegment<'a, T: 'a> {
    // Underlying byte buffer; read-only methods require `T: NetworkBytes`,
    // mutating methods additionally require `T: NetworkBytesMut`.
    bytes: InnerBytes<'a, T>,
}
#[allow(clippy::len_without_is_empty)]
impl<T: NetworkBytes + Debug> TcpSegment<'_, T> {
    /// Returns the source port.
    #[inline]
    pub fn source_port(&self) -> u16 {
        self.bytes.ntohs_unchecked(SOURCE_PORT_OFFSET)
    }
    /// Returns the destination port.
    #[inline]
    pub fn destination_port(&self) -> u16 {
        self.bytes.ntohs_unchecked(DESTINATION_PORT_OFFSET)
    }
    /// Returns the sequence number.
    #[inline]
    pub fn sequence_number(&self) -> u32 {
        self.bytes.ntohl_unchecked(SEQ_NUMBER_OFFSET)
    }
    /// Returns the acknowledgement number (only valid if the `ACK` flag is set).
    #[inline]
    pub fn ack_number(&self) -> u32 {
        self.bytes.ntohl_unchecked(ACK_NUMBER_OFFSET)
    }
    /// Returns the header length, the value of the reserved bits, and whether the `NS` flag
    /// is set or not.
    #[inline]
    pub fn header_len_rsvd_ns(&self) -> (u8, u8, bool) {
        let value = self.bytes[DATAOFF_RSVD_NS_OFFSET];
        // The high nibble is the data offset, expressed in 32-bit words.
        let data_offset = value >> 4;
        let header_len = data_offset * 4;
        // Bits 1-3 of this byte are reserved; bit 0 is the NS flag.
        let rsvd = value & 0x0e;
        let ns = (value & 1) != 0;
        (header_len, rsvd, ns)
    }
    /// Returns the length of the header.
    #[inline]
    pub fn header_len(&self) -> u8 {
        self.header_len_rsvd_ns().0
    }
    /// Returns the TCP header flags, with the exception of `NS`.
    #[inline]
    pub fn flags_after_ns(&self) -> Flags {
        Flags::from_bits_truncate(self.bytes[FLAGS_AFTER_NS_OFFSET])
    }
    /// Returns the value of the `window size` header field.
    #[inline]
    pub fn window_size(&self) -> u16 {
        self.bytes.ntohs_unchecked(WINDOW_SIZE_OFFSET)
    }
    /// Returns the value of the `checksum` header field.
    #[inline]
    pub fn checksum(&self) -> u16 {
        self.bytes.ntohs_unchecked(CHECKSUM_OFFSET)
    }
    /// Returns the value of the `urgent pointer` header field (only valid if the
    /// `URG` flag is set).
    #[inline]
    pub fn urgent_pointer(&self) -> u16 {
        self.bytes.ntohs_unchecked(URG_POINTER_OFFSET)
    }
/// Returns the TCP header options as an `[&u8]` slice.
///
/// # Panics
///
/// This method may panic if the value of `header_len` is invalid.
#[inline]
pub fn options_unchecked(&self, header_len: usize) -> &[u8] {
&self.bytes[usize::from(OPTIONS_OFFSET)..header_len]
}
/// Returns a slice which contains the payload of the segment. May panic if the value of
/// `header_len` is invalid.
///
/// # Panics
///
/// This method may panic if the value of `header_len` is invalid.
#[inline]
pub fn payload_unchecked(&self, header_len: usize) -> &[u8] {
self.bytes.split_at(header_len).1
}
    /// Returns the length of the segment.
    #[inline]
    pub fn len(&self) -> u16 {
        // NOTE: This appears to be a safe conversion in all current cases.
        // Packets are always set up in the context of an Ipv4Packet, which is
        // capped at a u16 size. However, I'd rather be safe here.
        u16::try_from(self.bytes.len()).unwrap_or(u16::MAX)
    }
    /// Returns a slice which contains the payload of the segment.
    ///
    /// Uses `header_len()` to locate the payload, so the result is only meaningful
    /// for a well-formed header.
    #[inline]
    pub fn payload(&self) -> &[u8] {
        self.payload_unchecked(self.header_len().into())
    }
    /// Returns the length of the payload.
    ///
    /// NOTE(review): this subtraction underflows (panicking in debug builds) if
    /// `header_len()` exceeds the segment length; callers are expected to have
    /// validated the header first (e.g. via `from_bytes`).
    #[inline]
    pub fn payload_len(&self) -> u16 {
        self.len() - u16::from(self.header_len())
    }
    /// Computes the TCP checksum of the segment. More details about TCP checksum computation can
    /// be found [here].
    ///
    /// Delegates to the shared TCP/UDP pseudo-header implementation in the `pdu` module.
    ///
    /// [here]: https://en.wikipedia.org/wiki/Transmission_Control_Protocol#Checksum_computation
    pub fn compute_checksum(&self, src_addr: Ipv4Addr, dst_addr: Ipv4Addr) -> u16 {
        crate::dumbo::pdu::compute_checksum(&self.bytes, src_addr, dst_addr, ChecksumProto::Tcp)
    }
/// Parses TCP header options (only `MSS` is supported for now).
///
/// If no error is encountered, returns the `MSS` value, or `None` if the option is not
/// present.
///
/// # Panics
///
/// This method may panic if the value of `header_len` is invalid.
pub fn parse_mss_option_unchecked(
&self,
header_len: usize,
) -> Result<Option<NonZeroU16>, TcpError> {
let b = self.options_unchecked(header_len);
let mut i = 0;
// All TCP options (except EOL and NOP) are encoded using x bytes (x >= 2), where the first
// byte represents the option kind, the second is the option length (including these first
// two bytes), and finally the next x - 2 bytes represent option data. The length of
// the MSS option is 4, so the option data encodes an u16 in network order.
// The MSS option is 4 bytes wide, so we need at least 4 more bytes to look for it.
while i + 3 < b.len() {
match b[i] {
OPTION_KIND_EOL => break,
OPTION_KIND_NOP => {
i += 1;
continue;
}
OPTION_KIND_MSS => {
// Read from option data (we skip checking if the len is valid).
// TODO: To be super strict, we should make sure there aren't additional MSS
// options present (which would be super wrong). Should we be super strict?
let mss = b.ntohs_unchecked(i + 2);
if mss < MSS_MIN {
return Err(TcpError::MssOption);
}
// The unwarp() is safe because mms >= MSS_MIN at this point.
return Ok(Some(NonZeroU16::new(mss).unwrap()));
}
_ => {
// Some other option; just skip opt_len bytes in total.
i += b[i + 1] as usize;
continue;
}
}
}
Ok(None)
}
    /// Interprets `bytes` as a TCP segment without any validity checks.
    ///
    /// Prefer [`TcpSegment::from_bytes`] when the input has not been validated yet.
    ///
    /// # Panics
    ///
    /// This method does not panic, but further method calls on the resulting object may panic if
    /// `bytes` contains invalid input.
    #[inline]
    pub fn from_bytes_unchecked(bytes: T) -> Self {
        TcpSegment {
            bytes: InnerBytes::new(bytes),
        }
    }
/// Attempts to interpret `bytes` as a TCP segment, checking the validity of the header fields.
///
/// The `verify_checksum` parameter must contain the source and destination addresses from the
/// enclosing IPv4 packet if the TCP checksum must be validated.
#[inline]
pub fn from_bytes(
bytes: T,
verify_checksum: Option<(Ipv4Addr, Ipv4Addr)>,
) -> Result<Self, TcpError> {
if bytes.len() < usize::from(OPTIONS_OFFSET) {
return Err(TcpError::SliceTooShort);
}
let segment = Self::from_bytes_unchecked(bytes);
// We skip checking if the reserved bits are 0b000 (and a couple of other things).
let header_len = segment.header_len();
if header_len < OPTIONS_OFFSET
|| u16::from(header_len) > min(u16::from(MAX_HEADER_LEN), segment.len())
{
return Err(TcpError::HeaderLen);
}
if let Some((src_addr, dst_addr)) = verify_checksum
&& segment.compute_checksum(src_addr, dst_addr) != 0
{
return Err(TcpError::Checksum);
}
Ok(segment)
}
}
impl<T: NetworkBytesMut + Debug> TcpSegment<'_, T> {
    // All setters write in network byte order and return `&mut Self` so calls can be chained.
    /// Sets the source port.
    #[inline]
    pub fn set_source_port(&mut self, value: u16) -> &mut Self {
        self.bytes.htons_unchecked(SOURCE_PORT_OFFSET, value);
        self
    }
    /// Sets the destination port.
    #[inline]
    pub fn set_destination_port(&mut self, value: u16) -> &mut Self {
        self.bytes.htons_unchecked(DESTINATION_PORT_OFFSET, value);
        self
    }
    /// Sets the value of the sequence number field.
    #[inline]
    pub fn set_sequence_number(&mut self, value: u32) -> &mut Self {
        self.bytes.htonl_unchecked(SEQ_NUMBER_OFFSET, value);
        self
    }
    /// Sets the value of the acknowledgement number field.
    #[inline]
    pub fn set_ack_number(&mut self, value: u32) -> &mut Self {
        self.bytes.htonl_unchecked(ACK_NUMBER_OFFSET, value);
        self
    }
    /// Sets the value of the `ihl` header field based on `header_len` (which should be a multiple
    /// of 4), clears the reserved bits, and sets the `NS` flag according to the last parameter.
    ///
    /// For a multiple of 4, `header_len << 2` equals `(header_len / 4) << 4`, i.e. the data
    /// offset (in 32-bit words) shifted into the upper nibble, with the reserved bits left 0.
    // TODO: Check that header_len | 0b11 == 0 and the resulting data_offset is valid?
    #[inline]
    pub fn set_header_len_rsvd_ns(&mut self, header_len: u8, ns: bool) -> &mut Self {
        let mut value = header_len << 2;
        if ns {
            value |= 1;
        }
        self.bytes[DATAOFF_RSVD_NS_OFFSET] = value;
        self
    }
    /// Sets the value of the header byte containing every TCP flag except `NS`.
    #[inline]
    pub fn set_flags_after_ns(&mut self, flags: Flags) -> &mut Self {
        self.bytes[FLAGS_AFTER_NS_OFFSET] = flags.bits();
        self
    }
    /// Sets the value of the `window size` field.
    #[inline]
    pub fn set_window_size(&mut self, value: u16) -> &mut Self {
        self.bytes.htons_unchecked(WINDOW_SIZE_OFFSET, value);
        self
    }
    /// Sets the value of the `checksum` field.
    ///
    /// Stored verbatim; no checksum computation happens here.
    #[inline]
    pub fn set_checksum(&mut self, value: u16) -> &mut Self {
        self.bytes.htons_unchecked(CHECKSUM_OFFSET, value);
        self
    }
    /// Sets the value of the `urgent pointer` field.
    #[inline]
    pub fn set_urgent_pointer(&mut self, value: u16) -> &mut Self {
        self.bytes.htons_unchecked(URG_POINTER_OFFSET, value);
        self
    }
/// Returns a mutable slice containing the segment payload.
///
/// # Panics
///
/// This method may panic if the value of `header_len` is invalid.
#[inline]
pub fn payload_mut_unchecked(&mut self, header_len: usize) -> &mut [u8] {
self.bytes.split_at_mut(header_len).1
}
/// Returns a mutable slice containing the segment payload.
#[inline]
pub fn payload_mut(&mut self) -> &mut [u8] {
let header_len = self.header_len();
self.payload_mut_unchecked(header_len.into())
}
/// Writes a complete TCP segment.
///
/// # Arguments
///
/// * `buf` - Write the segment to this buffer.
/// * `src_port` - Source port.
/// * `dst_port` - Destination port.
/// * `seq_number` - Sequence number.
/// * `ack_number` - Acknowledgement number.
/// * `flags_after_ns` - TCP flags to set (except `NS`, which is always set to 0).
/// * `window_size` - Value to write in the `window size` field.
/// * `mss_option` - When a value is specified, use it to add a TCP MSS option to the header.
/// * `mss_remaining` - Represents an upper bound on the payload length (the number of bytes
/// used up by things like IP options have to be subtracted from the MSS). There is some
/// redundancy looking at this argument and the next one, so we might end up removing or
/// changing something.
/// * `payload` - May contain a buffer which holds payload data and the maximum amount of bytes
/// we should read from that buffer. When `None`, the TCP segment will carry no payload.
/// * `compute_checksum` - May contain the pair addresses from the enclosing IPv4 packet, which
/// are required for TCP checksum computation. Skip the checksum altogether when `None`.
#[allow(clippy::too_many_arguments)]
#[inline]
pub fn write_segment<R: ByteBuffer + ?Sized + Debug>(
buf: T,
src_port: u16,
dst_port: u16,
seq_number: u32,
ack_number: u32,
flags_after_ns: Flags,
window_size: u16,
mss_option: Option<u16>,
mss_remaining: u16,
payload: Option<(&R, usize)>,
compute_checksum: Option<(Ipv4Addr, Ipv4Addr)>,
) -> Result<Self, TcpError> {
Ok(Self::write_incomplete_segment(
buf,
seq_number,
ack_number,
flags_after_ns,
window_size,
mss_option,
mss_remaining,
payload,
)?
.finalize(src_port, dst_port, compute_checksum))
}
    /// Writes an incomplete TCP segment, which is missing the `source port`, `destination port`,
    /// and `checksum` fields.
    ///
    /// This method writes the rest of the segment, including data (when available). Only the `MSS`
    /// option is supported for now. The `NS` flag, `URG` flag, and `urgent pointer` field are set
    /// to 0.
    ///
    /// # Arguments
    ///
    /// * `buf` - Write the segment to this buffer.
    /// * `seq_number` - Sequence number.
    /// * `ack_number` - Acknowledgement number.
    /// * `flags_after_ns` - TCP flags to set (except `NS`, which is always set to 0).
    /// * `window_size` - Value to write in the `window size` field.
    /// * `mss_option` - When a value is specified, use it to add a TCP MSS option to the header.
    /// * `mss_remaining` - Represents an upper bound on the payload length (the number of bytes
    ///   used up by things like IP options have to be subtracted from the MSS). There is some
    ///   redundancy looking at this argument and the next one, so we might end up removing or
    ///   changing something.
    /// * `payload` - May contain a buffer which holds payload data and the maximum amount of bytes
    ///   we should read from that buffer. When `None`, the TCP segment will carry no payload.
    // Marked inline because a lot of code vanishes after constant folding when
    // we don't add TCP options, or when mss_remaining is actually a constant, etc.
    #[allow(clippy::too_many_arguments)]
    #[inline]
    pub fn write_incomplete_segment<R: ByteBuffer + ?Sized + Debug>(
        buf: T,
        seq_number: u32,
        ack_number: u32,
        flags_after_ns: Flags,
        window_size: u16,
        mss_option: Option<u16>,
        mss_remaining: u16,
        payload: Option<(&R, usize)>,
    ) -> Result<Incomplete<Self>, TcpError> {
        let mut mss_left = mss_remaining;
        // We're going to need at least this many bytes.
        let mut segment_len = u16::from(OPTIONS_OFFSET);
        // The TCP options will require this much more bytes. Writing the MSS option also
        // consumes part of the MSS budget, which must not underflow.
        let options_len = if mss_option.is_some() {
            mss_left = mss_left
                .checked_sub(OPTION_LEN_MSS.into())
                .ok_or(TcpError::MssRemaining)?;
            OPTION_LEN_MSS
        } else {
            0
        };
        segment_len += u16::from(options_len);
        if buf.len() < usize::from(segment_len) {
            return Err(TcpError::SliceTooShort);
        }
        // The unchecked call is safe because buf.len() >= segment_len.
        let mut segment = Self::from_bytes_unchecked(buf);
        segment
            .set_sequence_number(seq_number)
            .set_ack_number(ack_number)
            .set_header_len_rsvd_ns(OPTIONS_OFFSET + options_len, false)
            .set_flags_after_ns(flags_after_ns)
            .set_window_size(window_size)
            .set_urgent_pointer(0);
        // Let's write the MSS option if we have to: kind byte, length byte, then the value.
        if let Some(value) = mss_option {
            segment.bytes[usize::from(OPTIONS_OFFSET)] = OPTION_KIND_MSS;
            segment.bytes[usize::from(OPTIONS_OFFSET) + 1] = OPTION_LEN_MSS;
            segment
                .bytes
                .htons_unchecked(usize::from(OPTIONS_OFFSET) + 2, value);
        }
        let payload_bytes_count = if let Some((payload_buf, max_payload_bytes)) = payload {
            let left_to_read = min(payload_buf.len(), max_payload_bytes);
            // The subtraction makes sense because we previously checked that
            // buf.len() >= segment_len.
            let mut room_for_payload = min(segment.len() - segment_len, mss_left);
            // The unwrap is safe because room_for_payload is a u16.
            room_for_payload =
                u16::try_from(min(usize::from(room_for_payload), left_to_read)).unwrap();
            if room_for_payload == 0 {
                return Err(TcpError::EmptyPayload);
            }
            // Copy `room_for_payload` bytes out of `payload_buf` (from offset 0) into the
            // segment, right after the header/options. Guaranteed not to panic since we
            // clamped `room_for_payload` to both the buffer room and `payload_buf.len()`.
            payload_buf.read_to_slice(
                0,
                &mut segment.bytes
                    [usize::from(segment_len)..usize::from(segment_len + room_for_payload)],
            );
            room_for_payload
        } else {
            0
        };
        segment_len += payload_bytes_count;
        // Shrink the resulting segment to a slice of exact size, so using self.len() makes
        // sense. This is ok because segment_len <= buf.len().
        segment.bytes.shrink_unchecked(segment_len.into());
        Ok(Incomplete::new(segment))
    }
}
impl<'a, T: NetworkBytesMut + Debug> Incomplete<TcpSegment<'a, T>> {
    /// Transforms `self` into a `TcpSegment<T>` by specifying values for the `source port`,
    /// `destination port`, and (optionally) the information required to compute the TCP checksum.
    #[inline]
    pub fn finalize(
        mut self,
        src_port: u16,
        dst_port: u16,
        compute_checksum: Option<(Ipv4Addr, Ipv4Addr)>,
    ) -> TcpSegment<'a, T> {
        self.inner
            .set_source_port(src_port)
            .set_destination_port(dst_port);
        if let Some((src_addr, dst_addr)) = compute_checksum {
            // The checksum field must read as zero while the checksum itself is computed.
            self.inner.set_checksum(0);
            let csum = self.inner.compute_checksum(src_addr, dst_addr);
            self.inner.set_checksum(csum);
        }
        self.inner
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Round-trips every accessor/mutator pair on a zeroed buffer.
    #[test]
    fn test_set_get() {
        let mut a = [0u8; 100];
        let mut p = TcpSegment::from_bytes_unchecked(a.as_mut());
        assert_eq!(p.source_port(), 0);
        p.set_source_port(123);
        assert_eq!(p.source_port(), 123);
        assert_eq!(p.destination_port(), 0);
        p.set_destination_port(322);
        assert_eq!(p.destination_port(), 322);
        assert_eq!(p.sequence_number(), 0);
        p.set_sequence_number(1_234_567);
        assert_eq!(p.sequence_number(), 1_234_567);
        assert_eq!(p.ack_number(), 0);
        p.set_ack_number(345_234);
        assert_eq!(p.ack_number(), 345_234);
        assert_eq!(p.header_len_rsvd_ns(), (0, 0, false));
        assert_eq!(p.header_len(), 0);
        // Header_len must be a multiple of 4 here to be valid.
        let header_len = 60;
        p.set_header_len_rsvd_ns(header_len, true);
        assert_eq!(p.header_len_rsvd_ns(), (header_len, 0, true));
        assert_eq!(p.header_len(), header_len);
        assert_eq!(p.flags_after_ns().bits(), 0);
        p.set_flags_after_ns(Flags::SYN | Flags::URG);
        assert_eq!(p.flags_after_ns(), Flags::SYN | Flags::URG);
        assert_eq!(p.window_size(), 0);
        p.set_window_size(60000);
        assert_eq!(p.window_size(), 60000);
        assert_eq!(p.checksum(), 0);
        p.set_checksum(4321);
        assert_eq!(p.checksum(), 4321);
        assert_eq!(p.urgent_pointer(), 0);
        p.set_urgent_pointer(5554);
        assert_eq!(p.urgent_pointer(), 5554);
    }
#[test]
fn test_constructors() {
let mut a = [1u8; 1460];
let b = [2u8; 1000];
let c = [3u8; 2000];
let src_addr = Ipv4Addr::new(10, 1, 2, 3);
let dst_addr = Ipv4Addr::new(192, 168, 44, 77);
let src_port = 1234;
let dst_port = 5678;
let seq_number = 11_111_222;
let ack_number = 34_566_543;
let flags_after_ns = Flags::SYN | Flags::RST;
let window_size = 19999;
let mss_left = 1460;
let mss_option = Some(mss_left);
let payload = Some((b.as_ref(), b.len()));
let header_len = OPTIONS_OFFSET + OPTION_LEN_MSS;
let segment_len = {
let mut segment = TcpSegment::write_segment(
a.as_mut(),
src_port,
dst_port,
seq_number,
ack_number,
flags_after_ns,
window_size,
mss_option,
mss_left,
payload,
Some((src_addr, dst_addr)),
)
.unwrap();
assert_eq!(segment.source_port(), src_port);
assert_eq!(segment.destination_port(), dst_port);
assert_eq!(segment.sequence_number(), seq_number);
assert_eq!(segment.ack_number(), ack_number);
assert_eq!(segment.header_len_rsvd_ns(), (header_len, 0, false));
assert_eq!(segment.flags_after_ns(), flags_after_ns);
assert_eq!(segment.window_size(), window_size);
let checksum = segment.checksum();
segment.set_checksum(0);
let computed_checksum = segment.compute_checksum(src_addr, dst_addr);
assert_eq!(checksum, computed_checksum);
segment.set_checksum(checksum);
assert_eq!(segment.compute_checksum(src_addr, dst_addr), 0);
assert_eq!(segment.urgent_pointer(), 0);
{
let options = segment.options_unchecked(header_len.into());
assert_eq!(options.len(), usize::from(OPTION_LEN_MSS));
assert_eq!(options[0], OPTION_KIND_MSS);
assert_eq!(options[1], OPTION_LEN_MSS);
assert_eq!(options.ntohs_unchecked(2), mss_left);
}
// Payload was smaller than mss_left after options.
assert_eq!(
usize::from(segment.len()),
usize::from(header_len) + b.len(),
);
segment.len()
// Mutable borrow of a goes out of scope.
};
{
let segment =
TcpSegment::from_bytes(&a[..segment_len.into()], Some((src_addr, dst_addr)))
.unwrap();
assert_eq!(
segment.parse_mss_option_unchecked(header_len.into()),
Ok(Some(NonZeroU16::new(mss_left).unwrap()))
);
}
// Let's quickly see what happens when the payload buf is larger than our mutable slice.
{
let segment_len = TcpSegment::write_segment(
a.as_mut(),
src_port,
dst_port,
seq_number,
ack_number,
flags_after_ns,
window_size,
mss_option,
mss_left,
Some((c.as_ref(), c.len())),
Some((src_addr, dst_addr)),
)
.unwrap()
.len();
assert_eq!(segment_len, mss_left);
}
// Now let's test the error value for from_bytes().
// Using a helper function here instead of a closure because it's hard (impossible?) to
// specify lifetime bounds for closure arguments.
fn p(buf: &mut [u8]) -> TcpSegment<'_, &mut [u8]> {
TcpSegment::from_bytes_unchecked(buf)
}
// Just a helper closure.
let look_for_error = |buf: &[u8], err: TcpError| {
assert_eq!(
TcpSegment::from_bytes(buf, Some((src_addr, dst_addr))).unwrap_err(),
err
);
};
// Header length too short.
p(a.as_mut()).set_header_len_rsvd_ns(OPTIONS_OFFSET.checked_sub(1).unwrap(), false);
look_for_error(a.as_ref(), TcpError::HeaderLen);
// Header length too large.
p(a.as_mut()).set_header_len_rsvd_ns(MAX_HEADER_LEN.checked_add(4).unwrap(), false);
look_for_error(a.as_ref(), TcpError::HeaderLen);
// The previously set checksum should be valid.
assert_eq!(
p(a.as_mut())
.set_header_len_rsvd_ns(header_len, false)
.compute_checksum(src_addr, dst_addr),
0
);
// Let's make it invalid.
let checksum = p(a.as_mut()).checksum();
p(a.as_mut()).set_checksum(checksum.wrapping_add(1));
look_for_error(a.as_ref(), TcpError::Checksum);
// Now we use a very small buffer.
let mut small_buf = [0u8; 1];
look_for_error(small_buf.as_ref(), TcpError::SliceTooShort);
assert_eq!(
TcpSegment::write_segment(
small_buf.as_mut(),
src_port,
dst_port,
seq_number,
ack_number,
flags_after_ns,
window_size,
mss_option,
mss_left,
payload,
Some((src_addr, dst_addr)),
)
.unwrap_err(),
TcpError::SliceTooShort
);
// Make sure we get the proper error for an insufficient value of mss_remaining.
assert_eq!(
TcpSegment::write_segment(
small_buf.as_mut(),
src_port,
dst_port,
seq_number,
ack_number,
flags_after_ns,
window_size,
mss_option,
0,
payload,
Some((src_addr, dst_addr)),
)
.unwrap_err(),
TcpError::MssRemaining
);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/dumbo/pdu/mod.rs | src/vmm/src/dumbo/pdu/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! A module for interpreting byte slices as protocol data units (PDUs).
//!
//! A PDU represents data transmitted as a single unit during communication using a specific
//! protocol. Ethernet frames, IP packets, and TCP segments are all examples of protocol data
//! units.
use std::fmt::Debug;
use std::net::Ipv4Addr;
use crate::dumbo::pdu::bytes::NetworkBytes;
use crate::dumbo::pdu::ipv4::{PROTOCOL_TCP, PROTOCOL_UDP};
pub mod arp;
pub mod bytes;
pub mod ethernet;
pub mod ipv4;
pub mod tcp;
pub mod udp;
/// This is the baseline definition of the `Incomplete` struct, which wraps a PDU that is
/// still missing some values or content.
///
/// It's mostly important when writing PDUs, because fields like checksum
/// can only be computed after the payload becomes known. Also, the length of the underlying slice
/// should be equal to the actual size for a complete PDU. To that end, whenever a variable-length
/// payload is involved, the slice is shrunk to an exact fit. The particular ways of completing an
/// `Incomplete<T>` are implemented for each specific PDU.
#[derive(Debug)]
pub struct Incomplete<T> {
    inner: T,
}
impl<T: Debug> Incomplete<T> {
    /// Wraps `inner` as an incomplete PDU. Crate-private: only the PDU writers create these.
    #[inline]
    fn new(inner: T) -> Self {
        Incomplete { inner }
    }
    /// Returns a reference to the wrapped object.
    #[inline]
    pub fn inner(&self) -> &T {
        &self.inner
    }
    /// Returns a mutable reference to the wrapped object.
    #[inline]
    pub fn inner_mut(&mut self) -> &mut T {
        &mut self.inner
    }
}
// Transport protocols for which `compute_checksum` below can compute a checksum.
// The discriminants equal the IPv4 `protocol` field values, so the enum can be cast
// with `as` directly into the pseudo-header sum.
#[repr(u8)]
#[derive(Debug, Copy, Clone, PartialEq)]
enum ChecksumProto {
    Tcp = PROTOCOL_TCP,
    Udp = PROTOCOL_UDP,
}
/// Computes the checksum of a TCP segment or UDP datagram; both protocols use
/// the same pseudo-header algorithm.
///
/// # Arguments
/// * `bytes` - Raw bytes of a TCP packet or a UDP datagram
/// * `src_addr` - IPv4 source address
/// * `dst_addr` - IPv4 destination address
/// * `protocol` - transport protocol of `bytes`; selects the protocol number folded into the
///   pseudo header and enables the UDP zero-checksum substitution
///
/// More details about TCP checksum computation can be found [here].
///
/// [here]: https://en.wikipedia.org/wiki/Transmission_Control_Protocol#Checksum_computation
#[inline]
fn compute_checksum<T: NetworkBytes + Debug>(
    bytes: &T,
    src_addr: Ipv4Addr,
    dst_addr: Ipv4Addr,
    protocol: ChecksumProto,
) -> u16 {
    // Pseudo header first: both addresses split into 16-bit halves, then the
    // protocol number and the transport length.
    let mut sum = 0usize;
    let a = u32::from(src_addr) as usize;
    sum += a & 0xffff;
    sum += a >> 16;
    let b = u32::from(dst_addr) as usize;
    sum += b & 0xffff;
    sum += b >> 16;
    let len = bytes.len();
    sum += protocol as usize;
    sum += len;
    // Sum the data as big-endian 16-bit words; a trailing odd byte is zero-padded
    // on the right.
    for i in 0..len / 2 {
        sum += usize::from(bytes.ntohs_unchecked(i * 2));
    }
    if len % 2 != 0 {
        sum += usize::from(bytes[len - 1]) << 8;
    }
    // Fold the carries back in (end-around carry) until the sum fits in 16 bits.
    while sum >> 16 != 0 {
        sum = (sum & 0xffff) + (sum >> 16);
    }
    // Safe to unwrap due to the while loop above
    let mut csum = !u16::try_from(sum).unwrap();
    // If a UDP packet checksum is 0, an all ones value is transmitted
    if protocol == ChecksumProto::Udp && csum == 0x0 {
        csum = !csum;
    }
    csum
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/dumbo/pdu/arp.rs | src/vmm/src/dumbo/pdu/arp.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Contains logic that helps with handling ARP frames over Ethernet, which encapsulate requests
//! or replies related to IPv4 addresses.
//!
//! A more detailed view of an ARP frame can be found [here].
//!
//! [here]: https://en.wikipedia.org/wiki/Address_Resolution_Protocol
use std::convert::From;
use std::fmt::Debug;
use std::net::Ipv4Addr;
use super::bytes::{InnerBytes, NetworkBytes, NetworkBytesMut};
use super::ethernet::{self, ETHERTYPE_IPV4};
use crate::utils::net::mac::{MAC_ADDR_LEN, MacAddr};
/// ARP Request operation
pub const OPER_REQUEST: u16 = 0x0001;
/// ARP Reply operation
pub const OPER_REPLY: u16 = 0x0002;
/// ARP is for Ethernet hardware
pub const HTYPE_ETHERNET: u16 = 0x0001;
/// The length of an ARP frame for IPv4 over Ethernet.
pub const ETH_IPV4_FRAME_LEN: usize = 28;
// Byte offsets of the fixed ARP header fields.
const HTYPE_OFFSET: usize = 0;
const PTYPE_OFFSET: usize = 2;
const HLEN_OFFSET: usize = 4;
const PLEN_OFFSET: usize = 5;
const OPER_OFFSET: usize = 6;
const SHA_OFFSET: usize = 8;
// The following constants are specific to ARP requests/responses
// associated with IPv4 over Ethernet.
const ETH_IPV4_SPA_OFFSET: usize = 14;
const ETH_IPV4_THA_OFFSET: usize = 18;
const ETH_IPV4_TPA_OFFSET: usize = 24;
const IPV4_ADDR_LEN: u8 = 4;
/// Represents errors which may occur while parsing or writing a frame.
#[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)]
pub enum ArpError {
    /// Invalid hardware address length.
    HLen,
    /// Invalid hardware type.
    HType,
    /// Invalid operation.
    Operation,
    /// Invalid protocol address length.
    PLen,
    /// Invalid protocol type.
    PType,
    /// The provided slice does not fit the size of a frame.
    SliceExactLen,
}
/// The inner bytes will be interpreted as an ARP frame.
///
/// ARP is a generic protocol as far as data
/// link layer and network layer protocols go, but this particular implementation is concerned with
/// ARP frames related to IPv4 over Ethernet.
#[derive(Debug)]
pub struct EthIPv4ArpFrame<'a, T: 'a> {
    // Raw frame bytes; field accessors index into this at the fixed offsets above.
    bytes: InnerBytes<'a, T>,
}
#[allow(clippy::len_without_is_empty)]
impl<T: NetworkBytes + Debug> EthIPv4ArpFrame<'_, T> {
    /// Interprets the given bytes as an ARP frame, without doing any validity checks beforehand.
    ///
    /// Prefer [`EthIPv4ArpFrame::request_from_bytes`] when the input has not been validated yet.
    ///
    /// # Panics
    ///
    /// This method does not panic, but further method calls on the resulting object may panic if
    /// `bytes` contains invalid input.
    #[inline]
    pub fn from_bytes_unchecked(bytes: T) -> Self {
        EthIPv4ArpFrame {
            bytes: InnerBytes::new(bytes),
        }
    }
/// Tries to interpret a byte slice as a valid IPv4 over Ethernet ARP request.
///
/// If no error occurs, it guarantees accessor methods (which make use of various `_unchecked`
/// functions) are safe to call on the result, because all predefined offsets will be valid.
pub fn request_from_bytes(bytes: T) -> Result<Self, ArpError> {
// This kind of frame has a fixed length, so we know what to expect.
if bytes.len() != ETH_IPV4_FRAME_LEN {
return Err(ArpError::SliceExactLen);
}
let maybe = EthIPv4ArpFrame::from_bytes_unchecked(bytes);
if maybe.htype() != HTYPE_ETHERNET {
return Err(ArpError::HType);
}
if maybe.ptype() != ETHERTYPE_IPV4 {
return Err(ArpError::PType);
}
// We could theoretically skip the hlen and plen checks, since they are kinda implicit.
if maybe.hlen() != MAC_ADDR_LEN {
return Err(ArpError::HLen);
}
if maybe.plen() != IPV4_ADDR_LEN {
return Err(ArpError::PLen);
}
if maybe.operation() != OPER_REQUEST {
return Err(ArpError::Operation);
}
Ok(maybe)
}
    // Multi-byte fields are read in network byte order at the fixed offsets above.
    /// Returns the hardware type of the frame.
    #[inline]
    pub fn htype(&self) -> u16 {
        self.bytes.ntohs_unchecked(HTYPE_OFFSET)
    }
    /// Returns the protocol type of the frame.
    #[inline]
    pub fn ptype(&self) -> u16 {
        self.bytes.ntohs_unchecked(PTYPE_OFFSET)
    }
    /// Returns the hardware address length of the frame.
    #[inline]
    pub fn hlen(&self) -> u8 {
        self.bytes[HLEN_OFFSET]
    }
    /// Returns the protocol address length of the frame.
    #[inline]
    pub fn plen(&self) -> u8 {
        self.bytes[PLEN_OFFSET]
    }
    /// Returns the type of operation within the frame.
    #[inline]
    pub fn operation(&self) -> u16 {
        self.bytes.ntohs_unchecked(OPER_OFFSET)
    }
    /// Returns the sender hardware address.
    #[inline]
    pub fn sha(&self) -> MacAddr {
        MacAddr::from_bytes_unchecked(&self.bytes[SHA_OFFSET..ETH_IPV4_SPA_OFFSET])
    }
    /// Returns the sender protocol address.
    #[inline]
    pub fn spa(&self) -> Ipv4Addr {
        Ipv4Addr::from(self.bytes.ntohl_unchecked(ETH_IPV4_SPA_OFFSET))
    }
    /// Returns the target hardware address.
    #[inline]
    pub fn tha(&self) -> MacAddr {
        MacAddr::from_bytes_unchecked(&self.bytes[ETH_IPV4_THA_OFFSET..ETH_IPV4_TPA_OFFSET])
    }
    /// Returns the target protocol address.
    #[inline]
    pub fn tpa(&self) -> Ipv4Addr {
        Ipv4Addr::from(self.bytes.ntohl_unchecked(ETH_IPV4_TPA_OFFSET))
    }
    /// Returns the length of the frame.
    #[inline]
    pub fn len(&self) -> usize {
        // This might as well return ETH_IPV4_FRAME_LEN directly, since we check this is the actual
        // length in request_from_bytes(). For some reason it seems nicer leaving it as is.
        self.bytes.len()
    }
}
impl<T: NetworkBytesMut + Debug> EthIPv4ArpFrame<'_, T> {
    // Writes a complete ARP frame with the given field values into `buf`.
    // Returns `ArpError::SliceExactLen` unless `buf` is exactly `ETH_IPV4_FRAME_LEN` bytes,
    // which is what makes the unchecked setters below safe.
    #[allow(clippy::too_many_arguments)]
    fn write_raw(
        buf: T,
        htype: u16,
        ptype: u16,
        hlen: u8,
        plen: u8,
        operation: u16,
        sha: MacAddr,
        spa: Ipv4Addr,
        tha: MacAddr,
        tpa: Ipv4Addr,
    ) -> Result<Self, ArpError> {
        if buf.len() != ETH_IPV4_FRAME_LEN {
            return Err(ArpError::SliceExactLen);
        }
        // This is ok, because we've checked the length of the slice.
        let mut frame = EthIPv4ArpFrame::from_bytes_unchecked(buf);
        frame.set_htype(htype);
        frame.set_ptype(ptype);
        frame.set_hlen(hlen);
        frame.set_plen(plen);
        frame.set_operation(operation);
        frame.set_sha(sha);
        frame.set_spa(spa);
        frame.set_tha(tha);
        frame.set_tpa(tpa);
        Ok(frame)
    }
    /// Attempts to write an ARP request to `buf`, based on the specified hardware and protocol
    /// addresses.
    ///
    /// `buf` must be exactly [`ETH_IPV4_FRAME_LEN`] bytes long.
    #[inline]
    pub fn write_request(
        buf: T,
        sha: MacAddr,
        spa: Ipv4Addr,
        tha: MacAddr,
        tpa: Ipv4Addr,
    ) -> Result<Self, ArpError> {
        Self::write_raw(
            buf,
            HTYPE_ETHERNET,
            ETHERTYPE_IPV4,
            MAC_ADDR_LEN,
            IPV4_ADDR_LEN,
            OPER_REQUEST,
            sha,
            spa,
            tha,
            tpa,
        )
    }
    /// Attempts to write an ARP reply to `buf`, based on the specified hardware and protocol
    /// addresses.
    ///
    /// `buf` must be exactly [`ETH_IPV4_FRAME_LEN`] bytes long.
    #[inline]
    pub fn write_reply(
        buf: T,
        sha: MacAddr,
        spa: Ipv4Addr,
        tha: MacAddr,
        tpa: Ipv4Addr,
    ) -> Result<Self, ArpError> {
        Self::write_raw(
            buf,
            HTYPE_ETHERNET,
            ETHERTYPE_IPV4,
            MAC_ADDR_LEN,
            IPV4_ADDR_LEN,
            OPER_REPLY,
            sha,
            spa,
            tha,
            tpa,
        )
    }
    // Multi-byte fields are written in network byte order at the fixed offsets above.
    /// Sets the hardware type of the frame.
    #[inline]
    pub fn set_htype(&mut self, value: u16) {
        self.bytes.htons_unchecked(HTYPE_OFFSET, value);
    }
    /// Sets the protocol type of the frame.
    #[inline]
    pub fn set_ptype(&mut self, value: u16) {
        self.bytes.htons_unchecked(PTYPE_OFFSET, value);
    }
    /// Sets the hardware address length of the frame.
    #[inline]
    pub fn set_hlen(&mut self, value: u8) {
        self.bytes[HLEN_OFFSET] = value;
    }
    /// Sets the protocol address length of the frame.
    #[inline]
    pub fn set_plen(&mut self, value: u8) {
        self.bytes[PLEN_OFFSET] = value;
    }
    /// Sets the operation within the frame.
    #[inline]
    pub fn set_operation(&mut self, value: u16) {
        self.bytes.htons_unchecked(OPER_OFFSET, value);
    }
    /// Sets the sender hardware address.
    #[inline]
    pub fn set_sha(&mut self, addr: MacAddr) {
        self.bytes[SHA_OFFSET..ETH_IPV4_SPA_OFFSET].copy_from_slice(addr.get_bytes());
    }
    /// Sets the sender protocol address.
    #[inline]
    pub fn set_spa(&mut self, addr: Ipv4Addr) {
        self.bytes
            .htonl_unchecked(ETH_IPV4_SPA_OFFSET, u32::from(addr));
    }
    /// Sets the target hardware address.
    #[inline]
    pub fn set_tha(&mut self, addr: MacAddr) {
        self.bytes[ETH_IPV4_THA_OFFSET..ETH_IPV4_TPA_OFFSET].copy_from_slice(addr.get_bytes());
    }
    /// Sets the target protocol address.
    #[inline]
    pub fn set_tpa(&mut self, addr: Ipv4Addr) {
        self.bytes
            .htonl_unchecked(ETH_IPV4_TPA_OFFSET, u32::from(addr));
    }
}
/// This function checks if `buf` may hold an Ethernet frame which encapsulates an
/// `EthIPv4ArpRequest` for the given address. Cannot produce false negatives.
#[inline]
pub fn test_speculative_tpa(buf: &[u8], addr: Ipv4Addr) -> bool {
    // Checking the buffer length up front keeps the unchecked accessor below in bounds.
    if buf.len() < ethernet::PAYLOAD_OFFSET + ETH_IPV4_FRAME_LEN {
        return false;
    }
    let arp_bytes = &buf[ethernet::PAYLOAD_OFFSET..];
    EthIPv4ArpFrame::from_bytes_unchecked(arp_bytes).tpa() == addr
}
#[cfg(test)]
mod tests {
    use std::str::FromStr;
    use super::*;
    // Covers write/parse round-trips plus every ArpError variant.
    #[test]
    fn test_eth_ipv4_arp_frame() {
        let mut a = [0u8; 1000];
        let mut bad_array = [0u8; 1];
        let sha = MacAddr::from_str("01:23:45:67:89:ab").unwrap();
        let tha = MacAddr::from_str("cd:ef:01:23:45:67").unwrap();
        let spa = Ipv4Addr::new(10, 1, 2, 3);
        let tpa = Ipv4Addr::new(10, 4, 5, 6);
        // Slice is too short.
        assert_eq!(
            EthIPv4ArpFrame::request_from_bytes(bad_array.as_ref()).unwrap_err(),
            ArpError::SliceExactLen
        );
        // Slice is too short.
        assert_eq!(
            EthIPv4ArpFrame::write_reply(bad_array.as_mut(), sha, spa, tha, tpa).unwrap_err(),
            ArpError::SliceExactLen
        );
        // Slice is too long.
        assert_eq!(
            EthIPv4ArpFrame::write_reply(a.as_mut(), sha, spa, tha, tpa).unwrap_err(),
            ArpError::SliceExactLen
        );
        // We write a valid ARP reply to the specified slice.
        {
            let f = EthIPv4ArpFrame::write_reply(&mut a[..ETH_IPV4_FRAME_LEN], sha, spa, tha, tpa)
                .unwrap();
            // This is a bit redundant given the following tests, but assert away!
            assert_eq!(f.htype(), HTYPE_ETHERNET);
            assert_eq!(f.ptype(), ETHERTYPE_IPV4);
            assert_eq!(f.hlen(), MAC_ADDR_LEN);
            assert_eq!(f.plen(), IPV4_ADDR_LEN);
            assert_eq!(f.operation(), OPER_REPLY);
            assert_eq!(f.sha(), sha);
            assert_eq!(f.spa(), spa);
            assert_eq!(f.tha(), tha);
            assert_eq!(f.tpa(), tpa);
        }
        // Now let's try to parse a request.
        // Slice is too long.
        assert_eq!(
            EthIPv4ArpFrame::request_from_bytes(a.as_ref()).unwrap_err(),
            ArpError::SliceExactLen
        );
        // The length is fine now, but the operation is a reply instead of request.
        assert_eq!(
            EthIPv4ArpFrame::request_from_bytes(&a[..ETH_IPV4_FRAME_LEN]).unwrap_err(),
            ArpError::Operation
        );
        // Various requests: (htype, ptype, hlen, plen, expected parse error).
        let requests = [
            (
                HTYPE_ETHERNET,
                ETHERTYPE_IPV4,
                MAC_ADDR_LEN,
                IPV4_ADDR_LEN,
                None,
            ), // Valid request
            (
                HTYPE_ETHERNET + 1,
                ETHERTYPE_IPV4,
                MAC_ADDR_LEN,
                IPV4_ADDR_LEN,
                Some(ArpError::HType),
            ), // Invalid htype
            (
                HTYPE_ETHERNET,
                ETHERTYPE_IPV4 + 1,
                MAC_ADDR_LEN,
                IPV4_ADDR_LEN,
                Some(ArpError::PType),
            ), // Invalid ptype
            (
                HTYPE_ETHERNET,
                ETHERTYPE_IPV4,
                MAC_ADDR_LEN + 1,
                IPV4_ADDR_LEN,
                Some(ArpError::HLen),
            ), // Invalid hlen
            (
                HTYPE_ETHERNET,
                ETHERTYPE_IPV4,
                MAC_ADDR_LEN,
                IPV4_ADDR_LEN + 1,
                Some(ArpError::PLen),
            ), // Invalid plen
        ];
        for (htype, ptype, hlen, plen, err) in requests.iter() {
            EthIPv4ArpFrame::write_raw(
                &mut a[..ETH_IPV4_FRAME_LEN],
                *htype,
                *ptype,
                *hlen,
                *plen,
                OPER_REQUEST,
                sha,
                spa,
                tha,
                tpa,
            )
            .unwrap();
            match err {
                None => {
                    EthIPv4ArpFrame::request_from_bytes(&a[..ETH_IPV4_FRAME_LEN]).unwrap();
                }
                Some(arp_error) => assert_eq!(
                    EthIPv4ArpFrame::request_from_bytes(&a[..ETH_IPV4_FRAME_LEN]).unwrap_err(),
                    *arp_error
                ),
            }
        }
    }
#[test]
fn test_speculative() {
let mut a = [0u8; 1000];
let addr = Ipv4Addr::new(1, 2, 3, 4);
assert!(!test_speculative_tpa(a.as_ref(), addr));
{
let mac = MacAddr::from_bytes_unchecked(&[0; 6]);
let mut eth = crate::dumbo::pdu::ethernet::EthernetFrame::write_incomplete(
a.as_mut(),
mac,
mac,
0,
)
.unwrap();
let mut arp = EthIPv4ArpFrame::from_bytes_unchecked(eth.inner_mut().payload_mut());
arp.set_tpa(addr);
}
assert!(test_speculative_tpa(a.as_ref(), addr));
// Let's also test for a very small buffer.
let small = [0u8; 1];
assert!(!test_speculative_tpa(small.as_ref(), addr));
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/acpi/x86_64.rs | src/vmm/src/acpi/x86_64.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::mem::size_of;
use acpi_tables::fadt::IAPC_BOOT_ARG_FLAGS_VGA_NOT_PRESENT;
use acpi_tables::madt::{IoAPIC, LocalAPIC};
use acpi_tables::{Fadt, aml};
use vm_memory::GuestAddress;
use zerocopy::IntoBytes;
use crate::arch::x86_64::layout;
use crate::device_manager::legacy::PortIODeviceManager;
#[inline(always)]
/// Serializes the MADT interrupt-controller entries for the guest: a single
/// IOAPIC record followed by one Local APIC record per vCPU, as raw bytes.
pub(crate) fn setup_interrupt_controllers(nr_vcpus: u8) -> Vec<u8> {
    // Pre-size the buffer exactly: one IOAPIC entry plus nr_vcpus LAPIC entries.
    let capacity = size_of::<IoAPIC>() + usize::from(nr_vcpus) * size_of::<LocalAPIC>();
    let mut bytes = Vec::with_capacity(capacity);
    bytes.extend_from_slice(IoAPIC::new(0, layout::IOAPIC_ADDR).as_bytes());
    (0..nr_vcpus).for_each(|apic_id| bytes.extend_from_slice(LocalAPIC::new(apic_id).as_bytes()));
    bytes
}
#[inline(always)]
// Sets the x86-specific IA-PC boot architecture flags on the FADT.
pub(crate) fn setup_arch_fadt(fadt: &mut Fadt) {
    // Let the guest kernel know that there is no VGA hardware present,
    // and that we support neither ASPM nor MSI type of interrupts.
    // More info here:
    // https://uefi.org/specs/ACPI/6.5/05_ACPI_Software_Programming_Model.html?highlight=0a06#ia-pc-boot-architecture-flags
    fadt.setup_iapc_flags(1 << IAPC_BOOT_ARG_FLAGS_VGA_NOT_PRESENT);
}
#[inline(always)]
// Appends the AML description of the x86 legacy port-I/O devices to the DSDT
// definition block being built in `dsdt_data`.
pub(crate) fn setup_arch_dsdt(dsdt_data: &mut Vec<u8>) -> Result<(), aml::AmlError> {
    PortIODeviceManager::append_aml_bytes(dsdt_data)
}
/// Guest-physical address of the local APIC, taken from the x86_64 memory layout.
pub(crate) const fn apic_addr() -> u32 {
    layout::APIC_ADDR
}
/// Guest address where the ACPI RSDP table is placed, per the x86_64 memory layout.
pub(crate) const fn rsdp_addr() -> GuestAddress {
    GuestAddress(layout::RSDP_ADDR)
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.