repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vmm_config/mod.rs | src/vmm/src/vmm_config/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::convert::{From, TryFrom, TryInto};
use std::io;

use serde::{Deserialize, Serialize};

use crate::rate_limiter::{BucketUpdate, RateLimiter, TokenBucket};
/// Wrapper for configuring the balloon device.
pub mod balloon;
/// Wrapper for configuring the microVM boot source.
pub mod boot_source;
/// Wrapper for configuring the block devices.
pub mod drive;
/// Wrapper for configuring the entropy device attached to the microVM.
pub mod entropy;
/// Wrapper over the microVM general information attached to the microVM.
pub mod instance_info;
/// Wrapper for configuring the memory and CPU of the microVM.
pub mod machine_config;
/// Wrapper for configuring memory hotplug.
pub mod memory_hotplug;
/// Wrapper for configuring the metrics.
pub mod metrics;
/// Wrapper for configuring the MMDS.
pub mod mmds;
/// Wrapper for configuring the network devices attached to the microVM.
pub mod net;
/// Wrapper for configuring the pmem devices attached to the microVM.
pub mod pmem;
/// Wrapper for configuring the serial device attached to the microVM.
pub mod serial;
/// Wrapper for configuring microVM snapshots and the microVM state.
pub mod snapshot;
/// Wrapper for configuring the vsock devices attached to the microVM.
pub mod vsock;
// TODO: Migrate the VMM public-facing code (i.e. interface) to use stateless structures,
// for receiving data/args, such as the below `RateLimiterConfig` and `TokenBucketConfig`.
// Also todo: find a better suffix than `Config`; it should illustrate the static nature
// of the enclosed data.
// Currently, data is passed around using live/stateful objects. Switching to static/stateless
// objects will simplify both the ownership model and serialization.
// Public access would then be more tightly regulated via `VmmAction`s, consisting of tuples like
// (entry-point-into-VMM-logic, stateless-args-structure).
/// A public-facing, stateless structure, holding all the data we need to create a TokenBucket
/// (live) object.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Deserialize, Serialize)]
pub struct TokenBucketConfig {
    /// See TokenBucket::size.
    pub size: u64,
    /// See TokenBucket::one_time_burst.
    /// `None` is treated as a burst of 0 when the live bucket is built.
    pub one_time_burst: Option<u64>,
    /// See TokenBucket::refill_time.
    pub refill_time: u64,
}
impl From<&TokenBucket> for TokenBucketConfig {
    /// Snapshots a live token bucket into its static configuration form.
    fn from(tb: &TokenBucket) -> Self {
        // A zero initial burst is represented as `None` rather than `Some(0)`.
        let burst = tb.initial_one_time_burst();
        TokenBucketConfig {
            size: tb.capacity(),
            one_time_burst: (burst != 0).then_some(burst),
            refill_time: tb.refill_time_ms(),
        }
    }
}
/// A public-facing, stateless structure, holding all the data we need to create a RateLimiter
/// (live) object.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct RateLimiterConfig {
    /// Data used to initialize the RateLimiter::bandwidth bucket.
    /// `None` leaves the bandwidth dimension unconfigured.
    pub bandwidth: Option<TokenBucketConfig>,
    /// Data used to initialize the RateLimiter::ops bucket.
    /// `None` leaves the ops dimension unconfigured.
    pub ops: Option<TokenBucketConfig>,
}
/// A public-facing, stateless structure, specifying RateLimiter properties updates.
// Each field is a tri-state `BucketUpdate`: leave untouched, replace, or disable.
#[derive(Debug)]
pub struct RateLimiterUpdate {
    /// Possible update to the RateLimiter::bandwidth bucket.
    pub bandwidth: BucketUpdate,
    /// Possible update to the RateLimiter::ops bucket.
    pub ops: BucketUpdate,
}
/// Translates an optional bucket config into a `BucketUpdate`.
///
/// - `None` config: leave the current bucket untouched (`BucketUpdate::None`).
/// - Config that yields a valid bucket: replace it (`BucketUpdate::Update`).
/// - Config that cannot form a valid bucket: deactivate it (`BucketUpdate::Disabled`).
fn get_bucket_update(tb_cfg: &Option<TokenBucketConfig>) -> BucketUpdate {
    let Some(cfg) = tb_cfg else {
        // No data supplied, so there is nothing to update.
        return BucketUpdate::None;
    };
    // A missing one-time burst is equivalent to a burst of zero.
    TokenBucket::new(cfg.size, cfg.one_time_burst.unwrap_or(0), cfg.refill_time)
        .map(BucketUpdate::Update)
        .unwrap_or(BucketUpdate::Disabled)
}
impl From<Option<RateLimiterConfig>> for RateLimiterUpdate {
    /// Derives the per-bucket updates from an optional limiter config.
    fn from(cfg: Option<RateLimiterConfig>) -> Self {
        match cfg {
            // A config was supplied: derive an update for each bucket.
            Some(cfg) => RateLimiterUpdate {
                bandwidth: get_bucket_update(&cfg.bandwidth),
                ops: get_bucket_update(&cfg.ops),
            },
            // No config at all: leave both buckets untouched.
            None => RateLimiterUpdate {
                bandwidth: BucketUpdate::None,
                ops: BucketUpdate::None,
            },
        }
    }
}
impl TryInto<RateLimiter> for RateLimiterConfig {
type Error = io::Error;
fn try_into(self) -> Result<RateLimiter, Self::Error> {
let bw = self.bandwidth.unwrap_or_default();
let ops = self.ops.unwrap_or_default();
RateLimiter::new(
bw.size,
bw.one_time_burst.unwrap_or(0),
bw.refill_time,
ops.size,
ops.one_time_burst.unwrap_or(0),
ops.refill_time,
)
}
}
impl From<&RateLimiter> for RateLimiterConfig {
    /// Captures a static snapshot of a live rate limiter's bucket settings.
    fn from(rl: &RateLimiter) -> Self {
        let bandwidth = rl.bandwidth().map(TokenBucketConfig::from);
        let ops = rl.ops().map(TokenBucketConfig::from);
        RateLimiterConfig { bandwidth, ops }
    }
}
impl RateLimiterConfig {
    /// Converts `self` into an `Option`, yielding `None` when neither bucket
    /// is configured. (`Option<T>` already implements `From<T>`, so a custom
    /// method is required.)
    pub fn into_option(self) -> Option<RateLimiterConfig> {
        let configured = self.bandwidth.is_some() || self.ops.is_some();
        configured.then_some(self)
    }
}
#[cfg(test)]
mod tests {
use super::*;
const SIZE: u64 = 1024 * 1024;
const ONE_TIME_BURST: u64 = 1024;
const REFILL_TIME: u64 = 1000;
#[test]
fn test_rate_limiter_configs() {
    // Both buckets populated; the ops bucket deliberately omits the one-time
    // burst to exercise the `None` -> 0 default applied by `try_into`.
    let rlconf = RateLimiterConfig {
        bandwidth: Some(TokenBucketConfig {
            size: SIZE,
            one_time_burst: Some(ONE_TIME_BURST),
            refill_time: REFILL_TIME,
        }),
        ops: Some(TokenBucketConfig {
            size: SIZE * 2,
            one_time_burst: None,
            refill_time: REFILL_TIME * 2,
        }),
    };
    let rl: RateLimiter = rlconf.try_into().unwrap();
    // The live limiter must mirror the static configuration exactly.
    assert_eq!(rl.bandwidth().unwrap().capacity(), SIZE);
    assert_eq!(rl.bandwidth().unwrap().one_time_burst(), ONE_TIME_BURST);
    assert_eq!(rl.bandwidth().unwrap().refill_time_ms(), REFILL_TIME);
    assert_eq!(rl.ops().unwrap().capacity(), SIZE * 2);
    assert_eq!(rl.ops().unwrap().one_time_burst(), 0);
    assert_eq!(rl.ops().unwrap().refill_time_ms(), REFILL_TIME * 2);
}
#[test]
fn test_generate_configs() {
    // Round-trip: config -> live TokenBucket -> config must be lossless.
    let bw_tb_cfg = TokenBucketConfig {
        size: SIZE,
        one_time_burst: Some(ONE_TIME_BURST),
        refill_time: REFILL_TIME,
    };
    let bw_tb = TokenBucket::new(SIZE, ONE_TIME_BURST, REFILL_TIME).unwrap();
    let generated_bw_tb_cfg = TokenBucketConfig::from(&bw_tb);
    assert_eq!(generated_bw_tb_cfg, bw_tb_cfg);
    // Same round-trip at the RateLimiter level, with only one bucket set.
    let rl_conf = RateLimiterConfig {
        bandwidth: Some(bw_tb_cfg),
        ops: None,
    };
    let rl: RateLimiter = rl_conf.try_into().unwrap();
    let generated_rl_conf = RateLimiterConfig::from(&rl);
    assert_eq!(generated_rl_conf, rl_conf);
    // At least one bucket is set, so `into_option` must yield `Some`.
    assert_eq!(generated_rl_conf.into_option(), Some(rl_conf));
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vmm_config/memory_hotplug.rs | src/vmm/src/vmm_config/memory_hotplug.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use serde::{Deserialize, Serialize};
use crate::devices::virtio::mem::{
VIRTIO_MEM_DEFAULT_BLOCK_SIZE_MIB, VIRTIO_MEM_DEFAULT_SLOT_SIZE_MIB,
};
/// Errors associated with memory hotplug configuration.
// NOTE: the `///` texts on the variants below are the user-visible `Display`
// strings (generated via `displaydoc`); do not edit them casually.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum MemoryHotplugConfigError {
    /// Block size must not be lower than {0} MiB
    BlockSizeTooSmall(usize),
    /// Block size must be a power of 2
    BlockSizeNotPowerOfTwo,
    /// Slot size must not be lower than {0} MiB
    SlotSizeTooSmall(usize),
    /// Slot size must be a multiple of block size ({0} MiB)
    SlotSizeNotMultipleOfBlockSize(usize),
    /// Total size must not be lower than slot size ({0} MiB)
    TotalSizeTooSmall(usize),
    /// Total size must be a multiple of slot size ({0} MiB)
    TotalSizeNotMultipleOfSlotSize(usize),
}
/// Serde default for `MemoryHotplugConfig::block_size_mib`.
fn default_block_size_mib() -> usize {
    VIRTIO_MEM_DEFAULT_BLOCK_SIZE_MIB
}
/// Serde default for `MemoryHotplugConfig::slot_size_mib`.
fn default_slot_size_mib() -> usize {
    VIRTIO_MEM_DEFAULT_SLOT_SIZE_MIB
}
/// Configuration for memory hotplug device.
///
/// The size constraints between the fields are enforced by [`Self::validate`].
#[derive(Clone, Debug, Default, PartialEq, Eq, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct MemoryHotplugConfig {
    /// Total memory size in MiB that can be hotplugged.
    /// Must be a multiple of `slot_size_mib` and at least one slot.
    pub total_size_mib: usize,
    /// Block size in MiB. A block is the smallest unit the guest can hot(un)plug.
    /// Must be a power of two, no smaller than the virtio-mem default.
    #[serde(default = "default_block_size_mib")]
    pub block_size_mib: usize,
    /// Slot size in MiB. A slot is the smallest unit the host can (de)attach memory.
    /// Must be a multiple of `block_size_mib`.
    #[serde(default = "default_slot_size_mib")]
    pub slot_size_mib: usize,
}
impl MemoryHotplugConfig {
    /// Checks the configured sizes for consistency.
    ///
    /// The checks run in a fixed order and the first failure is returned:
    /// 1. block size not below the virtio-mem default and a power of two;
    /// 2. slot size not below the virtio-mem default and a multiple of the block size;
    /// 3. total size not below the slot size and a multiple of it.
    pub fn validate(&self) -> Result<(), MemoryHotplugConfigError> {
        let (block, slot, total) = (self.block_size_mib, self.slot_size_mib, self.total_size_mib);

        if block < VIRTIO_MEM_DEFAULT_BLOCK_SIZE_MIB {
            return Err(MemoryHotplugConfigError::BlockSizeTooSmall(
                VIRTIO_MEM_DEFAULT_BLOCK_SIZE_MIB,
            ));
        }
        if !block.is_power_of_two() {
            return Err(MemoryHotplugConfigError::BlockSizeNotPowerOfTwo);
        }
        if slot < VIRTIO_MEM_DEFAULT_SLOT_SIZE_MIB {
            return Err(MemoryHotplugConfigError::SlotSizeTooSmall(
                VIRTIO_MEM_DEFAULT_SLOT_SIZE_MIB,
            ));
        }
        if !slot.is_multiple_of(block) {
            return Err(MemoryHotplugConfigError::SlotSizeNotMultipleOfBlockSize(
                block,
            ));
        }
        if total < slot {
            return Err(MemoryHotplugConfigError::TotalSizeTooSmall(slot));
        }
        if !total.is_multiple_of(slot) {
            return Err(MemoryHotplugConfigError::TotalSizeNotMultipleOfSlotSize(
                slot,
            ));
        }
        Ok(())
    }
}
/// A request to resize the hotpluggable memory.
// (The previous doc comment was copy-pasted from `MemoryHotplugConfig`; this
// struct carries a size update, not the device configuration.)
#[derive(Clone, Debug, Default, PartialEq, Eq, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct MemoryHotplugSizeUpdate {
    /// Requested size in MiB to resize the hotpluggable memory to.
    pub requested_size_mib: usize,
}
#[cfg(test)]
mod tests {
    use serde_json;

    use super::*;

    // NOTE: the panic messages below previously referenced variant names that
    // do not exist ("InvalidBlockSizeTooSmall", etc.); they now name the actual
    // `MemoryHotplugConfigError` variants.

    /// A config satisfying every constraint passes validation.
    #[test]
    fn test_valid_config() {
        let config = MemoryHotplugConfig {
            total_size_mib: 1024,
            block_size_mib: 2,
            slot_size_mib: 128,
        };
        config.validate().unwrap();
    }

    #[test]
    fn test_block_size_too_small() {
        let config = MemoryHotplugConfig {
            total_size_mib: 1024,
            block_size_mib: 1,
            slot_size_mib: 128,
        };
        match config.validate() {
            Err(MemoryHotplugConfigError::BlockSizeTooSmall(min)) => assert_eq!(min, 2),
            _ => panic!("Expected BlockSizeTooSmall error"),
        }
    }

    #[test]
    fn test_block_size_not_power_of_two() {
        let config = MemoryHotplugConfig {
            total_size_mib: 1024,
            block_size_mib: 3,
            slot_size_mib: 128,
        };
        match config.validate() {
            Err(MemoryHotplugConfigError::BlockSizeNotPowerOfTwo) => {}
            _ => panic!("Expected BlockSizeNotPowerOfTwo error"),
        }
    }

    #[test]
    fn test_slot_size_too_small() {
        let config = MemoryHotplugConfig {
            total_size_mib: 1024,
            block_size_mib: 2,
            slot_size_mib: 1,
        };
        match config.validate() {
            Err(MemoryHotplugConfigError::SlotSizeTooSmall(min)) => assert_eq!(min, 128),
            _ => panic!("Expected SlotSizeTooSmall error"),
        }
    }

    #[test]
    fn test_slot_size_not_multiple_of_block_size() {
        let config = MemoryHotplugConfig {
            total_size_mib: 1024,
            block_size_mib: 4,
            slot_size_mib: 130,
        };
        match config.validate() {
            Err(MemoryHotplugConfigError::SlotSizeNotMultipleOfBlockSize(block_size)) => {
                assert_eq!(block_size, 4)
            }
            _ => panic!("Expected SlotSizeNotMultipleOfBlockSize error"),
        }
    }

    #[test]
    fn test_total_size_too_small() {
        let config = MemoryHotplugConfig {
            total_size_mib: 64,
            block_size_mib: 2,
            slot_size_mib: 128,
        };
        match config.validate() {
            Err(MemoryHotplugConfigError::TotalSizeTooSmall(slot_size)) => {
                assert_eq!(slot_size, 128)
            }
            _ => panic!("Expected TotalSizeTooSmall error"),
        }
    }

    #[test]
    fn test_total_size_not_multiple_of_slot_size() {
        let config = MemoryHotplugConfig {
            total_size_mib: 1000,
            block_size_mib: 2,
            slot_size_mib: 128,
        };
        match config.validate() {
            Err(MemoryHotplugConfigError::TotalSizeNotMultipleOfSlotSize(slot_size)) => {
                assert_eq!(slot_size, 128)
            }
            _ => panic!("Expected TotalSizeNotMultipleOfSlotSize error"),
        }
    }

    /// Omitted block/slot sizes fall back to the serde defaults.
    #[test]
    fn test_defaults() {
        assert_eq!(default_block_size_mib(), 2);
        assert_eq!(default_slot_size_mib(), 128);
        let json = r#"{
            "total_size_mib": 1024
        }"#;
        let deserialized: MemoryHotplugConfig = serde_json::from_str(json).unwrap();
        assert_eq!(
            deserialized,
            MemoryHotplugConfig {
                total_size_mib: 1024,
                block_size_mib: 2,
                slot_size_mib: 128,
            }
        );
    }

    /// Serialization followed by deserialization is lossless.
    #[test]
    fn test_serde() {
        let config = MemoryHotplugConfig {
            total_size_mib: 1024,
            block_size_mib: 4,
            slot_size_mib: 256,
        };
        let json = serde_json::to_string(&config).unwrap();
        let deserialized: MemoryHotplugConfig = serde_json::from_str(&json).unwrap();
        assert_eq!(config, deserialized);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vmm_config/mmds.rs | src/vmm/src/vmm_config/mmds.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::net::Ipv4Addr;
use serde::{Deserialize, Serialize};
use crate::mmds::data_store;
use crate::mmds::data_store::MmdsVersion;
/// Keeps the MMDS configuration.
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct MmdsConfig {
    /// MMDS version.
    #[serde(default)]
    pub version: MmdsVersion,
    /// Network interfaces that allow forwarding packets to MMDS.
    pub network_interfaces: Vec<String>,
    /// MMDS IPv4 configured address.
    /// `None` means no explicit address was supplied.
    pub ipv4_address: Option<Ipv4Addr>,
    /// Compatibility with EC2 IMDS.
    /// Defaults to `false` when omitted from the request body.
    #[serde(default)]
    pub imds_compat: bool,
}
impl MmdsConfig {
    /// Returns the configured MMDS version.
    pub fn version(&self) -> MmdsVersion {
        self.version
    }

    /// Returns a copy of the IDs of the network interfaces that accept
    /// MMDS requests.
    pub fn network_interfaces(&self) -> Vec<String> {
        self.network_interfaces.to_vec()
    }

    /// Returns the MMDS IPv4 address if one was configured, `None` otherwise.
    pub fn ipv4_addr(&self) -> Option<Ipv4Addr> {
        self.ipv4_address
    }
}
/// MMDS configuration related errors.
// NOTE: the `///` variant texts double as the user-visible `Display` strings
// (via `displaydoc`); do not edit them casually.
#[rustfmt::skip]
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum MmdsConfigError {
    /// The list of network interface IDs that allow forwarding MMDS requests is empty.
    EmptyNetworkIfaceList,
    /// The MMDS IPv4 address is not link local.
    InvalidIpv4Addr,
    /// The list of network interface IDs provided contains at least one ID that does not correspond to any existing network interface.
    InvalidNetworkInterfaceId,
    /// Failed to initialize MMDS data store: {0}
    InitMmdsDatastore(#[from] data_store::MmdsDatastoreError),
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vmm_config/boot_source.rs | src/vmm/src/vmm_config/boot_source.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fs::File;
use std::io;
use serde::{Deserialize, Serialize};
/// Default guest kernel command line:
/// - `reboot=k` shut down the guest on reboot, instead of well... rebooting;
/// - `panic=1` on panic, reboot after 1 second;
/// - `nomodule` disable loadable kernel module support;
/// - `8250.nr_uarts=0` disable 8250 serial interface;
/// - `i8042.noaux` do not probe the i8042 controller for an attached mouse (save boot time);
/// - `i8042.nomux` do not probe i8042 for a multiplexing controller (save boot time);
/// - `i8042.dumbkbd` do not attempt to control kbd state via the i8042 (save boot time);
/// - `swiotlb=noforce` disable software bounce buffers (SWIOTLB).
pub const DEFAULT_KERNEL_CMDLINE: &str = "reboot=k panic=1 nomodule 8250.nr_uarts=0 i8042.noaux \
    i8042.nomux i8042.dumbkbd swiotlb=noforce";
/// Strongly typed data structure used to configure the boot source of the
/// microvm.
#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)]
#[serde(deny_unknown_fields)]
pub struct BootSourceConfig {
    /// Path of the kernel image.
    pub kernel_image_path: String,
    /// Path of the initrd, if there is one.
    pub initrd_path: Option<String>,
    /// The boot arguments to pass to the kernel. If this field is uninitialized,
    /// DEFAULT_KERNEL_CMDLINE is used.
    pub boot_args: Option<String>,
}
/// Errors associated with actions on `BootSourceConfig`.
// NOTE: the `///` variant texts are the user-visible `Display` strings (via
// `displaydoc`).
// NOTE(review): `InvalidInitrdPath` ends its message with ". {0}" while the
// other variants use ": {0}" — consider unifying, but that changes emitted
// error text, so confirm no callers/tests match on it first.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum BootSourceConfigError {
    /// The kernel file cannot be opened: {0}
    InvalidKernelPath(io::Error),
    /// The initrd file cannot be opened due to invalid path or invalid permissions. {0}
    InvalidInitrdPath(io::Error),
    /// The kernel command line is invalid: {0}
    InvalidKernelCommandLine(String),
}
/// Holds the kernel specification (both configuration as well as runtime details).
#[derive(Debug, Default)]
pub struct BootSource {
    /// The boot source configuration.
    pub config: BootSourceConfig,
    /// The boot source builder (a boot source allocated and validated).
    /// It is an `Option` because a resumed microVM does not need it.
    pub builder: Option<BootConfig>,
}
/// Holds the kernel builder (created and validated based on `BootSourceConfig`).
#[derive(Debug)]
pub struct BootConfig {
    /// The commandline validated against correctness.
    pub cmdline: linux_loader::cmdline::Cmdline,
    /// The descriptor to the kernel file.
    pub kernel_file: File,
    /// The descriptor to the initrd file, if there is one.
    pub initrd_file: Option<File>,
}
impl BootConfig {
    /// Creates a `BootConfig` from the given boot source configuration.
    ///
    /// Opens the kernel (and optional initrd) files and validates the kernel
    /// command line against the architecture's maximum size.
    ///
    /// # Errors
    /// Returns `InvalidKernelPath` / `InvalidInitrdPath` when the respective
    /// file cannot be opened, and `InvalidKernelCommandLine` when the command
    /// line fails validation.
    pub fn new(cfg: &BootSourceConfig) -> Result<Self, BootSourceConfigError> {
        use self::BootSourceConfigError::{
            InvalidInitrdPath, InvalidKernelCommandLine, InvalidKernelPath,
        };

        let kernel_file = File::open(&cfg.kernel_image_path).map_err(InvalidKernelPath)?;
        // Open the initrd only when a path was supplied.
        let initrd_file = cfg
            .initrd_path
            .as_ref()
            .map(|path| File::open(path).map_err(InvalidInitrdPath))
            .transpose()?;

        // Fall back to the default command line when none was provided.
        let cmdline_str = cfg.boot_args.as_deref().unwrap_or(DEFAULT_KERNEL_CMDLINE);
        let cmdline =
            linux_loader::cmdline::Cmdline::try_from(cmdline_str, crate::arch::CMDLINE_MAX_SIZE)
                .map_err(|err| InvalidKernelCommandLine(err.to_string()))?;

        Ok(Self {
            cmdline,
            kernel_file,
            initrd_file,
        })
    }
}
#[cfg(test)]
pub(crate) mod tests {
    use vmm_sys_util::tempfile::TempFile;

    use super::*;
    use crate::snapshot::Snapshot;

    /// With no `boot_args`, the builder must fall back to
    /// `DEFAULT_KERNEL_CMDLINE` and leave the initrd unset.
    #[test]
    fn test_boot_config() {
        let kernel_file = TempFile::new().unwrap();
        let kernel_path = kernel_file.as_path().to_str().unwrap().to_string();
        let boot_src_cfg = BootSourceConfig {
            boot_args: None,
            initrd_path: None,
            kernel_image_path: kernel_path,
        };
        let boot_cfg = BootConfig::new(&boot_src_cfg).unwrap();
        assert!(boot_cfg.initrd_file.is_none());
        // The stored cmdline is NUL-terminated; compare including the NUL.
        assert_eq!(
            boot_cfg.cmdline.as_cstring().unwrap().as_bytes_with_nul(),
            [DEFAULT_KERNEL_CMDLINE.as_bytes(), b"\0"].concat()
        );
    }

    /// A config must survive a snapshot save/load round trip unchanged.
    #[test]
    fn test_serde() {
        let boot_src_cfg = BootSourceConfig {
            boot_args: Some(DEFAULT_KERNEL_CMDLINE.to_string()),
            initrd_path: Some("/tmp/initrd".to_string()),
            kernel_image_path: "./vmlinux.bin".to_string(),
        };
        let mut snapshot_data = vec![0u8; 1000];
        Snapshot::new(&boot_src_cfg)
            .save(&mut snapshot_data.as_mut_slice())
            .unwrap();
        let restored_boot_cfg = Snapshot::load_without_crc_check(snapshot_data.as_slice())
            .unwrap()
            .data;
        assert_eq!(boot_src_cfg, restored_boot_cfg);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vmm_config/drive.rs | src/vmm/src/vmm_config/drive.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::collections::VecDeque;
use std::io;
use std::sync::{Arc, Mutex};
use serde::{Deserialize, Serialize};
use super::RateLimiterConfig;
use crate::VmmError;
use crate::devices::virtio::block::device::Block;
pub use crate::devices::virtio::block::virtio::device::FileEngineType;
use crate::devices::virtio::block::{BlockError, CacheType};
/// Errors associated with the operations allowed on a drive.
// NOTE: the `///` variant texts are the user-visible `Display` strings (via
// `displaydoc`).
// NOTE(review): the `AddingSecondRootDevice` message is missing an "is"
// ("...the root device defined as a pmem device"); fixing it changes emitted
// error text, so confirm nothing matches on the string first.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum DriveError {
    /// Attempt to add block as a root device while the root device defined as a pmem device
    AddingSecondRootDevice,
    /// Unable to create the virtio block device: {0}
    CreateBlockDevice(BlockError),
    /// Cannot create RateLimiter: {0}
    CreateRateLimiter(io::Error),
    /// Unable to patch the block device: {0} Please verify the request arguments.
    DeviceUpdate(VmmError),
    /// A root block device already exists!
    RootBlockDeviceAlreadyAdded,
}
/// Use this structure to set up the Block Device before booting the kernel.
#[derive(Debug, Default, PartialEq, Eq, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct BlockDeviceConfig {
    /// Unique identifier of the drive.
    pub drive_id: String,
    /// Part-UUID. Represents the unique id of the boot partition of this device. It is
    /// optional and it will be used only if the `is_root_device` field is true.
    pub partuuid: Option<String>,
    /// If set to true, it makes the current device the root block device.
    /// Setting this flag to true will mount the block device in the
    /// guest under /dev/vda unless the partuuid is present.
    pub is_root_device: bool,
    /// If set to true, the drive will ignore flush requests coming from
    /// the guest driver.
    #[serde(default)]
    pub cache_type: CacheType,

    // VirtioBlock specific fields
    /// If set to true, the drive is opened in read-only mode. Otherwise, the
    /// drive is opened as read-write.
    pub is_read_only: Option<bool>,
    /// Path of the drive.
    pub path_on_host: Option<String>,
    /// Rate Limiter for I/O operations.
    pub rate_limiter: Option<RateLimiterConfig>,
    /// The type of IO engine used by the device.
    /// Optional — presumably `None` lets `Block::new` pick the default engine;
    /// TODO(review) confirm against `Block::new`.
    #[serde(rename = "io_engine")]
    pub file_engine_type: Option<FileEngineType>,

    // VhostUserBlock specific fields
    /// Path to the vhost-user socket.
    pub socket: Option<String>,
}
/// Only provided fields will be updated. I.e. if any optional fields
/// are missing, they will not be updated.
#[derive(Debug, Default, PartialEq, Eq, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct BlockDeviceUpdateConfig {
    /// The drive ID, as provided by the user at creation time.
    pub drive_id: String,

    // VirtioBlock specific fields
    /// New block file path on the host. Only provided data will be updated.
    pub path_on_host: Option<String>,
    /// New rate limiter config.
    pub rate_limiter: Option<RateLimiterConfig>,
}
/// Wrapper for the collection that holds all the Block Devices
#[derive(Debug, Default)]
pub struct BlockBuilder {
    /// The list of block devices.
    /// There can be at most one root block device and it would be the first in the list.
    // Root Device should be the first in the list whether or not PARTUUID is
    // specified in order to avoid bugs in case of switching from partuuid boot
    // scenarios to /dev/vda boot type.
    pub devices: VecDeque<Arc<Mutex<Block>>>,
}
impl BlockBuilder {
    /// Constructor for BlockDevices. It initializes an empty `VecDeque`.
    // (Doc previously said "LinkedList"; the backing collection is a VecDeque.)
    pub fn new() -> Self {
        Self {
            devices: Default::default(),
        }
    }

    /// Specifies whether there is a root block device already present in the list.
    pub fn has_root_device(&self) -> bool {
        // If there is a root device, it would be at the top of the list.
        if let Some(block) = self.devices.front() {
            block.lock().expect("Poisoned lock").root_device()
        } else {
            false
        }
    }

    /// Gets the index of the device with the specified `drive_id` if it exists in the list.
    fn get_index_of_drive_id(&self, drive_id: &str) -> Option<usize> {
        self.devices
            .iter()
            .position(|b| b.lock().expect("Poisoned lock").id().eq(drive_id))
    }

    /// Inserts an existing block device, keeping the root device (if any) at
    /// the front of the list.
    pub fn add_virtio_device(&mut self, block_device: Arc<Mutex<Block>>) {
        if block_device.lock().expect("Poisoned lock").root_device() {
            self.devices.push_front(block_device);
        } else {
            self.devices.push_back(block_device);
        }
    }

    /// Inserts a `Block` in the block devices list using the specified configuration.
    /// If a block with the same id already exists, it will overwrite it.
    /// Inserting a secondary root block device will fail.
    ///
    /// # Errors
    /// - `AddingSecondRootDevice` when the root is already backed by a pmem device.
    /// - `RootBlockDeviceAlreadyAdded` when a different root block device exists.
    /// - `CreateBlockDevice` when the underlying `Block` cannot be created.
    pub fn insert(
        &mut self,
        config: BlockDeviceConfig,
        has_pmem_root: bool,
    ) -> Result<(), DriveError> {
        let position = self.get_index_of_drive_id(&config.drive_id);
        let has_root_device = self.has_root_device();
        let configured_as_root = config.is_root_device;

        // The root may already be a pmem device; never allow two roots.
        if configured_as_root && has_pmem_root {
            return Err(DriveError::AddingSecondRootDevice);
        }
        // Don't allow adding a second root block device.
        // If the new device cfg is root and not an update to the existing root, fail fast.
        if configured_as_root && has_root_device && position != Some(0) {
            return Err(DriveError::RootBlockDeviceAlreadyAdded);
        }

        let block_dev = Arc::new(Mutex::new(
            Block::new(config).map_err(DriveError::CreateBlockDevice)?,
        ));

        // If the id of the drive already exists in the list, the operation is update/overwrite.
        match position {
            // New block device.
            None => {
                if configured_as_root {
                    self.devices.push_front(block_dev);
                } else {
                    self.devices.push_back(block_dev);
                }
            }
            // Update existing block device.
            Some(index) => {
                // Update the slot with the new block.
                self.devices[index] = block_dev;
                // Check if the root block device is being updated.
                if index != 0 && configured_as_root {
                    // Make sure the root device is on the first position.
                    self.devices.swap(0, index);
                }
            }
        }
        Ok(())
    }

    /// Returns a vec with the structures used to configure the devices.
    pub fn configs(&self) -> Vec<BlockDeviceConfig> {
        self.devices
            .iter()
            // Consistency fix: was `.unwrap()` while every other method in this
            // impl uses `.expect("Poisoned lock")`.
            .map(|b| b.lock().expect("Poisoned lock").config())
            .collect()
    }
}
#[cfg(test)]
mod tests {
use vmm_sys_util::tempfile::TempFile;
use super::*;
use crate::devices::virtio::block::virtio::VirtioBlockError;
impl PartialEq for DriveError {
fn eq(&self, other: &DriveError) -> bool {
self.to_string() == other.to_string()
}
}
// This implementation is used only in tests.
// We cannot directly derive clone because RateLimiter does not implement clone.
impl Clone for BlockDeviceConfig {
fn clone(&self) -> Self {
BlockDeviceConfig {
drive_id: self.drive_id.clone(),
partuuid: self.partuuid.clone(),
is_root_device: self.is_root_device,
is_read_only: self.is_read_only,
cache_type: self.cache_type,
path_on_host: self.path_on_host.clone(),
rate_limiter: self.rate_limiter,
file_engine_type: self.file_engine_type,
socket: self.socket.clone(),
}
}
}
#[test]
fn test_create_block_devs() {
let block_devs = BlockBuilder::new();
assert_eq!(block_devs.devices.len(), 0);
}
#[test]
fn test_add_non_root_block_device() {
let dummy_file = TempFile::new().unwrap();
let dummy_path = dummy_file.as_path().to_str().unwrap().to_string();
let dummy_id = String::from("1");
let dummy_block_device = BlockDeviceConfig {
drive_id: dummy_id.clone(),
partuuid: None,
is_root_device: false,
cache_type: CacheType::Writeback,
is_read_only: Some(false),
path_on_host: Some(dummy_path),
rate_limiter: None,
file_engine_type: None,
socket: None,
};
let mut block_devs = BlockBuilder::new();
block_devs
.insert(dummy_block_device.clone(), false)
.unwrap();
assert!(!block_devs.has_root_device());
assert_eq!(block_devs.devices.len(), 1);
assert_eq!(block_devs.get_index_of_drive_id(&dummy_id), Some(0));
let block = block_devs.devices[0].lock().unwrap();
assert_eq!(block.id(), dummy_block_device.drive_id);
assert_eq!(block.partuuid(), &dummy_block_device.partuuid);
assert_eq!(block.read_only(), dummy_block_device.is_read_only.unwrap());
}
#[test]
fn test_add_one_root_block_device() {
let dummy_file = TempFile::new().unwrap();
let dummy_path = dummy_file.as_path().to_str().unwrap().to_string();
let dummy_block_device = BlockDeviceConfig {
drive_id: String::from("1"),
partuuid: None,
is_root_device: true,
cache_type: CacheType::Unsafe,
is_read_only: Some(true),
path_on_host: Some(dummy_path),
rate_limiter: None,
file_engine_type: None,
socket: None,
};
let mut block_devs = BlockBuilder::new();
block_devs
.insert(dummy_block_device.clone(), false)
.unwrap();
assert!(block_devs.has_root_device());
assert_eq!(block_devs.devices.len(), 1);
let block = block_devs.devices[0].lock().unwrap();
assert_eq!(block.id(), dummy_block_device.drive_id);
assert_eq!(block.partuuid(), &dummy_block_device.partuuid);
assert_eq!(block.read_only(), dummy_block_device.is_read_only.unwrap());
}
#[test]
fn test_add_one_root_block_device_with_pmem_already_as_root() {
let dummy_file = TempFile::new().unwrap();
let dummy_path = dummy_file.as_path().to_str().unwrap().to_string();
let dummy_block_device = BlockDeviceConfig {
drive_id: String::from("1"),
partuuid: None,
is_root_device: true,
cache_type: CacheType::Unsafe,
is_read_only: Some(true),
path_on_host: Some(dummy_path),
rate_limiter: None,
file_engine_type: None,
socket: None,
};
let mut block_devs = BlockBuilder::new();
assert!(matches!(
block_devs
.insert(dummy_block_device.clone(), true)
.unwrap_err(),
DriveError::AddingSecondRootDevice,
));
assert!(!block_devs.has_root_device());
assert_eq!(block_devs.devices.len(), 0);
}
#[test]
fn test_add_two_root_block_devs() {
let dummy_file_1 = TempFile::new().unwrap();
let dummy_path_1 = dummy_file_1.as_path().to_str().unwrap().to_string();
let root_block_device_1 = BlockDeviceConfig {
drive_id: String::from("1"),
partuuid: None,
is_root_device: true,
cache_type: CacheType::Unsafe,
is_read_only: Some(false),
path_on_host: Some(dummy_path_1),
rate_limiter: None,
file_engine_type: None,
socket: None,
};
let dummy_file_2 = TempFile::new().unwrap();
let dummy_path_2 = dummy_file_2.as_path().to_str().unwrap().to_string();
let root_block_device_2 = BlockDeviceConfig {
drive_id: String::from("2"),
partuuid: None,
is_root_device: true,
cache_type: CacheType::Unsafe,
is_read_only: Some(false),
path_on_host: Some(dummy_path_2),
rate_limiter: None,
file_engine_type: None,
socket: None,
};
let mut block_devs = BlockBuilder::new();
block_devs.insert(root_block_device_1, false).unwrap();
assert_eq!(
block_devs.insert(root_block_device_2, false).unwrap_err(),
DriveError::RootBlockDeviceAlreadyAdded
);
}
#[test]
// Test BlockDevicesConfigs::add when you first add the root device and then the other devices.
fn test_add_root_block_device_first() {
let dummy_file_1 = TempFile::new().unwrap();
let dummy_path_1 = dummy_file_1.as_path().to_str().unwrap().to_string();
let root_block_device = BlockDeviceConfig {
drive_id: String::from("1"),
partuuid: None,
is_root_device: true,
cache_type: CacheType::Unsafe,
is_read_only: Some(false),
path_on_host: Some(dummy_path_1),
rate_limiter: None,
file_engine_type: None,
socket: None,
};
let dummy_file_2 = TempFile::new().unwrap();
let dummy_path_2 = dummy_file_2.as_path().to_str().unwrap().to_string();
let dummy_block_dev_2 = BlockDeviceConfig {
drive_id: String::from("2"),
partuuid: None,
is_root_device: false,
cache_type: CacheType::Unsafe,
is_read_only: Some(false),
path_on_host: Some(dummy_path_2),
rate_limiter: None,
file_engine_type: None,
socket: None,
};
let dummy_file_3 = TempFile::new().unwrap();
let dummy_path_3 = dummy_file_3.as_path().to_str().unwrap().to_string();
let dummy_block_dev_3 = BlockDeviceConfig {
drive_id: String::from("3"),
partuuid: None,
is_root_device: false,
cache_type: CacheType::Unsafe,
is_read_only: Some(false),
path_on_host: Some(dummy_path_3),
rate_limiter: None,
file_engine_type: None,
socket: None,
};
let mut block_devs = BlockBuilder::new();
block_devs.insert(dummy_block_dev_2.clone(), false).unwrap();
block_devs.insert(dummy_block_dev_3.clone(), false).unwrap();
block_devs.insert(root_block_device.clone(), false).unwrap();
assert_eq!(block_devs.devices.len(), 3);
let mut block_iter = block_devs.devices.iter();
assert_eq!(
block_iter.next().unwrap().lock().unwrap().id(),
root_block_device.drive_id
);
assert_eq!(
block_iter.next().unwrap().lock().unwrap().id(),
dummy_block_dev_2.drive_id
);
assert_eq!(
block_iter.next().unwrap().lock().unwrap().id(),
dummy_block_dev_3.drive_id
);
}
#[test]
// Test BlockDevicesConfigs::add when you add other devices first and then the root device.
fn test_root_block_device_add_last() {
let dummy_file_1 = TempFile::new().unwrap();
let dummy_path_1 = dummy_file_1.as_path().to_str().unwrap().to_string();
let root_block_device = BlockDeviceConfig {
drive_id: String::from("1"),
partuuid: None,
is_root_device: true,
cache_type: CacheType::Unsafe,
is_read_only: Some(false),
path_on_host: Some(dummy_path_1),
rate_limiter: None,
file_engine_type: None,
socket: None,
};
let dummy_file_2 = TempFile::new().unwrap();
let dummy_path_2 = dummy_file_2.as_path().to_str().unwrap().to_string();
let dummy_block_dev_2 = BlockDeviceConfig {
drive_id: String::from("2"),
partuuid: None,
is_root_device: false,
cache_type: CacheType::Unsafe,
is_read_only: Some(false),
path_on_host: Some(dummy_path_2),
rate_limiter: None,
file_engine_type: None,
socket: None,
};
let dummy_file_3 = TempFile::new().unwrap();
let dummy_path_3 = dummy_file_3.as_path().to_str().unwrap().to_string();
let dummy_block_dev_3 = BlockDeviceConfig {
drive_id: String::from("3"),
partuuid: None,
is_root_device: false,
cache_type: CacheType::Unsafe,
is_read_only: Some(false),
path_on_host: Some(dummy_path_3),
rate_limiter: None,
file_engine_type: None,
socket: None,
};
let mut block_devs = BlockBuilder::new();
block_devs.insert(dummy_block_dev_2.clone(), false).unwrap();
block_devs.insert(dummy_block_dev_3.clone(), false).unwrap();
block_devs.insert(root_block_device.clone(), false).unwrap();
assert_eq!(block_devs.devices.len(), 3);
let mut block_iter = block_devs.devices.iter();
// The root device should be first in the list no matter of the order in
// which the devices were added.
assert_eq!(
block_iter.next().unwrap().lock().unwrap().id(),
root_block_device.drive_id
);
assert_eq!(
block_iter.next().unwrap().lock().unwrap().id(),
dummy_block_dev_2.drive_id
);
assert_eq!(
block_iter.next().unwrap().lock().unwrap().id(),
dummy_block_dev_3.drive_id
);
}
#[test]
fn test_update() {
let dummy_file_1 = TempFile::new().unwrap();
let dummy_path_1 = dummy_file_1.as_path().to_str().unwrap().to_string();
let root_block_device = BlockDeviceConfig {
drive_id: String::from("1"),
partuuid: None,
is_root_device: true,
cache_type: CacheType::Unsafe,
is_read_only: Some(false),
path_on_host: Some(dummy_path_1.clone()),
rate_limiter: None,
file_engine_type: None,
socket: None,
};
let dummy_file_2 = TempFile::new().unwrap();
let dummy_path_2 = dummy_file_2.as_path().to_str().unwrap().to_string();
let mut dummy_block_device_2 = BlockDeviceConfig {
drive_id: String::from("2"),
partuuid: None,
is_root_device: false,
cache_type: CacheType::Unsafe,
is_read_only: Some(false),
path_on_host: Some(dummy_path_2.clone()),
rate_limiter: None,
file_engine_type: None,
socket: None,
};
let mut block_devs = BlockBuilder::new();
// Add 2 block devices.
block_devs.insert(root_block_device, false).unwrap();
block_devs
.insert(dummy_block_device_2.clone(), false)
.unwrap();
// Get index zero.
assert_eq!(
block_devs.get_index_of_drive_id(&String::from("1")),
Some(0)
);
// Get None.
assert!(
block_devs
.get_index_of_drive_id(&String::from("foo"))
.is_none()
);
// Test several update cases using dummy_block_device_2.
// Validate `dummy_block_device_2` is already in the list
assert!(
block_devs
.get_index_of_drive_id(&dummy_block_device_2.drive_id)
.is_some()
);
// Update OK.
dummy_block_device_2.is_read_only = Some(true);
block_devs
.insert(dummy_block_device_2.clone(), false)
.unwrap();
let index = block_devs
.get_index_of_drive_id(&dummy_block_device_2.drive_id)
.unwrap();
// Validate update was successful.
assert!(block_devs.devices[index].lock().unwrap().read_only());
// Update with invalid path.
let dummy_path_3 = String::from("test_update_3");
dummy_block_device_2.path_on_host = Some(dummy_path_3);
assert!(matches!(
block_devs.insert(dummy_block_device_2.clone(), false),
Err(DriveError::CreateBlockDevice(BlockError::VirtioBackend(
VirtioBlockError::BackingFile(_, _)
)))
));
// Update with 2 root block devices.
dummy_block_device_2.path_on_host = Some(dummy_path_2.clone());
dummy_block_device_2.is_root_device = true;
assert_eq!(
block_devs.insert(dummy_block_device_2, false),
Err(DriveError::RootBlockDeviceAlreadyAdded)
);
let root_block_device = BlockDeviceConfig {
drive_id: String::from("1"),
partuuid: None,
is_root_device: true,
cache_type: CacheType::Unsafe,
is_read_only: Some(false),
path_on_host: Some(dummy_path_1),
rate_limiter: None,
file_engine_type: None,
socket: None,
};
// Switch roots and add a PARTUUID for the new one.
let mut root_block_device_old = root_block_device;
root_block_device_old.is_root_device = false;
let root_block_device_new = BlockDeviceConfig {
drive_id: String::from("2"),
partuuid: Some("0eaa91a0-01".to_string()),
is_root_device: true,
cache_type: CacheType::Unsafe,
is_read_only: Some(false),
path_on_host: Some(dummy_path_2),
rate_limiter: None,
file_engine_type: None,
socket: None,
};
block_devs.insert(root_block_device_old, false).unwrap();
let root_block_id = root_block_device_new.drive_id.clone();
block_devs.insert(root_block_device_new, false).unwrap();
assert!(block_devs.has_root_device());
// Verify it's been moved to the first position.
assert_eq!(block_devs.devices[0].lock().unwrap().id(), root_block_id);
}
#[test]
fn test_block_config() {
let dummy_file = TempFile::new().unwrap();
let dummy_block_device = BlockDeviceConfig {
drive_id: String::from("1"),
partuuid: None,
is_root_device: true,
cache_type: CacheType::Unsafe,
is_read_only: Some(true),
path_on_host: Some(dummy_file.as_path().to_str().unwrap().to_string()),
rate_limiter: None,
file_engine_type: Some(FileEngineType::Sync),
socket: None,
};
let mut block_devs = BlockBuilder::new();
block_devs
.insert(dummy_block_device.clone(), false)
.unwrap();
let configs = block_devs.configs();
assert_eq!(configs.len(), 1);
assert_eq!(configs.first().unwrap(), &dummy_block_device);
}
#[test]
fn test_add_device() {
let mut block_devs = BlockBuilder::new();
let backing_file = TempFile::new().unwrap();
let block_id = "test_id";
let config = BlockDeviceConfig {
drive_id: block_id.to_string(),
partuuid: None,
is_root_device: true,
cache_type: CacheType::default(),
is_read_only: Some(true),
path_on_host: Some(backing_file.as_path().to_str().unwrap().to_string()),
rate_limiter: None,
file_engine_type: None,
socket: None,
};
let block = Block::new(config).unwrap();
block_devs.add_virtio_device(Arc::new(Mutex::new(block)));
assert_eq!(block_devs.devices.len(), 1);
assert_eq!(
block_devs.devices.pop_back().unwrap().lock().unwrap().id(),
block_id
);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vmm_config/metrics.rs | src/vmm/src/vmm_config/metrics.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Auxiliary module for configuring the metrics system.
use std::path::PathBuf;
use serde::{Deserialize, Serialize};
use crate::logger::{FcLineWriter, METRICS};
use crate::utils::open_file_write_nonblock;
/// Strongly typed structure used to describe the metrics system.
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct MetricsConfig {
/// Named pipe or file used as output for metrics.
pub metrics_path: PathBuf,
}
/// Errors associated with actions on the `MetricsConfig`.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum MetricsConfigError {
/// Cannot initialize the metrics system due to bad user input: {0}
InitializationFailure(String),
}
/// Configures the metrics as described in `metrics_cfg`.
pub fn init_metrics(metrics_cfg: MetricsConfig) -> Result<(), MetricsConfigError> {
let writer = FcLineWriter::new(
open_file_write_nonblock(&metrics_cfg.metrics_path)
.map_err(|err| MetricsConfigError::InitializationFailure(err.to_string()))?,
);
METRICS
.init(writer)
.map_err(|err| MetricsConfigError::InitializationFailure(err.to_string()))
}
#[cfg(test)]
mod tests {
use vmm_sys_util::tempfile::TempFile;
use super::*;
#[test]
fn test_init_metrics() {
// Initializing metrics with valid pipe is ok.
let metrics_file = TempFile::new().unwrap();
let desc = MetricsConfig {
metrics_path: metrics_file.as_path().to_path_buf(),
};
init_metrics(desc.clone()).unwrap();
init_metrics(desc).unwrap_err();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vmm_config/snapshot.rs | src/vmm/src/vmm_config/snapshot.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Configurations used in the snapshotting context.
use std::path::PathBuf;
/// For crates that depend on `vmm` we export.
pub use semver::Version;
use serde::{Deserialize, Serialize};
/// The snapshot type options that are available when
/// creating a new snapshot.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Deserialize, Serialize)]
pub enum SnapshotType {
/// Diff snapshot.
Diff,
/// Full snapshot.
#[default]
Full,
}
/// Specifies the method through which guest memory will get populated when
/// resuming from a snapshot:
/// 1) A file that contains the guest memory to be loaded,
/// 2) An UDS where a custom page-fault handler process is listening for the UFFD set up by
/// Firecracker to handle its guest memory page faults.
#[derive(Debug, PartialEq, Eq, Deserialize)]
pub enum MemBackendType {
/// Guest memory contents will be loaded from a file.
File,
/// Guest memory will be served through UFFD by a separate process.
Uffd,
}
/// Stores the configuration that will be used for creating a snapshot.
#[derive(Debug, PartialEq, Eq, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct CreateSnapshotParams {
/// This marks the type of snapshot we want to create.
/// The default value is `Full`, which means a full snapshot.
#[serde(default = "SnapshotType::default")]
pub snapshot_type: SnapshotType,
/// Path to the file that will contain the microVM state.
pub snapshot_path: PathBuf,
/// Path to the file that will contain the guest memory.
pub mem_file_path: PathBuf,
}
/// Allows for changing the mapping between tap devices and host devices
/// during snapshot restore
#[derive(Debug, PartialEq, Eq, Deserialize)]
pub struct NetworkOverride {
/// The index of the interface to modify
pub iface_id: String,
/// The new name of the interface to be assigned
pub host_dev_name: String,
}
/// Stores the configuration that will be used for loading a snapshot.
#[derive(Debug, PartialEq, Eq)]
pub struct LoadSnapshotParams {
/// Path to the file that contains the microVM state to be loaded.
pub snapshot_path: PathBuf,
/// Specifies guest memory backend configuration.
pub mem_backend: MemBackendConfig,
/// Whether KVM dirty page tracking should be enabled, to space optimization
/// of differential snapshots.
pub track_dirty_pages: bool,
/// When set to true, the vm is also resumed if the snapshot load
/// is successful.
pub resume_vm: bool,
/// The network devices to override on load.
pub network_overrides: Vec<NetworkOverride>,
}
/// Stores the configuration for loading a snapshot that is provided by the user.
#[derive(Debug, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct LoadSnapshotConfig {
/// Path to the file that contains the microVM state to be loaded.
pub snapshot_path: PathBuf,
/// Path to the file that contains the guest memory to be loaded. To be used only if
/// `mem_backend` is not specified.
#[serde(skip_serializing_if = "Option::is_none")]
pub mem_file_path: Option<PathBuf>,
/// Guest memory backend configuration. Is not to be used in conjunction with `mem_file_path`.
/// None value is allowed only if `mem_file_path` is present.
#[serde(skip_serializing_if = "Option::is_none")]
pub mem_backend: Option<MemBackendConfig>,
/// Whether or not to enable KVM dirty page tracking.
#[serde(default)]
#[deprecated]
pub enable_diff_snapshots: bool,
/// Whether KVM dirty page tracking should be enabled.
#[serde(default)]
pub track_dirty_pages: bool,
/// Whether or not to resume the vm post snapshot load.
#[serde(default)]
pub resume_vm: bool,
/// The network devices to override on load.
#[serde(default)]
pub network_overrides: Vec<NetworkOverride>,
}
/// Stores the configuration used for managing snapshot memory.
#[derive(Debug, PartialEq, Eq, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct MemBackendConfig {
/// Path to the backend used to handle the guest memory.
pub backend_path: PathBuf,
/// Specifies the guest memory backend type.
pub backend_type: MemBackendType,
}
/// The microVM state options.
#[derive(Debug, Deserialize, Serialize)]
pub enum VmState {
/// The microVM is paused, which means that we can create a snapshot of it.
Paused,
/// The microVM is resumed; this state should be set after we load a snapshot.
Resumed,
}
/// Keeps the microVM state necessary in the snapshotting context.
#[derive(Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct Vm {
/// The microVM state, which can be `paused` or `resumed`.
pub state: VmState,
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vmm_config/net.rs | src/vmm/src/vmm_config/net.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::convert::TryInto;
use std::ops::Deref;
use std::sync::{Arc, Mutex};
use serde::{Deserialize, Serialize};
use super::RateLimiterConfig;
use crate::VmmError;
use crate::devices::virtio::net::{Net, TapError};
use crate::utils::net::mac::MacAddr;
/// This struct represents the strongly typed equivalent of the json body from net iface
/// related requests.
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct NetworkInterfaceConfig {
/// ID of the guest network interface.
pub iface_id: String,
/// Host level path for the guest network interface.
pub host_dev_name: String,
/// Guest MAC address.
pub guest_mac: Option<MacAddr>,
/// Rate Limiter for received packages.
pub rx_rate_limiter: Option<RateLimiterConfig>,
/// Rate Limiter for transmitted packages.
pub tx_rate_limiter: Option<RateLimiterConfig>,
}
impl From<&Net> for NetworkInterfaceConfig {
fn from(net: &Net) -> Self {
let rx_rl: RateLimiterConfig = net.rx_rate_limiter().into();
let tx_rl: RateLimiterConfig = net.tx_rate_limiter().into();
NetworkInterfaceConfig {
iface_id: net.id().clone(),
host_dev_name: net.iface_name(),
guest_mac: net.guest_mac().copied(),
rx_rate_limiter: rx_rl.into_option(),
tx_rate_limiter: tx_rl.into_option(),
}
}
}
/// The data fed into a network iface update request. Currently, only the RX and TX rate limiters
/// can be updated.
#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct NetworkInterfaceUpdateConfig {
/// The net iface ID, as provided by the user at iface creation time.
pub iface_id: String,
/// New RX rate limiter config. Only provided data will be updated. I.e. if any optional data
/// is missing, it will not be nullified, but left unchanged.
pub rx_rate_limiter: Option<RateLimiterConfig>,
/// New TX rate limiter config. Only provided data will be updated. I.e. if any optional data
/// is missing, it will not be nullified, but left unchanged.
pub tx_rate_limiter: Option<RateLimiterConfig>,
}
/// Errors associated with the operations allowed on a net device.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum NetworkInterfaceError {
/// Could not create the network device: {0}
CreateNetworkDevice(#[from] crate::devices::virtio::net::NetError),
/// Cannot create the rate limiter: {0}
CreateRateLimiter(#[from] std::io::Error),
/// Unable to update the net device: {0}
DeviceUpdate(#[from] VmmError),
/// The MAC address is already in use: {0}
GuestMacAddressInUse(String),
/// Cannot open/create the tap device: {0}
OpenTap(#[from] TapError),
}
/// Builder for a list of network devices.
#[derive(Debug, Default)]
pub struct NetBuilder {
net_devices: Vec<Arc<Mutex<Net>>>,
}
impl NetBuilder {
/// Creates an empty list of Network Devices.
pub fn new() -> Self {
NetBuilder {
// List of built network devices.
net_devices: Vec::new(),
}
}
/// Returns a immutable iterator over the network devices.
pub fn iter(&self) -> ::std::slice::Iter<'_, Arc<Mutex<Net>>> {
self.net_devices.iter()
}
/// Adds an existing network device in the builder.
pub fn add_device(&mut self, device: Arc<Mutex<Net>>) {
self.net_devices.push(device);
}
/// Builds a network device based on a network interface config. Keeps a device reference
/// in the builder's internal list.
pub fn build(
&mut self,
netif_config: NetworkInterfaceConfig,
) -> Result<Arc<Mutex<Net>>, NetworkInterfaceError> {
if let Some(ref mac_address) = netif_config.guest_mac {
let mac_conflict = |net: &Arc<Mutex<Net>>| {
let net = net.lock().expect("Poisoned lock");
// Check if another net dev has same MAC.
Some(mac_address) == net.guest_mac() && &netif_config.iface_id != net.id()
};
// Validate there is no Mac conflict.
// No need to validate host_dev_name conflict. In such a case,
// an error will be thrown during device creation anyway.
if self.net_devices.iter().any(mac_conflict) {
return Err(NetworkInterfaceError::GuestMacAddressInUse(
mac_address.to_string(),
));
}
}
// If this is an update, just remove the old one.
if let Some(index) = self
.net_devices
.iter()
.position(|net| net.lock().expect("Poisoned lock").id() == &netif_config.iface_id)
{
self.net_devices.swap_remove(index);
}
// Add new device.
let net = Arc::new(Mutex::new(Self::create_net(netif_config)?));
self.net_devices.push(net.clone());
Ok(net)
}
/// Creates a Net device from a NetworkInterfaceConfig.
pub fn create_net(cfg: NetworkInterfaceConfig) -> Result<Net, NetworkInterfaceError> {
let rx_rate_limiter = cfg
.rx_rate_limiter
.map(super::RateLimiterConfig::try_into)
.transpose()
.map_err(NetworkInterfaceError::CreateRateLimiter)?;
let tx_rate_limiter = cfg
.tx_rate_limiter
.map(super::RateLimiterConfig::try_into)
.transpose()
.map_err(NetworkInterfaceError::CreateRateLimiter)?;
// Create and return the Net device
crate::devices::virtio::net::Net::new(
cfg.iface_id,
&cfg.host_dev_name,
cfg.guest_mac,
rx_rate_limiter.unwrap_or_default(),
tx_rate_limiter.unwrap_or_default(),
)
.map_err(NetworkInterfaceError::CreateNetworkDevice)
}
/// Returns a vec with the structures used to configure the net devices.
pub fn configs(&self) -> Vec<NetworkInterfaceConfig> {
let mut ret = vec![];
for net in &self.net_devices {
ret.push(NetworkInterfaceConfig::from(net.lock().unwrap().deref()));
}
ret
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use super::*;
use crate::rate_limiter::RateLimiter;
impl NetBuilder {
pub(crate) fn len(&self) -> usize {
self.net_devices.len()
}
}
fn create_netif(id: &str, name: &str, mac: &str) -> NetworkInterfaceConfig {
NetworkInterfaceConfig {
iface_id: String::from(id),
host_dev_name: String::from(name),
guest_mac: Some(MacAddr::from_str(mac).unwrap()),
rx_rate_limiter: RateLimiterConfig::default().into_option(),
tx_rate_limiter: RateLimiterConfig::default().into_option(),
}
}
impl Clone for NetworkInterfaceConfig {
fn clone(&self) -> Self {
NetworkInterfaceConfig {
iface_id: self.iface_id.clone(),
host_dev_name: self.host_dev_name.clone(),
guest_mac: self.guest_mac,
rx_rate_limiter: None,
tx_rate_limiter: None,
}
}
}
#[test]
fn test_insert() {
let mut net_builder = NetBuilder::new();
let id_1 = "id_1";
let mut host_dev_name_1 = "dev1";
let mut guest_mac_1 = "01:23:45:67:89:0a";
// Test create.
let netif_1 = create_netif(id_1, host_dev_name_1, guest_mac_1);
net_builder.build(netif_1).unwrap();
assert_eq!(net_builder.net_devices.len(), 1);
// Test update mac address (this test does not modify the tap).
guest_mac_1 = "01:23:45:67:89:0b";
let netif_1 = create_netif(id_1, host_dev_name_1, guest_mac_1);
net_builder.build(netif_1).unwrap();
assert_eq!(net_builder.net_devices.len(), 1);
// Test update host_dev_name (the tap will be updated).
host_dev_name_1 = "dev2";
let netif_1 = create_netif(id_1, host_dev_name_1, guest_mac_1);
net_builder.build(netif_1).unwrap();
assert_eq!(net_builder.net_devices.len(), 1);
}
#[test]
fn test_insert_error_cases() {
let mut net_builder = NetBuilder::new();
let id_1 = "id_1";
let host_dev_name_1 = "dev3";
let guest_mac_1 = "01:23:45:67:89:0a";
// Adding the first valid network config.
let netif_1 = create_netif(id_1, host_dev_name_1, guest_mac_1);
net_builder.build(netif_1).unwrap();
// Error Cases for CREATE
// Error Case: Add new network config with the same mac as netif_1.
let id_2 = "id_2";
let host_dev_name_2 = "dev4";
let guest_mac_2 = "01:23:45:67:89:0b";
let netif_2 = create_netif(id_2, host_dev_name_2, guest_mac_1);
let expected_error = NetworkInterfaceError::GuestMacAddressInUse(guest_mac_1.into());
assert_eq!(
net_builder.build(netif_2).err().unwrap().to_string(),
expected_error.to_string()
);
assert_eq!(net_builder.net_devices.len(), 1);
// Error Case: Add new network config with the same dev_host_name as netif_1.
let netif_2 = create_netif(id_2, host_dev_name_1, guest_mac_2);
assert_eq!(
net_builder.build(netif_2).err().unwrap().to_string(),
NetworkInterfaceError::CreateNetworkDevice(
crate::devices::virtio::net::NetError::TapOpen(TapError::IfreqExecuteError(
std::io::Error::from_raw_os_error(16),
host_dev_name_1.to_string()
))
)
.to_string()
);
assert_eq!(net_builder.net_devices.len(), 1);
// Adding the second valid network config.
let netif_2 = create_netif(id_2, host_dev_name_2, guest_mac_2);
net_builder.build(netif_2).unwrap();
// Error Cases for UPDATE
// Error Case: Update netif_2 mac using the same mac as netif_1.
let netif_2 = create_netif(id_2, host_dev_name_2, guest_mac_1);
let expected_error = NetworkInterfaceError::GuestMacAddressInUse(guest_mac_1.into());
assert_eq!(
net_builder.build(netif_2).err().unwrap().to_string(),
expected_error.to_string()
);
// Error Case: Update netif_2 dev_host_name using the same dev_host_name as netif_1.
let netif_2 = create_netif(id_2, host_dev_name_1, guest_mac_2);
assert_eq!(
net_builder.build(netif_2).err().unwrap().to_string(),
NetworkInterfaceError::CreateNetworkDevice(
crate::devices::virtio::net::NetError::TapOpen(TapError::IfreqExecuteError(
std::io::Error::from_raw_os_error(16),
host_dev_name_1.to_string()
))
)
.to_string()
);
}
#[test]
fn test_net_config() {
let net_id = "id";
let host_dev_name = "dev";
let guest_mac = "01:23:45:67:89:0b";
let net_if_cfg = create_netif(net_id, host_dev_name, guest_mac);
assert_eq!(
net_if_cfg.guest_mac.unwrap(),
MacAddr::from_str(guest_mac).unwrap()
);
let mut net_builder = NetBuilder::new();
net_builder.build(net_if_cfg.clone()).unwrap();
assert_eq!(net_builder.net_devices.len(), 1);
let configs = net_builder.configs();
assert_eq!(configs.len(), 1);
assert_eq!(configs.first().unwrap(), &net_if_cfg);
}
#[test]
fn test_add_device() {
let mut net_builder = NetBuilder::new();
let net_id = "test_id";
let host_dev_name = "dev";
let guest_mac = "01:23:45:67:89:0b";
let net = Net::new(
net_id.to_string(),
host_dev_name,
Some(MacAddr::from_str(guest_mac).unwrap()),
RateLimiter::default(),
RateLimiter::default(),
)
.unwrap();
net_builder.add_device(Arc::new(Mutex::new(net)));
assert_eq!(net_builder.net_devices.len(), 1);
assert_eq!(
net_builder
.net_devices
.pop()
.unwrap()
.lock()
.unwrap()
.deref()
.id(),
net_id
);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/vmm_config/balloon.rs | src/vmm/src/vmm_config/balloon.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::sync::{Arc, Mutex};
use serde::{Deserialize, Serialize};
pub use crate::devices::virtio::balloon::BALLOON_DEV_ID;
pub use crate::devices::virtio::balloon::device::BalloonStats;
use crate::devices::virtio::balloon::{Balloon, BalloonConfig};
type MutexBalloon = Arc<Mutex<Balloon>>;
/// Errors associated with the operations allowed on the balloon.
#[derive(Debug, derive_more::From, thiserror::Error, displaydoc::Display)]
pub enum BalloonConfigError {
/// No balloon device found.
DeviceNotFound,
/// Amount of pages requested is too large.
TooManyPagesRequested,
/// Error creating the balloon device: {0}
CreateFailure(crate::devices::virtio::balloon::BalloonError),
}
/// This struct represents the strongly typed equivalent of the json body
/// from balloon related requests.
#[derive(Clone, Debug, Default, PartialEq, Eq, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct BalloonDeviceConfig {
/// Target balloon size in MiB.
pub amount_mib: u32,
/// Option to deflate the balloon in case the guest is out of memory.
pub deflate_on_oom: bool,
/// Interval in seconds between refreshing statistics.
#[serde(default)]
pub stats_polling_interval_s: u16,
/// Free page hinting enabled
#[serde(default)]
pub free_page_hinting: bool,
/// Free page reporting enabled
#[serde(default)]
pub free_page_reporting: bool,
}
impl From<BalloonConfig> for BalloonDeviceConfig {
fn from(state: BalloonConfig) -> Self {
BalloonDeviceConfig {
amount_mib: state.amount_mib,
deflate_on_oom: state.deflate_on_oom,
stats_polling_interval_s: state.stats_polling_interval_s,
free_page_hinting: state.free_page_hinting,
free_page_reporting: state.free_page_reporting,
}
}
}
/// The data fed into a balloon update request. Currently, only the number
/// of pages and the stats polling interval can be updated.
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct BalloonUpdateConfig {
/// Target balloon size in MiB.
pub amount_mib: u32,
}
/// The data fed into a balloon statistics interval update request.
/// Note that the state of the statistics cannot be changed from ON to OFF
/// or vice versa after boot, only the interval of polling can be changed
/// if the statistics were activated in the device configuration.
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct BalloonUpdateStatsConfig {
/// Interval in seconds between refreshing statistics.
pub stats_polling_interval_s: u16,
}
/// A builder for `Balloon` devices from 'BalloonDeviceConfig'.
#[cfg_attr(not(test), derive(Default))]
#[derive(Debug)]
pub struct BalloonBuilder {
inner: Option<MutexBalloon>,
}
impl BalloonBuilder {
/// Creates an empty Balloon Store.
pub fn new() -> Self {
Self { inner: None }
}
/// Inserts a Balloon device in the store.
/// If an entry already exists, it will overwrite it.
pub fn set(&mut self, cfg: BalloonDeviceConfig) -> Result<(), BalloonConfigError> {
self.inner = Some(Arc::new(Mutex::new(Balloon::new(
cfg.amount_mib,
cfg.deflate_on_oom,
cfg.stats_polling_interval_s,
cfg.free_page_hinting,
cfg.free_page_reporting,
)?)));
Ok(())
}
/// Inserts an existing balloon device.
pub fn set_device(&mut self, balloon: MutexBalloon) {
self.inner = Some(balloon);
}
/// Provides a reference to the Balloon if present.
pub fn get(&self) -> Option<&MutexBalloon> {
self.inner.as_ref()
}
/// Returns the same structure that was used to configure the device.
pub fn get_config(&self) -> Result<BalloonDeviceConfig, BalloonConfigError> {
self.get()
.ok_or(BalloonConfigError::DeviceNotFound)
.map(|balloon_mutex| balloon_mutex.lock().expect("Poisoned lock").config())
.map(BalloonDeviceConfig::from)
}
}
#[cfg(test)]
impl Default for BalloonBuilder {
fn default() -> BalloonBuilder {
let mut balloon = BalloonBuilder::new();
balloon.set(BalloonDeviceConfig::default()).unwrap();
balloon
}
}
#[cfg(test)]
pub(crate) mod tests {
use super::*;
pub(crate) fn default_config() -> BalloonDeviceConfig {
BalloonDeviceConfig {
amount_mib: 0,
deflate_on_oom: false,
stats_polling_interval_s: 0,
free_page_hinting: false,
free_page_reporting: false,
}
}
#[test]
fn test_balloon_create() {
let default_balloon_config = default_config();
let balloon_config = BalloonDeviceConfig {
amount_mib: 0,
deflate_on_oom: false,
stats_polling_interval_s: 0,
free_page_hinting: false,
free_page_reporting: false,
};
assert_eq!(default_balloon_config, balloon_config);
let mut builder = BalloonBuilder::new();
assert!(builder.get().is_none());
builder.set(balloon_config).unwrap();
assert_eq!(builder.get().unwrap().lock().unwrap().num_pages(), 0);
assert_eq!(builder.get_config().unwrap(), default_balloon_config);
let _update_config = BalloonUpdateConfig { amount_mib: 5 };
let _stats_update_config = BalloonUpdateStatsConfig {
stats_polling_interval_s: 5,
};
}
#[test]
fn test_from_balloon_state() {
let expected_balloon_config = BalloonDeviceConfig {
amount_mib: 5,
deflate_on_oom: false,
stats_polling_interval_s: 3,
free_page_hinting: false,
free_page_reporting: false,
};
let actual_balloon_config = BalloonDeviceConfig::from(BalloonConfig {
amount_mib: 5,
deflate_on_oom: false,
stats_polling_interval_s: 3,
free_page_hinting: false,
free_page_reporting: false,
});
assert_eq!(expected_balloon_config, actual_balloon_config);
}
#[test]
fn test_set_device() {
let mut builder = BalloonBuilder::new();
let balloon = Balloon::new(0, true, 0, false, false).unwrap();
builder.set_device(Arc::new(Mutex::new(balloon)));
assert!(builder.inner.is_some());
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/gdb/target.rs | src/vmm/src/gdb/target.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::collections::HashMap;
use std::sync::mpsc::{Receiver, RecvError};
use std::sync::{Arc, Mutex, PoisonError};
use arrayvec::ArrayVec;
use gdbstub::arch::Arch;
use gdbstub::common::{Signal, Tid};
use gdbstub::stub::{BaseStopReason, MultiThreadStopReason};
use gdbstub::target::ext::base::BaseOps;
use gdbstub::target::ext::base::multithread::{
MultiThreadBase, MultiThreadResume, MultiThreadResumeOps, MultiThreadSingleStep,
MultiThreadSingleStepOps,
};
use gdbstub::target::ext::breakpoints::{
Breakpoints, BreakpointsOps, HwBreakpoint, HwBreakpointOps, SwBreakpoint, SwBreakpointOps,
};
use gdbstub::target::ext::thread_extra_info::{ThreadExtraInfo, ThreadExtraInfoOps};
use gdbstub::target::{Target, TargetError, TargetResult};
#[cfg(target_arch = "aarch64")]
use gdbstub_arch::aarch64::AArch64 as GdbArch;
#[cfg(target_arch = "aarch64")]
use gdbstub_arch::aarch64::reg::AArch64CoreRegs as CoreRegs;
#[cfg(target_arch = "x86_64")]
use gdbstub_arch::x86::X86_64_SSE as GdbArch;
#[cfg(target_arch = "x86_64")]
use gdbstub_arch::x86::reg::X86_64CoreRegs as CoreRegs;
use vm_memory::{Bytes, GuestAddress, GuestMemoryError};
use super::arch;
use crate::arch::GUEST_PAGE_SIZE;
#[cfg(target_arch = "aarch64")]
use crate::arch::aarch64::vcpu::VcpuArchError as AarchVcpuError;
use crate::logger::{error, info};
use crate::utils::u64_to_usize;
use crate::vstate::vcpu::VcpuSendEventError;
use crate::{FcExitCode, VcpuEvent, VcpuResponse, Vmm};
#[derive(Debug, Default, Clone, Copy)]
/// Stores the current state of a Vcpu with a copy of the Vcpu file descriptor
struct VcpuState {
single_step: bool,
paused: bool,
}
impl VcpuState {
/// Disables single stepping on the Vcpu state
fn reset_vcpu_state(&mut self) {
self.single_step = false;
}
}
/// Errors from interactions between GDB and the VMM
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum GdbTargetError {
/// An error during a GDB request
GdbRequest,
/// An error with the queue between the target and the Vcpus
GdbQueueError,
/// The response from the Vcpu was not allowed
VcuRequestError,
/// No currently paused Vcpu error
NoPausedVcpu,
/// Error when setting Vcpu debug flags
VcpuKvmError,
/// Server socket Error: {0}
ServerSocketError(std::io::Error),
/// Error with creating GDB thread
GdbThreadError,
/// VMM locking error
VmmLockError,
/// Vcpu send event error
VcpuSendEventError(#[from] VcpuSendEventError),
/// Recieve error from Vcpu channel
VcpuRecvError(#[from] RecvError),
/// TID Conversion error
TidConversionError,
/// KVM set guest debug error
KvmIoctlsError(#[from] kvm_ioctls::Error),
/// Gva no translation available
GvaTranslateError,
/// Conversion error with cpu rflags
RegFlagConversionError,
#[cfg(target_arch = "aarch64")]
/// Error retrieving registers from a Vcpu
ReadRegisterError(#[from] AarchVcpuError),
#[cfg(target_arch = "aarch64")]
/// Error retrieving registers from a register vec.
ReadRegisterVecError,
/// Error while reading/writing to guest memory
GuestMemoryError(#[from] GuestMemoryError),
}
impl From<GdbTargetError> for TargetError<GdbTargetError> {
fn from(error: GdbTargetError) -> Self {
match error {
GdbTargetError::VmmLockError => TargetError::Fatal(GdbTargetError::VmmLockError),
_ => TargetError::NonFatal,
}
}
}
impl<E> From<PoisonError<E>> for GdbTargetError {
fn from(_value: PoisonError<E>) -> Self {
GdbTargetError::VmmLockError
}
}
/// Debug Target for firecracker.
///
/// This is used the manage the debug implementation and handle requests sent via GDB
#[derive(Debug)]
pub struct FirecrackerTarget {
/// A mutex around the VMM to allow communicataion to the Vcpus
vmm: Arc<Mutex<Vmm>>,
/// Store the guest entry point
entry_addr: GuestAddress,
/// Listener for events sent from the Vcpu
pub gdb_event: Receiver<usize>,
/// Used to track the currently configured hardware breakpoints.
/// Limited to 4 in x86 see:
/// https://elixir.bootlin.com/linux/v6.1/source/arch/x86/include/asm/kvm_host.h#L210
hw_breakpoints: ArrayVec<GuestAddress, 4>,
/// Used to track the currently configured software breakpoints and store the op-code
/// which was swapped out
sw_breakpoints: HashMap<<GdbArch as Arch>::Usize, [u8; arch::SW_BP_SIZE]>,
/// Stores the current state of each Vcpu
vcpu_state: Vec<VcpuState>,
/// Stores the current paused thread id, GDB can inact commands without providing us a Tid to
/// run on and expects us to use the last paused thread.
paused_vcpu: Option<Tid>,
}
/// Convert the 1 indexed Tid to the 0 indexed Vcpuid
fn tid_to_vcpuid(tid: Tid) -> usize {
tid.get() - 1
}
/// Converts the inernal index of a Vcpu to
/// the Tid required by GDB
pub fn vcpuid_to_tid(cpu_id: usize) -> Result<Tid, GdbTargetError> {
Tid::new(get_raw_tid(cpu_id)).ok_or(GdbTargetError::TidConversionError)
}
/// Converts the inernal index of a Vcpu to
/// the 1 indexed value for GDB
pub fn get_raw_tid(cpu_id: usize) -> usize {
cpu_id + 1
}
impl FirecrackerTarget {
/// Creates a new Target for GDB stub. This is used as the layer between GDB and the VMM it
/// will handle requests from GDB and perform the appropriate actions, while also updating GDB
/// with the state of the VMM / Vcpu's as we hit debug events
pub fn new(vmm: Arc<Mutex<Vmm>>, gdb_event: Receiver<usize>, entry_addr: GuestAddress) -> Self {
let mut vcpu_state = vec![VcpuState::default(); vmm.lock().unwrap().vcpus_handles.len()];
// By default vcpu 1 will be paused at the entry point
vcpu_state[0].paused = true;
Self {
vmm,
entry_addr,
gdb_event,
// We only support 4 hw breakpoints on x86 this will need to be configurable on arm
hw_breakpoints: Default::default(),
sw_breakpoints: HashMap::new(),
vcpu_state,
paused_vcpu: Tid::new(1),
}
}
// Update KVM debug info for a specific vcpu index.
fn update_vcpu_kvm_debug(
&self,
vcpu_idx: usize,
hw_breakpoints: &[GuestAddress],
) -> Result<(), GdbTargetError> {
let state = &self.vcpu_state[vcpu_idx];
if !state.paused {
info!("Attempted to update kvm debug on a non paused Vcpu");
return Ok(());
}
let vcpu_fd = &self.vmm.lock().unwrap().vcpus_handles[vcpu_idx].vcpu_fd;
arch::vcpu_set_debug(vcpu_fd, hw_breakpoints, state.single_step)
}
/// Translate guest virtual address to guest pysical address.
fn translate_gva(&self, vcpu_idx: usize, addr: u64) -> Result<u64, GdbTargetError> {
let vmm = self.vmm.lock().unwrap();
let vcpu_fd = &vmm.vcpus_handles[vcpu_idx].vcpu_fd;
arch::translate_gva(vcpu_fd, addr, &vmm)
}
/// Retrieves the currently paused Vcpu id returns an error if there is no currently paused Vcpu
fn get_paused_vcpu_id(&self) -> Result<Tid, GdbTargetError> {
self.paused_vcpu.ok_or(GdbTargetError::NoPausedVcpu)
}
/// Returns the index of the paused vcpu.
fn get_paused_vcpu_idx(&self) -> Result<usize, GdbTargetError> {
Ok(tid_to_vcpuid(self.get_paused_vcpu_id()?))
}
/// Updates state to reference the currently paused Vcpu and store that the cpu is currently
/// paused
pub fn set_paused_vcpu(&mut self, tid: Tid) {
self.vcpu_state[tid_to_vcpuid(tid)].paused = true;
self.paused_vcpu = Some(tid);
}
/// Resumes execution of all paused Vcpus, update them with current kvm debug info
/// and resumes
fn resume_all_vcpus(&mut self) -> Result<(), GdbTargetError> {
for idx in 0..self.vcpu_state.len() {
self.update_vcpu_kvm_debug(idx, &self.hw_breakpoints)?;
}
for cpu_id in 0..self.vcpu_state.len() {
let tid = vcpuid_to_tid(cpu_id)?;
self.resume_vcpu(tid)?;
}
self.paused_vcpu = None;
Ok(())
}
/// Resets all Vcpus to their base state
fn reset_all_vcpu_states(&mut self) {
for value in self.vcpu_state.iter_mut() {
value.reset_vcpu_state();
}
}
/// Shuts down the VMM
pub fn shutdown_vmm(&self) {
self.vmm
.lock()
.expect("error unlocking vmm")
.stop(FcExitCode::Ok)
}
/// Pauses the requested Vcpu
pub fn pause_vcpu(&mut self, tid: Tid) -> Result<(), GdbTargetError> {
let vcpu_state = &mut self.vcpu_state[tid_to_vcpuid(tid)];
if vcpu_state.paused {
info!("Attempted to pause a vcpu already paused.");
// Pausing an already paused vcpu is not considered an error case from GDB
return Ok(());
}
let cpu_handle = &mut self.vmm.lock()?.vcpus_handles[tid_to_vcpuid(tid)];
cpu_handle.send_event(VcpuEvent::Pause)?;
let _ = cpu_handle.response_receiver().recv()?;
vcpu_state.paused = true;
Ok(())
}
/// A helper function to allow the event loop to inject this breakpoint back into the Vcpu
pub fn inject_bp_to_guest(&mut self, tid: Tid) -> Result<(), GdbTargetError> {
let vmm = self.vmm.lock().unwrap();
let vcpu_idx = tid_to_vcpuid(tid);
let vcpu_fd = &vmm.vcpus_handles[vcpu_idx].vcpu_fd;
arch::vcpu_inject_bp(vcpu_fd, &self.hw_breakpoints, false)
}
/// Resumes the Vcpu, will return early if the Vcpu is already running
pub fn resume_vcpu(&mut self, tid: Tid) -> Result<(), GdbTargetError> {
let vcpu_state = &mut self.vcpu_state[tid_to_vcpuid(tid)];
if !vcpu_state.paused {
info!("Attempted to resume a vcpu already running.");
// Resuming an already running Vcpu is not considered an error case from GDB
return Ok(());
}
let cpu_handle = &mut self.vmm.lock()?.vcpus_handles[tid_to_vcpuid(tid)];
cpu_handle.send_event(VcpuEvent::Resume)?;
let response = cpu_handle.response_receiver().recv()?;
if let VcpuResponse::NotAllowed(message) = response {
error!("Response resume : {message}");
return Err(GdbTargetError::VcuRequestError);
}
vcpu_state.paused = false;
Ok(())
}
/// Identifies why the specific core was paused to be returned to GDB if None is returned this
/// indicates to handle this internally and don't notify GDB
pub fn get_stop_reason(
&self,
tid: Tid,
) -> Result<Option<BaseStopReason<Tid, u64>>, GdbTargetError> {
let vcpu_idx = tid_to_vcpuid(tid);
let vcpu_state = &self.vcpu_state[vcpu_idx];
if vcpu_state.single_step {
return Ok(Some(MultiThreadStopReason::SignalWithThread {
tid,
signal: Signal::SIGTRAP,
}));
}
let vmm = self.vmm.lock().unwrap();
let vcpu_fd = &vmm.vcpus_handles[vcpu_idx].vcpu_fd;
let Ok(ip) = arch::get_instruction_pointer(vcpu_fd) else {
// If we error here we return an arbitrary Software Breakpoint, GDB will handle
// this gracefully
return Ok(Some(MultiThreadStopReason::SwBreak(tid)));
};
let gpa = arch::translate_gva(vcpu_fd, ip, &vmm)?;
if self.sw_breakpoints.contains_key(&gpa) {
return Ok(Some(MultiThreadStopReason::SwBreak(tid)));
}
if self.hw_breakpoints.contains(&GuestAddress(ip)) {
return Ok(Some(MultiThreadStopReason::HwBreak(tid)));
}
if ip == self.entry_addr.0 {
return Ok(Some(MultiThreadStopReason::HwBreak(tid)));
}
// This is not a breakpoint we've set, likely one set by the guest
Ok(None)
}
}
impl Target for FirecrackerTarget {
type Error = GdbTargetError;
type Arch = GdbArch;
#[inline(always)]
fn base_ops(&mut self) -> BaseOps<Self::Arch, Self::Error> {
BaseOps::MultiThread(self)
}
#[inline(always)]
fn support_breakpoints(&mut self) -> Option<BreakpointsOps<Self>> {
Some(self)
}
/// We disable implicit sw breakpoints as we want to manage these internally so we can inject
/// breakpoints back into the guest if we didn't create them
#[inline(always)]
fn guard_rail_implicit_sw_breakpoints(&self) -> bool {
false
}
}
impl MultiThreadBase for FirecrackerTarget {
/// Reads the registers for the Vcpu
fn read_registers(&mut self, regs: &mut CoreRegs, tid: Tid) -> TargetResult<(), Self> {
let vmm = self.vmm.lock().unwrap();
let vcpu_idx = tid_to_vcpuid(tid);
let vcpu_fd = &vmm.vcpus_handles[vcpu_idx].vcpu_fd;
arch::read_registers(vcpu_fd, regs)?;
Ok(())
}
/// Writes to the registers for the Vcpu
fn write_registers(&mut self, regs: &CoreRegs, tid: Tid) -> TargetResult<(), Self> {
let vmm = self.vmm.lock().unwrap();
let vcpu_idx = tid_to_vcpuid(tid);
let vcpu_fd = &vmm.vcpus_handles[vcpu_idx].vcpu_fd;
arch::write_registers(vcpu_fd, regs)?;
Ok(())
}
/// Writes data to a guest virtual address for the Vcpu
fn read_addrs(
&mut self,
mut gva: <Self::Arch as Arch>::Usize,
mut data: &mut [u8],
tid: Tid,
) -> TargetResult<usize, Self> {
let data_len = data.len();
let vmm = self.vmm.lock().unwrap();
let vcpu_idx = tid_to_vcpuid(tid);
let vcpu_fd = &vmm.vcpus_handles[vcpu_idx].vcpu_fd;
while !data.is_empty() {
let gpa = arch::translate_gva(vcpu_fd, gva, &vmm).map_err(|e| {
error!("Error {e:?} translating gva on read address: {gva:#X}");
})?;
// Compute the amount space left in the page after the gpa
let read_len = std::cmp::min(
data.len(),
GUEST_PAGE_SIZE - (u64_to_usize(gpa) & (GUEST_PAGE_SIZE - 1)),
);
vmm.vm
.guest_memory()
.read(&mut data[..read_len], GuestAddress(gpa as u64))
.map_err(|e| {
error!("Error reading memory {e:?} gpa is {gpa}");
})?;
data = &mut data[read_len..];
gva += read_len as u64;
}
Ok(data_len)
}
/// Writes data at a guest virtual address for the Vcpu
fn write_addrs(
&mut self,
mut gva: <Self::Arch as Arch>::Usize,
mut data: &[u8],
tid: Tid,
) -> TargetResult<(), Self> {
let vmm = self.vmm.lock().unwrap();
let vcpu_idx = tid_to_vcpuid(tid);
let vcpu_fd = &vmm.vcpus_handles[vcpu_idx].vcpu_fd;
while !data.is_empty() {
let gpa = arch::translate_gva(vcpu_fd, gva, &vmm).map_err(|e| {
error!("Error {e:?} translating gva on read address: {gva:#X}");
})?;
// Compute the amount space left in the page after the gpa
let write_len = std::cmp::min(
data.len(),
GUEST_PAGE_SIZE - (u64_to_usize(gpa) & (GUEST_PAGE_SIZE - 1)),
);
vmm.vm
.guest_memory()
.write(&data[..write_len], GuestAddress(gpa))
.map_err(|e| {
error!("Error {e:?} writing memory at {gpa:#X}");
})?;
data = &data[write_len..];
gva += write_len as u64;
}
Ok(())
}
#[inline(always)]
/// Makes the callback provided with each Vcpu
/// GDB expects us to return all threads currently running with this command, for firecracker
/// this is all Vcpus
fn list_active_threads(
&mut self,
thread_is_active: &mut dyn FnMut(Tid),
) -> Result<(), Self::Error> {
for id in 0..self.vcpu_state.len() {
thread_is_active(vcpuid_to_tid(id)?)
}
Ok(())
}
#[inline(always)]
fn support_resume(&mut self) -> Option<MultiThreadResumeOps<Self>> {
Some(self)
}
#[inline(always)]
fn support_thread_extra_info(&mut self) -> Option<ThreadExtraInfoOps<'_, Self>> {
Some(self)
}
}
impl MultiThreadResume for FirecrackerTarget {
/// Disables single step on the Vcpu
fn set_resume_action_continue(
&mut self,
tid: Tid,
_signal: Option<Signal>,
) -> Result<(), Self::Error> {
self.vcpu_state[tid_to_vcpuid(tid)].single_step = false;
Ok(())
}
/// Resumes the execution of all currently paused Vcpus
fn resume(&mut self) -> Result<(), Self::Error> {
self.resume_all_vcpus()
}
/// Clears the state of all Vcpus setting it back to base config
fn clear_resume_actions(&mut self) -> Result<(), Self::Error> {
self.reset_all_vcpu_states();
Ok(())
}
#[inline(always)]
fn support_single_step(&mut self) -> Option<MultiThreadSingleStepOps<'_, Self>> {
Some(self)
}
}
impl MultiThreadSingleStep for FirecrackerTarget {
/// Enabled single step on the Vcpu
fn set_resume_action_step(
&mut self,
tid: Tid,
_signal: Option<Signal>,
) -> Result<(), Self::Error> {
self.vcpu_state[tid_to_vcpuid(tid)].single_step = true;
Ok(())
}
}
impl Breakpoints for FirecrackerTarget {
#[inline(always)]
fn support_hw_breakpoint(&mut self) -> Option<HwBreakpointOps<Self>> {
Some(self)
}
#[inline(always)]
fn support_sw_breakpoint(&mut self) -> Option<SwBreakpointOps<Self>> {
Some(self)
}
}
impl HwBreakpoint for FirecrackerTarget {
/// Adds a hardware breakpoint The breakpoint addresses are
/// stored in state so we can track the reason for an exit.
fn add_hw_breakpoint(
&mut self,
gva: <Self::Arch as Arch>::Usize,
_kind: <Self::Arch as Arch>::BreakpointKind,
) -> TargetResult<bool, Self> {
let ga = GuestAddress(gva);
if self.hw_breakpoints.contains(&ga) {
return Ok(true);
}
if self.hw_breakpoints.try_push(ga).is_err() {
return Ok(false);
}
let vcpu_idx = self.get_paused_vcpu_idx()?;
self.update_vcpu_kvm_debug(vcpu_idx, &self.hw_breakpoints)?;
Ok(true)
}
/// Removes a hardware breakpoint.
fn remove_hw_breakpoint(
&mut self,
gva: <Self::Arch as Arch>::Usize,
_kind: <Self::Arch as Arch>::BreakpointKind,
) -> TargetResult<bool, Self> {
match self.hw_breakpoints.iter().position(|&b| b.0 == gva) {
None => return Ok(false),
Some(pos) => self.hw_breakpoints.remove(pos),
};
let vcpu_idx = self.get_paused_vcpu_idx()?;
self.update_vcpu_kvm_debug(vcpu_idx, &self.hw_breakpoints)?;
Ok(true)
}
}
impl SwBreakpoint for FirecrackerTarget {
/// Inserts a software breakpoint.
/// We initially translate the guest virtual address to a guest physical address and then check
/// if this is already present, if so we return early. Otherwise we store the opcode at the
/// specified guest physical address in our store and replace it with the `X86_SW_BP_OP`
fn add_sw_breakpoint(
&mut self,
addr: <Self::Arch as Arch>::Usize,
_kind: <Self::Arch as Arch>::BreakpointKind,
) -> TargetResult<bool, Self> {
let vcpu_idx = self.get_paused_vcpu_idx()?;
let gpa = self.translate_gva(vcpu_idx, addr)?;
if self.sw_breakpoints.contains_key(&gpa) {
return Ok(true);
}
let paused_vcpu_id = self.get_paused_vcpu_id()?;
let mut saved_register = [0; arch::SW_BP_SIZE];
self.read_addrs(addr, &mut saved_register, paused_vcpu_id)?;
self.sw_breakpoints.insert(gpa, saved_register);
self.write_addrs(addr, &arch::SW_BP, paused_vcpu_id)?;
Ok(true)
}
/// Removes a software breakpoint.
/// We firstly translate the guest virtual address to a guest physical address, we then check if
/// the resulting gpa is in our store, if so we load the stored opcode and write this back
fn remove_sw_breakpoint(
&mut self,
addr: <Self::Arch as Arch>::Usize,
_kind: <Self::Arch as Arch>::BreakpointKind,
) -> TargetResult<bool, Self> {
let vcpu_idx = self.get_paused_vcpu_idx()?;
let gpa = self.translate_gva(vcpu_idx, addr)?;
if let Some(removed) = self.sw_breakpoints.remove(&gpa) {
self.write_addrs(addr, &removed, self.get_paused_vcpu_id()?)?;
return Ok(true);
}
Ok(false)
}
}
impl ThreadExtraInfo for FirecrackerTarget {
/// Allows us to configure the formatting of the thread information, we just return the ID of
/// the Vcpu
fn thread_extra_info(&self, tid: Tid, buf: &mut [u8]) -> Result<usize, Self::Error> {
let info = format!("Vcpu ID: {}", tid_to_vcpuid(tid));
let size = buf.len().min(info.len());
buf[..size].copy_from_slice(&info.as_bytes()[..size]);
Ok(size)
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/gdb/mod.rs | src/vmm/src/gdb/mod.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/// Arch specific implementations
mod arch;
/// Event loop for connection to GDB server
mod event_loop;
/// Target for gdb
pub mod target;
use std::os::unix::net::UnixListener;
use std::path::Path;
use std::sync::mpsc::Receiver;
use std::sync::{Arc, Mutex};
use arch::vcpu_set_debug;
use event_loop::event_loop;
use target::GdbTargetError;
use vm_memory::GuestAddress;
use crate::Vmm;
use crate::logger::trace;
/// Kickstarts the GDB debugging process, it takes in the VMM object, a slice of
/// the paused Vcpu's, the GDB event queue which is used as a mechanism for the Vcpu's to notify
/// our GDB thread that they've been paused, then finally the entry address of the kernel.
///
/// Firstly the function will start by configuring the Vcpus with KVM for debugging
///
/// This will then create the GDB socket which will be used for communication to the GDB process.
/// After creating this, the function will block while waiting for GDB to connect.
///
/// After the connection has been established the function will start a new thread for handling
/// communcation to the GDB server
pub fn gdb_thread(
vmm: Arc<Mutex<Vmm>>,
gdb_event_receiver: Receiver<usize>,
entry_addr: GuestAddress,
socket_addr: &str,
) -> Result<(), GdbTargetError> {
// We register a hw breakpoint at the entry point as GDB expects the application
// to be stopped as it connects. This also allows us to set breakpoints before kernel starts.
// This entry adddress is automatically used as it is not tracked inside the target state, so
// when resumed will be removed
{
let vmm = vmm.lock().unwrap();
vcpu_set_debug(&vmm.vcpus_handles[0].vcpu_fd, &[entry_addr], false)?;
for handle in &vmm.vcpus_handles[1..] {
vcpu_set_debug(&handle.vcpu_fd, &[], false)?;
}
}
let path = Path::new(socket_addr);
let listener = UnixListener::bind(path).map_err(GdbTargetError::ServerSocketError)?;
trace!("Waiting for GDB server connection on {}...", path.display());
let (connection, _addr) = listener
.accept()
.map_err(GdbTargetError::ServerSocketError)?;
std::thread::Builder::new()
.name("gdb".into())
.spawn(move || event_loop(connection, vmm, gdb_event_receiver, entry_addr))
.map_err(|_| GdbTargetError::GdbThreadError)?;
Ok(())
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/gdb/event_loop.rs | src/vmm/src/gdb/event_loop.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::os::unix::net::UnixStream;
use std::sync::mpsc::Receiver;
use std::sync::mpsc::TryRecvError::Empty;
use std::sync::{Arc, Mutex};
use gdbstub::common::{Signal, Tid};
use gdbstub::conn::{Connection, ConnectionExt};
use gdbstub::stub::run_blocking::{self, WaitForStopReasonError};
use gdbstub::stub::{DisconnectReason, GdbStub, MultiThreadStopReason};
use gdbstub::target::Target;
use vm_memory::GuestAddress;
use super::target::{FirecrackerTarget, GdbTargetError, vcpuid_to_tid};
use crate::Vmm;
use crate::logger::{error, trace};
/// Starts the GDB event loop which acts as a proxy between the Vcpus and GDB
pub fn event_loop(
connection: UnixStream,
vmm: Arc<Mutex<Vmm>>,
gdb_event_receiver: Receiver<usize>,
entry_addr: GuestAddress,
) {
let target = FirecrackerTarget::new(vmm, gdb_event_receiver, entry_addr);
let connection: Box<dyn ConnectionExt<Error = std::io::Error>> = { Box::new(connection) };
let debugger = GdbStub::new(connection);
// We wait for the VM to reach the inital breakpoint we inserted before starting the event loop
target
.gdb_event
.recv()
.expect("Error getting initial gdb event");
gdb_event_loop_thread(debugger, target);
}
struct GdbBlockingEventLoop {}
impl run_blocking::BlockingEventLoop for GdbBlockingEventLoop {
type Target = FirecrackerTarget;
type Connection = Box<dyn ConnectionExt<Error = std::io::Error>>;
type StopReason = MultiThreadStopReason<u64>;
/// Poll for events from either Vcpu's or packets from the GDB connection
fn wait_for_stop_reason(
target: &mut FirecrackerTarget,
conn: &mut Self::Connection,
) -> Result<
run_blocking::Event<MultiThreadStopReason<u64>>,
run_blocking::WaitForStopReasonError<
<Self::Target as Target>::Error,
<Self::Connection as Connection>::Error,
>,
> {
loop {
match target.gdb_event.try_recv() {
Ok(cpu_id) => {
// The Vcpu reports it's id from raw_id so we straight convert here
let tid = Tid::new(cpu_id).expect("Error converting cpu id to Tid");
// If notify paused returns false this means we were already debugging a single
// core, the target will track this for us to pick up later
target.set_paused_vcpu(tid);
trace!("Vcpu: {tid:?} paused from debug exit");
let stop_reason = target
.get_stop_reason(tid)
.map_err(WaitForStopReasonError::Target)?;
let Some(stop_response) = stop_reason else {
// If we returned None this is a break which should be handled by
// the guest kernel (e.g. kernel int3 self testing) so we won't notify
// GDB and instead inject this back into the guest
target
.inject_bp_to_guest(tid)
.map_err(WaitForStopReasonError::Target)?;
target
.resume_vcpu(tid)
.map_err(WaitForStopReasonError::Target)?;
trace!("Injected BP into guest early exit");
continue;
};
trace!("Returned stop reason to gdb: {stop_response:?}");
return Ok(run_blocking::Event::TargetStopped(stop_response));
}
Err(Empty) => (),
Err(_) => {
return Err(WaitForStopReasonError::Target(
GdbTargetError::GdbQueueError,
));
}
}
if conn.peek().map(|b| b.is_some()).unwrap_or(false) {
let byte = conn
.read()
.map_err(run_blocking::WaitForStopReasonError::Connection)?;
return Ok(run_blocking::Event::IncomingData(byte));
}
}
}
/// Invoked when the GDB client sends a Ctrl-C interrupt.
fn on_interrupt(
target: &mut FirecrackerTarget,
) -> Result<Option<MultiThreadStopReason<u64>>, <FirecrackerTarget as Target>::Error> {
// notify the target that a ctrl-c interrupt has occurred.
let main_core = vcpuid_to_tid(0)?;
target.pause_vcpu(main_core)?;
target.set_paused_vcpu(main_core);
let exit_reason = MultiThreadStopReason::SignalWithThread {
tid: main_core,
signal: Signal::SIGINT,
};
Ok(Some(exit_reason))
}
}
/// Runs while communication with GDB is in progress, after GDB disconnects we
/// shutdown firecracker
fn gdb_event_loop_thread(
debugger: GdbStub<FirecrackerTarget, Box<dyn ConnectionExt<Error = std::io::Error>>>,
mut target: FirecrackerTarget,
) {
match debugger.run_blocking::<GdbBlockingEventLoop>(&mut target) {
Ok(disconnect_reason) => match disconnect_reason {
DisconnectReason::Disconnect => {
trace!("Client disconnected")
}
DisconnectReason::TargetExited(code) => {
trace!("Target exited with code {}", code)
}
DisconnectReason::TargetTerminated(sig) => {
trace!("Target terminated with signal {}", sig)
}
DisconnectReason::Kill => trace!("GDB sent a kill command"),
},
Err(e) => {
if e.is_target_error() {
error!("target encountered a fatal error: {e:?}")
} else if e.is_connection_error() {
error!("connection error: {e:?}")
} else {
error!("gdbstub encountered a fatal error {e:?}")
}
}
}
target.shutdown_vmm();
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/gdb/arch/x86.rs | src/vmm/src/gdb/arch/x86.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use gdbstub_arch::x86::reg::X86_64CoreRegs as CoreRegs;
use kvm_bindings::*;
use kvm_ioctls::VcpuFd;
use vm_memory::GuestAddress;
use crate::Vmm;
use crate::gdb::target::GdbTargetError;
use crate::logger::error;
/// Sets the 9th (Global Exact Breakpoint enable) and the 10th (always 1) bits for the DR7 debug
/// control register
const X86_GLOBAL_DEBUG_ENABLE: u64 = 0b11 << 9;
/// Op code to trigger a software breakpoint in x86
const X86_SW_BP_OP: u8 = 0xCC;
/// Configures the number of bytes required for a software breakpoint
pub const SW_BP_SIZE: usize = 1;
/// The bytes stored for an x86 software breakpoint
pub const SW_BP: [u8; SW_BP_SIZE] = [X86_SW_BP_OP];
/// Gets the RIP value for a Vcpu
pub fn get_instruction_pointer(vcpu_fd: &VcpuFd) -> Result<u64, GdbTargetError> {
let regs = vcpu_fd.get_regs()?;
Ok(regs.rip)
}
/// Translates a virtual address according to the vCPU's current address translation mode.
pub fn translate_gva(vcpu_fd: &VcpuFd, gva: u64, _vmm: &Vmm) -> Result<u64, GdbTargetError> {
let tr = vcpu_fd.translate_gva(gva)?;
if tr.valid == 0 {
return Err(GdbTargetError::GvaTranslateError);
}
Ok(tr.physical_address)
}
/// Configures the kvm guest debug regs to register the hardware breakpoints, the `arch.debugreg`
/// attribute is used to store the location of the hardware breakpoints, with the 8th slot being
/// used as a bitfield to track which registers are enabled and setting the
/// `X86_GLOBAL_DEBUG_ENABLE` flags. Further reading on the DR7 register can be found here:
/// https://en.wikipedia.org/wiki/X86_debug_register#DR7_-_Debug_control
fn set_kvm_debug(
control: u32,
vcpu_fd: &VcpuFd,
addrs: &[GuestAddress],
) -> Result<(), GdbTargetError> {
let mut dbg = kvm_guest_debug {
control,
..Default::default()
};
dbg.arch.debugreg[7] = X86_GLOBAL_DEBUG_ENABLE;
for (i, addr) in addrs.iter().enumerate() {
dbg.arch.debugreg[i] = addr.0;
// Set global breakpoint enable flag for the specific breakpoint number by setting the bit
dbg.arch.debugreg[7] |= 2 << (i * 2);
}
vcpu_fd.set_guest_debug(&dbg)?;
Ok(())
}
/// Configures the Vcpu for debugging and sets the hardware breakpoints on the Vcpu
pub fn vcpu_set_debug(
vcpu_fd: &VcpuFd,
addrs: &[GuestAddress],
step: bool,
) -> Result<(), GdbTargetError> {
let mut control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP | KVM_GUESTDBG_USE_SW_BP;
if step {
control |= KVM_GUESTDBG_SINGLESTEP;
}
set_kvm_debug(control, vcpu_fd, addrs)
}
/// Injects a BP back into the guest kernel for it to handle, this is particularly useful for the
/// kernels selftesting which can happen during boot.
pub fn vcpu_inject_bp(
vcpu_fd: &VcpuFd,
addrs: &[GuestAddress],
step: bool,
) -> Result<(), GdbTargetError> {
let mut control = KVM_GUESTDBG_ENABLE
| KVM_GUESTDBG_USE_HW_BP
| KVM_GUESTDBG_USE_SW_BP
| KVM_GUESTDBG_INJECT_BP;
if step {
control |= KVM_GUESTDBG_SINGLESTEP;
}
set_kvm_debug(control, vcpu_fd, addrs)
}
/// Reads the registers for the Vcpu
pub fn read_registers(vcpu_fd: &VcpuFd, regs: &mut CoreRegs) -> Result<(), GdbTargetError> {
let cpu_regs = vcpu_fd.get_regs()?;
regs.regs[0] = cpu_regs.rax;
regs.regs[1] = cpu_regs.rbx;
regs.regs[2] = cpu_regs.rcx;
regs.regs[3] = cpu_regs.rdx;
regs.regs[4] = cpu_regs.rsi;
regs.regs[5] = cpu_regs.rdi;
regs.regs[6] = cpu_regs.rbp;
regs.regs[7] = cpu_regs.rsp;
regs.regs[8] = cpu_regs.r8;
regs.regs[9] = cpu_regs.r9;
regs.regs[10] = cpu_regs.r10;
regs.regs[11] = cpu_regs.r11;
regs.regs[12] = cpu_regs.r12;
regs.regs[13] = cpu_regs.r13;
regs.regs[14] = cpu_regs.r14;
regs.regs[15] = cpu_regs.r15;
regs.rip = cpu_regs.rip;
regs.eflags = u32::try_from(cpu_regs.rflags).map_err(|e| {
error!("Error {e:?} converting rflags to u32");
GdbTargetError::RegFlagConversionError
})?;
Ok(())
}
/// Writes to the registers for the Vcpu
pub fn write_registers(vcpu_fd: &VcpuFd, regs: &CoreRegs) -> Result<(), GdbTargetError> {
let new_regs = kvm_regs {
rax: regs.regs[0],
rbx: regs.regs[1],
rcx: regs.regs[2],
rdx: regs.regs[3],
rsi: regs.regs[4],
rdi: regs.regs[5],
rbp: regs.regs[6],
rsp: regs.regs[7],
r8: regs.regs[8],
r9: regs.regs[9],
r10: regs.regs[10],
r11: regs.regs[11],
r12: regs.regs[12],
r13: regs.regs[13],
r14: regs.regs[14],
r15: regs.regs[15],
rip: regs.rip,
rflags: regs.eflags as u64,
};
Ok(vcpu_fd.set_regs(&new_regs)?)
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/gdb/arch/mod.rs | src/vmm/src/gdb/arch/mod.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
#[cfg(target_arch = "aarch64")]
mod aarch64;
#[cfg(target_arch = "aarch64")]
pub use aarch64::*;
#[cfg(target_arch = "x86_64")]
mod x86;
#[cfg(target_arch = "x86_64")]
pub use x86::*;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/gdb/arch/aarch64.rs | src/vmm/src/gdb/arch/aarch64.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::mem::offset_of;
use gdbstub_arch::aarch64::reg::AArch64CoreRegs as CoreRegs;
use kvm_bindings::{
KVM_GUESTDBG_ENABLE, KVM_GUESTDBG_SINGLESTEP, KVM_GUESTDBG_USE_HW, KVM_GUESTDBG_USE_SW_BP,
KVM_REG_ARM_CORE, KVM_REG_ARM64, KVM_REG_SIZE_U64, kvm_guest_debug, kvm_regs, user_pt_regs,
};
use kvm_ioctls::VcpuFd;
use vm_memory::{Bytes, GuestAddress};
use crate::Vmm;
use crate::arch::aarch64::regs::{
Aarch64RegisterVec, ID_AA64MMFR0_EL1, TCR_EL1, TTBR1_EL1, arm64_core_reg_id,
};
use crate::arch::aarch64::vcpu::get_registers;
use crate::gdb::target::GdbTargetError;
/// Configures the number of bytes required for a software breakpoint.
///
/// The breakpoint instruction operation also includes the immediate argument which we 0 hence the
/// size.
pub const SW_BP_SIZE: usize = 4;
/// The bytes stored for a software breakpoint.
///
/// This is the BRK instruction with a 0 immediate argument.
/// https://developer.arm.com/documentation/ddi0602/2024-09/Base-Instructions/BRK--Breakpoint-instruction-
pub const SW_BP: [u8; SW_BP_SIZE] = [0, 0, 32, 212];
/// Register id for the program counter
const PC_REG_ID: u64 = arm64_core_reg_id!(KVM_REG_SIZE_U64, offset_of!(user_pt_regs, pc));
/// Retrieve a single register from a Vcpu
fn get_sys_reg(reg: u64, vcpu_fd: &VcpuFd) -> Result<u64, GdbTargetError> {
let mut register_vec = Aarch64RegisterVec::default();
get_registers(vcpu_fd, &[reg], &mut register_vec)?;
let register = register_vec
.iter()
.next()
.ok_or(GdbTargetError::ReadRegisterVecError)?;
Ok(register.value())
}
/// Gets the PC value for a Vcpu
pub fn get_instruction_pointer(vcpu_fd: &VcpuFd) -> Result<u64, GdbTargetError> {
get_sys_reg(PC_REG_ID, vcpu_fd)
}
/// Helper to extract a specific number of bits at an offset from a u64
macro_rules! extract_bits_64 {
($value: tt, $offset: tt, $length: tt) => {
($value >> $offset) & (!0u64 >> (64 - $length))
};
}
/// Mask to clear the last 3 bits from the page table entry
const PTE_ADDRESS_MASK: u64 = !0b111u64;
/// Read a u64 value from a guest memory address
fn read_address(vmm: &Vmm, address: u64) -> Result<u64, GdbTargetError> {
let mut buf = [0; 8];
vmm.vm
.guest_memory()
.read(&mut buf, GuestAddress(address))?;
Ok(u64::from_le_bytes(buf))
}
/// The grainsize used with 4KB paging
const GRAIN_SIZE: usize = 9;
/// Translates a virtual address according to the Vcpu's current address translation mode.
/// Returns the GPA (guest physical address)
///
/// To simplify the implementation we've made some assumptions about the paging setup.
/// Here we just assert firstly paging is setup and these assumptions are correct.
pub fn translate_gva(vcpu_fd: &VcpuFd, gva: u64, vmm: &Vmm) -> Result<u64, GdbTargetError> {
    // Check this virtual address is in kernel space
    // (bit 55 of the VA selects the upper, TTBR1-translated half of the address
    // space; only kernel-half translation is implemented here)
    if extract_bits_64!(gva, 55, 1) == 0 {
        return Err(GdbTargetError::GvaTranslateError);
    }
    // Translation control register
    let tcr_el1: u64 = get_sys_reg(TCR_EL1, vcpu_fd)?;
    // If this is 0 then translation is not yet ready
    // (T1SZ field still zero => treat the address as identity-mapped)
    if extract_bits_64!(tcr_el1, 16, 6) == 0 {
        return Ok(gva);
    }
    // Check 4KB pages are being used
    // (TG1 granule-size field must select the 4KB granule — TODO confirm encoding
    // against the ARM ARM)
    if extract_bits_64!(tcr_el1, 30, 2) != 2 {
        return Err(GdbTargetError::GvaTranslateError);
    }
    // ID_AA64MMFR0_EL1 provides information about the implemented memory model and memory
    // management. Check this is a physical address size we support
    let pa_size = match get_sys_reg(ID_AA64MMFR0_EL1, vcpu_fd)? & 0b1111 {
        0 => 32,
        1 => 36,
        2 => 40,
        3 => 42,
        4 => 44,
        5 => 48,
        _ => return Err(GdbTargetError::GvaTranslateError),
    };
    // A mask of the physical address size for a virtual address
    let pa_address_mask: u64 = !0u64 >> (64 - pa_size);
    // A mask used to take the bottom 12 bits of a value this is as we have a grainsize of 9
    // asserted with our 4kb page, plus the offset of 3
    let lower_mask: u64 = 0xFFF;
    // A mask for a physical address mask with the lower 12 bits cleared
    let desc_mask: u64 = pa_address_mask & !lower_mask;
    // Candidate descriptor byte offsets for each level. 12 bits of VA are kept
    // per level; after the bottom 3 bits are cleared below, the remainder equals
    // (9-bit table index) * 8, i.e. the byte offset of the descriptor in its table.
    let page_indices = [
        (gva >> (GRAIN_SIZE * 4)) & lower_mask,
        (gva >> (GRAIN_SIZE * 3)) & lower_mask,
        (gva >> (GRAIN_SIZE * 2)) & lower_mask,
        (gva >> GRAIN_SIZE) & lower_mask,
    ];
    // Transition table base register used for initial table lookup.
    // Take the bottom 48 bits from the register value.
    let mut address: u64 = get_sys_reg(TTBR1_EL1, vcpu_fd)? & pa_address_mask;
    let mut level = 0;
    while level < 4 {
        // Clear the bottom 3 bits from this address
        let pte = read_address(vmm, (address + page_indices[level]) & PTE_ADDRESS_MASK)?;
        address = pte & desc_mask;
        // If this is a valid table entry and we aren't at the end of the page tables
        // then loop again and check next level
        // (bit 1 of the descriptor distinguishes table/page entries from block
        // entries — a clear bit terminates the walk early at a block mapping)
        if (pte & 2 != 0) && (level < 3) {
            level += 1;
            continue;
        }
        break;
    }
    // Generate a mask to split between the page table entry and the GVA. The split point is
    // dependent on which level we terminate at. This is calculated by taking the level we
    // hit multiplied by the grainsize then adding the 3 offset
    let page_size = 1u64 << ((GRAIN_SIZE * (4 - level)) + 3);
    // Clear bottom bits of page size
    address &= !(page_size - 1);
    address |= gva & (page_size - 1);
    Ok(address)
}
/// Configures the kvm guest debug regs to register the hardware breakpoints
///
/// Programs one HW breakpoint per entry of `addrs`; `control` is the set of
/// `KVM_GUESTDBG_*` flags to apply to the vCPU.
fn set_kvm_debug(
    control: u32,
    vcpu_fd: &VcpuFd,
    addrs: &[GuestAddress],
) -> Result<(), GdbTargetError> {
    let mut dbg = kvm_guest_debug {
        control,
        ..Default::default()
    };
    for (i, addr) in addrs.iter().enumerate() {
        // DBGBCR_EL1 (Debug Breakpoint Control Registers, D13.3.2):
        // bit 0: 1 (Enabled)
        // bit 1~2: 0b11 (PMC = EL1/EL0)
        // bit 5~8: 0b1111 (BAS = AArch64)
        // others: 0
        dbg.arch.dbg_bcr[i] = 0b1 | (0b11 << 1) | (0b1111 << 5);
        // DBGBVR_EL1 (Debug Breakpoint Value Registers, D13.3.3):
        // bit 2~52: VA[2:52]
        // (`!0u64 >> 11` keeps bits 0..=52 of the breakpoint address)
        dbg.arch.dbg_bvr[i] = (!0u64 >> 11) & addr.0;
    }
    vcpu_fd.set_guest_debug(&dbg)?;
    Ok(())
}
/// Bits in a Vcpu pstate for IRQ
///
/// 0x80 / 0x40 appear to be the PSTATE I and F mask bits (setting them masks
/// IRQ/FIQ delivery) — TODO confirm against the ARM ARM PSTATE layout.
const IRQ_ENABLE_FLAGS: u64 = 0x80 | 0x40;
/// Register id for pstate
const PSTATE_ID: u64 = arm64_core_reg_id!(KVM_REG_SIZE_U64, offset_of!(user_pt_regs, pstate));
/// Disable IRQ interrupts to avoid getting stuck in a loop while single stepping
///
/// When GDB hits a single breakpoint and resumes it will follow the steps:
/// - Clear SW breakpoint we've hit
/// - Single step
/// - Re-insert the SW breakpoint
/// - Resume
/// However, with IRQ enabled the single step takes us into the IRQ handler so when we resume we
/// immediately hit the SW breakpoint we just re-inserted, getting stuck in a loop.
///
/// `enable == true` sets the mask bits in `IRQ_ENABLE_FLAGS`; `false` clears them.
fn toggle_interrupts(vcpu_fd: &VcpuFd, enable: bool) -> Result<(), GdbTargetError> {
    let current = get_sys_reg(PSTATE_ID, vcpu_fd)?;
    let updated = if enable {
        current | IRQ_ENABLE_FLAGS
    } else {
        current & !IRQ_ENABLE_FLAGS
    };
    vcpu_fd.set_one_reg(PSTATE_ID, &updated.to_le_bytes())?;
    Ok(())
}
/// Configures the Vcpu for debugging and sets the hardware breakpoints on the Vcpu
///
/// Guest debug is always enabled with HW and SW breakpoint support; single
/// stepping is layered on top only when `step` is set.
pub fn vcpu_set_debug(
    vcpu_fd: &VcpuFd,
    addrs: &[GuestAddress],
    step: bool,
) -> Result<(), GdbTargetError> {
    let base_control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW | KVM_GUESTDBG_USE_SW_BP;
    let control = if step {
        base_control | KVM_GUESTDBG_SINGLESTEP
    } else {
        base_control
    };
    // Mask IRQs while single stepping so the step does not land in an interrupt handler.
    toggle_interrupts(vcpu_fd, step)?;
    set_kvm_debug(control, vcpu_fd, addrs)
}
/// KVM does not support injecting breakpoints on aarch64 so this is a no-op
///
/// Presumably kept so the GDB target has a uniform per-architecture interface —
/// confirm against the x86_64 counterpart.
pub fn vcpu_inject_bp(
    _vcpu_fd: &VcpuFd,
    _addrs: &[GuestAddress],
    _step: bool,
) -> Result<(), GdbTargetError> {
    Ok(())
}
/// The number of general purpose registers
const GENERAL_PURPOSE_REG_COUNT: usize = 31;
/// The number of core registers we read from the Vcpu
/// (31 general purpose registers plus `sp` and `pc`).
const CORE_REG_COUNT: usize = 33;
/// Stores the register ids of registers to be read from the Vcpu
///
/// Built with a `while` loop because `for`/iterators are not available in const
/// context. Order matters: x0..x30 first, then `sp`, then `pc` — `read_registers`
/// drains the results in exactly this order.
const CORE_REG_IDS: [u64; CORE_REG_COUNT] = {
    let mut regs = [0; CORE_REG_COUNT];
    let mut idx = 0;
    // Offset of the `user_pt_regs` block inside `kvm_regs`.
    let reg_offset = offset_of!(kvm_regs, regs);
    let mut off = reg_offset;
    while idx < GENERAL_PURPOSE_REG_COUNT {
        regs[idx] = arm64_core_reg_id!(KVM_REG_SIZE_U64, off);
        idx += 1;
        off += std::mem::size_of::<u64>();
    }
    // `sp`/`pc` offsets are relative to `user_pt_regs`; add `reg_offset` so they
    // are expressed relative to `kvm_regs`, consistently with the loop above and
    // with `write_registers`. (Previously `reg_offset` was omitted, which was only
    // correct because `regs` happens to be the first field of `kvm_regs`.)
    regs[idx] = arm64_core_reg_id!(KVM_REG_SIZE_U64, reg_offset + offset_of!(user_pt_regs, sp));
    idx += 1;
    regs[idx] = arm64_core_reg_id!(KVM_REG_SIZE_U64, reg_offset + offset_of!(user_pt_regs, pc));
    regs
};
/// Reads the registers for the Vcpu
///
/// Fetches all core registers in one call and drains them in the order they
/// were requested in `CORE_REG_IDS`: x0..x30, then `sp`, then `pc`.
pub fn read_registers(vcpu_fd: &VcpuFd, regs: &mut CoreRegs) -> Result<(), GdbTargetError> {
    let mut fetched = Aarch64RegisterVec::default();
    get_registers(vcpu_fd, &CORE_REG_IDS, &mut fetched)?;
    let mut values = fetched.iter();
    // Pull the next fetched value or fail if the vCPU returned fewer registers
    // than requested.
    let mut next_value = || -> Result<u64, GdbTargetError> {
        values
            .next()
            .map(|reg| reg.value())
            .ok_or(GdbTargetError::ReadRegisterVecError)
    };
    for i in 0..GENERAL_PURPOSE_REG_COUNT {
        regs.x[i] = next_value()?;
    }
    regs.sp = next_value()?;
    regs.pc = next_value()?;
    Ok(())
}
/// Writes to the registers for the Vcpu
///
/// Writes x0..x30 followed by `sp` and `pc`, one `set_one_reg` call each.
pub fn write_registers(vcpu_fd: &VcpuFd, regs: &CoreRegs) -> Result<(), GdbTargetError> {
    let kreg_off = offset_of!(kvm_regs, regs);
    // x0..x30 are laid out as a contiguous u64 array at the start of `user_pt_regs`.
    for i in 0..GENERAL_PURPOSE_REG_COUNT {
        let gp_off = kreg_off + i * std::mem::size_of::<u64>();
        vcpu_fd.set_one_reg(
            arm64_core_reg_id!(KVM_REG_SIZE_U64, gp_off),
            &regs.x[i].to_le_bytes(),
        )?;
    }
    // `sp` and `pc` offsets inside `user_pt_regs`, shifted by the offset of that
    // struct inside `kvm_regs`.
    let sp_off = kreg_off + offset_of!(user_pt_regs, sp);
    vcpu_fd.set_one_reg(
        arm64_core_reg_id!(KVM_REG_SIZE_U64, sp_off),
        &regs.sp.to_le_bytes(),
    )?;
    let pc_off = kreg_off + offset_of!(user_pt_regs, pc);
    vcpu_fd.set_one_reg(
        arm64_core_reg_id!(KVM_REG_SIZE_U64, pc_off),
        &regs.pc.to_le_bytes(),
    )?;
    Ok(())
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/rate_limiter/persist.rs | src/vmm/src/rate_limiter/persist.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the structures needed for saving/restoring a RateLimiter.
use serde::{Deserialize, Serialize};
use utils::time::TimerFd;
use super::*;
use crate::snapshot::Persist;
/// State for saving a TokenBucket.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TokenBucketState {
    // Total bucket capacity.
    size: u64,
    // Remaining one-time burst budget at save time.
    one_time_burst: u64,
    // Complete refill time in milliseconds.
    refill_time: u64,
    // Token budget at save time.
    budget: u64,
    // Nanoseconds elapsed since the bucket's `last_update` when it was saved.
    elapsed_ns: u64,
}
impl Persist<'_> for TokenBucket {
    type State = TokenBucketState;
    type ConstructorArgs = ();
    type Error = io::Error;

    /// Snapshots the bucket, converting `last_update` into a relative
    /// `elapsed_ns` so the state stays meaningful across process restarts.
    fn save(&self) -> Self::State {
        TokenBucketState {
            size: self.size,
            one_time_burst: self.one_time_burst,
            refill_time: self.refill_time,
            budget: self.budget,
            // This should be safe for a duration of about 584 years.
            elapsed_ns: u64::try_from(self.last_update.elapsed().as_nanos()).unwrap(),
        }
    }

    /// Rebuilds the bucket from a saved state.
    ///
    /// Returns `InvalidInput` if the saved `size` or `refill_time` is zero
    /// (`TokenBucket::new` treats those as "limiting disabled").
    ///
    /// NOTE(review): the restored `initial_one_time_burst` equals the *remaining*
    /// burst at save time, not the originally configured one — confirm intended.
    fn restore(_: Self::ConstructorArgs, state: &Self::State) -> Result<Self, Self::Error> {
        let now = Instant::now();
        // Re-derive `last_update`; fall back to `now` if the subtraction would
        // underflow the platform's `Instant` representation.
        let last_update = now
            .checked_sub(Duration::from_nanos(state.elapsed_ns))
            .unwrap_or(now);
        let mut token_bucket =
            TokenBucket::new(state.size, state.one_time_burst, state.refill_time)
                .ok_or_else(|| io::Error::from(io::ErrorKind::InvalidInput))?;
        token_bucket.budget = state.budget;
        token_bucket.last_update = last_update;
        Ok(token_bucket)
    }
}
/// State for saving a RateLimiter.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RateLimiterState {
    // Saved ops/s bucket, if ops limiting was enabled.
    ops: Option<TokenBucketState>,
    // Saved bytes/s bucket, if bandwidth limiting was enabled.
    bandwidth: Option<TokenBucketState>,
}
impl Persist<'_> for RateLimiter {
    type State = RateLimiterState;
    type ConstructorArgs = ();
    type Error = io::Error;

    /// Captures the serializable state of both token buckets. Timer state is
    /// deliberately not saved; restored limiters start with an unarmed timer.
    fn save(&self) -> Self::State {
        RateLimiterState {
            ops: self.ops.as_ref().map(|ops| ops.save()),
            bandwidth: self.bandwidth.as_ref().map(|bw| bw.save()),
        }
    }

    /// Rebuilds a `RateLimiter` from a saved state.
    ///
    /// Returns an error if either bucket state is invalid (see
    /// `TokenBucket::restore`).
    fn restore(_: Self::ConstructorArgs, state: &Self::State) -> Result<Self, Self::Error> {
        // `Option<Result<..>>` -> `Result<Option<..>>` via `transpose` replaces
        // the manual `if let ... else None` blocks while still propagating any
        // restore error with `?`.
        let ops = state
            .ops
            .as_ref()
            .map(|tb| TokenBucket::restore((), tb))
            .transpose()?;
        let bandwidth = state
            .bandwidth
            .as_ref()
            .map(|tb| TokenBucket::restore((), tb))
            .transpose()?;
        Ok(RateLimiter {
            ops,
            bandwidth,
            timer_fd: TimerFd::new(),
            timer_active: false,
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::snapshot::Snapshot;

    // Round-trips a TokenBucket through save()/restore() directly and through a
    // full Snapshot serialization. `partial_eq` is presumably a test-only
    // comparison helper that ignores `last_update` — confirm in the TokenBucket
    // test utilities.
    #[test]
    fn test_token_bucket_persistence() {
        let mut tb = TokenBucket::new(1000, 2000, 3000).unwrap();
        // Check that TokenBucket restores correctly if untouched.
        let restored_tb = TokenBucket::restore((), &tb.save()).unwrap();
        assert!(tb.partial_eq(&restored_tb));
        // Check that TokenBucket restores correctly after partially consuming tokens.
        tb.reduce(100);
        let restored_tb = TokenBucket::restore((), &tb.save()).unwrap();
        assert!(tb.partial_eq(&restored_tb));
        // Check that TokenBucket restores correctly after replenishing tokens.
        tb.force_replenish(100);
        let restored_tb = TokenBucket::restore((), &tb.save()).unwrap();
        assert!(tb.partial_eq(&restored_tb));
        // Test serialization.
        let mut mem = vec![0; 4096];
        Snapshot::new(tb.save())
            .save(&mut mem.as_mut_slice())
            .unwrap();
        let restored_tb = TokenBucket::restore(
            (),
            &Snapshot::load_without_crc_check(mem.as_slice())
                .unwrap()
                .data,
        )
        .unwrap();
        assert!(tb.partial_eq(&restored_tb));
    }

    // Same round-trip checks for a full RateLimiter (both buckets enabled),
    // additionally asserting that the restored limiter's timer is unarmed.
    #[test]
    fn test_rate_limiter_persistence() {
        let refill_time = 100_000;
        let mut rate_limiter = RateLimiter::new(100, 0, refill_time, 10, 0, refill_time).unwrap();
        // Check that RateLimiter restores correctly if untouched.
        let restored_rate_limiter =
            RateLimiter::restore((), &rate_limiter.save()).expect("Unable to restore rate limiter");
        assert!(
            rate_limiter
                .ops()
                .unwrap()
                .partial_eq(restored_rate_limiter.ops().unwrap())
        );
        assert!(
            rate_limiter
                .bandwidth()
                .unwrap()
                .partial_eq(restored_rate_limiter.bandwidth().unwrap())
        );
        assert!(!restored_rate_limiter.timer_fd.is_armed());
        // Check that RateLimiter restores correctly after partially consuming tokens.
        rate_limiter.consume(10, TokenType::Bytes);
        rate_limiter.consume(10, TokenType::Ops);
        let restored_rate_limiter =
            RateLimiter::restore((), &rate_limiter.save()).expect("Unable to restore rate limiter");
        assert!(
            rate_limiter
                .ops()
                .unwrap()
                .partial_eq(restored_rate_limiter.ops().unwrap())
        );
        assert!(
            rate_limiter
                .bandwidth()
                .unwrap()
                .partial_eq(restored_rate_limiter.bandwidth().unwrap())
        );
        assert!(!restored_rate_limiter.timer_fd.is_armed());
        // Check that RateLimiter restores correctly after totally consuming tokens.
        rate_limiter.consume(1000, TokenType::Bytes);
        let restored_rate_limiter =
            RateLimiter::restore((), &rate_limiter.save()).expect("Unable to restore rate limiter");
        assert!(
            rate_limiter
                .ops()
                .unwrap()
                .partial_eq(restored_rate_limiter.ops().unwrap())
        );
        assert!(
            rate_limiter
                .bandwidth()
                .unwrap()
                .partial_eq(restored_rate_limiter.bandwidth().unwrap())
        );
        // Test serialization.
        let mut mem = vec![0; 4096];
        Snapshot::new(rate_limiter.save())
            .save(&mut mem.as_mut_slice())
            .unwrap();
        let restored_rate_limiter = RateLimiter::restore(
            (),
            &Snapshot::load_without_crc_check(mem.as_slice())
                .unwrap()
                .data,
        )
        .unwrap();
        assert!(
            rate_limiter
                .ops()
                .unwrap()
                .partial_eq(restored_rate_limiter.ops().unwrap())
        );
        assert!(
            rate_limiter
                .bandwidth()
                .unwrap()
                .partial_eq(restored_rate_limiter.bandwidth().unwrap())
        );
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/rate_limiter/mod.rs | src/vmm/src/rate_limiter/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::os::unix::io::{AsRawFd, RawFd};
use std::time::{Duration, Instant};
use std::{fmt, io};
use utils::time::TimerFd;
pub mod persist;
#[derive(Debug, thiserror::Error, displaydoc::Display)]
/// Describes the errors that may occur while handling rate limiter events.
// NOTE: variant doc comments double as the `Display` text via `displaydoc` —
// do not reword them casually.
pub enum RateLimiterError {
    /// Rate limiter event handler called without a present timer
    SpuriousRateLimiterEvent,
}
// Interval at which the refill timer will run when limiter is at capacity.
const REFILL_TIMER_DURATION: Duration = Duration::from_millis(100);
// Number of nanoseconds in one millisecond, used for refill-rate conversions.
const NANOSEC_IN_ONE_MILLISEC: u64 = 1_000_000;
// Euclid's two-thousand-year-old algorithm for finding the greatest common divisor.
// Returns `x` when `y == 0` (and vice versa after one iteration).
#[cfg_attr(kani, kani::requires(x > 0 && y > 0))]
#[cfg_attr(kani, kani::ensures(
    |&result| result != 0
        && x % result == 0
        && y % result == 0
))]
fn gcd(x: u64, y: u64) -> u64 {
    let (mut a, mut b) = (x, y);
    while b != 0 {
        let remainder = a % b;
        a = b;
        b = remainder;
    }
    a
}
/// Enum describing the outcomes of a `reduce()` call on a `TokenBucket`.
#[derive(Clone, Debug, PartialEq)]
pub enum BucketReduction {
    /// There are not enough tokens to complete the operation.
    Failure,
    /// A part of the available tokens have been consumed.
    Success,
    /// A number of tokens `inner` times larger than the bucket size have been consumed.
    /// (The caller is expected to block consumption proportionally longer — see
    /// `RateLimiter::consume`.)
    OverConsumption(f64),
}
/// TokenBucket provides a lower level interface to rate limiting with a
/// configurable capacity, refill-rate and initial burst.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct TokenBucket {
    // Bucket defining traits.
    size: u64,
    // Initial burst size.
    initial_one_time_burst: u64,
    // Complete refill time in milliseconds.
    refill_time: u64,
    // Internal state descriptors.
    // Number of free initial tokens, that can be consumed at no cost.
    one_time_burst: u64,
    // Current token budget.
    budget: u64,
    // Last time this token bucket saw activity.
    last_update: Instant,
    // Fields used for pre-processing optimizations.
    // `size / gcd(size, refill_time_ns)` — numerator of the simplified refill fraction.
    processed_capacity: u64,
    // `refill_time_ns / gcd(size, refill_time_ns)` — denominator of that fraction.
    processed_refill_time: u64,
}
impl TokenBucket {
    /// Creates a `TokenBucket` wrapped in an `Option`.
    ///
    /// TokenBucket created is of `size` total capacity and takes `complete_refill_time_ms`
    /// milliseconds to go from zero tokens to total capacity. The `one_time_burst` is initial
    /// extra credit on top of total capacity, that does not replenish and which can be used
    /// for an initial burst of data.
    ///
    /// If the `size` or the `complete refill time` are zero, then `None` is returned.
    pub fn new(size: u64, one_time_burst: u64, complete_refill_time_ms: u64) -> Option<Self> {
        // If either token bucket capacity or refill time is 0, disable limiting.
        if size == 0 || complete_refill_time_ms == 0 {
            return None;
        }
        // Formula for computing current refill amount:
        // refill_token_count = (delta_time * size) / (complete_refill_time_ms * 1_000_000)
        // In order to avoid overflows, simplify the fractions by computing greatest common divisor.
        let complete_refill_time_ns =
            complete_refill_time_ms.checked_mul(NANOSEC_IN_ONE_MILLISEC)?;
        // Get the greatest common factor between `size` and `complete_refill_time_ns`.
        let common_factor = gcd(size, complete_refill_time_ns);
        // The division will be exact since `common_factor` is a factor of `size`.
        let processed_capacity: u64 = size / common_factor;
        // The division will be exact since `common_factor` is a factor of
        // `complete_refill_time_ns`.
        let processed_refill_time: u64 = complete_refill_time_ns / common_factor;
        Some(TokenBucket {
            size,
            one_time_burst,
            initial_one_time_burst: one_time_burst,
            refill_time: complete_refill_time_ms,
            // Start off full.
            budget: size,
            // Last updated is now.
            last_update: Instant::now(),
            processed_capacity,
            processed_refill_time,
        })
    }

    // Replenishes token bucket based on elapsed time. Should only be called internally by `Self`.
    #[allow(clippy::cast_possible_truncation)]
    fn auto_replenish(&mut self) {
        // Compute time passed since last refill/update.
        let now = Instant::now();
        let time_delta = (now - self.last_update).as_nanos();
        // A full refill interval (or more) has passed: fill to capacity.
        if time_delta >= u128::from(self.refill_time * NANOSEC_IN_ONE_MILLISEC) {
            self.budget = self.size;
            self.last_update = now;
        } else {
            // At each 'time_delta' nanoseconds the bucket should refill with:
            // refill_amount = (time_delta * size) / (complete_refill_time_ms * 1_000_000)
            // `processed_capacity` and `processed_refill_time` are the result of simplifying above
            // fraction formula with their greatest-common-factor.
            // In the constructor, we assured that (self.refill_time * NANOSEC_IN_ONE_MILLISEC)
            // fits into a u64 That means, at this point we know that time_delta <
            // u64::MAX. Since all other values here are u64, this assures that u128
            // multiplication cannot overflow.
            let processed_capacity = u128::from(self.processed_capacity);
            let processed_refill_time = u128::from(self.processed_refill_time);
            let tokens = (time_delta * processed_capacity) / processed_refill_time;
            // We increment `self.last_update` by the minimum time required to generate `tokens`, in
            // the case where we have the time to generate `1.8` tokens but only
            // generate `x` tokens due to integer arithmetic this will carry the time
            // required to generate 0.8th of a token over to the next call, such that if
            // the next call where to generate `2.3` tokens it would instead
            // generate `3.1` tokens. This minimizes dropping tokens at high frequencies.
            // We want the integer division here to round up instead of down (as if we round down,
            // we would allow some fraction of a nano second to be used twice, allowing
            // for the generation of one extra token in extreme circumstances).
            let mut time_adjustment = tokens * processed_refill_time / processed_capacity;
            if tokens * processed_refill_time % processed_capacity != 0 {
                time_adjustment += 1;
            }
            // Ensure that we always generate as many tokens as we can: assert that the "unused"
            // part of time_delta is less than the time it would take to generate a
            // single token (= processed_refill_time / processed_capacity)
            debug_assert!(time_adjustment <= time_delta);
            debug_assert!(
                (time_delta - time_adjustment) * processed_capacity <= processed_refill_time
            );
            // time_adjustment is at most time_delta, and since time_delta <= u64::MAX, this cast is
            // fine
            self.last_update += Duration::from_nanos(time_adjustment as u64);
            self.budget = std::cmp::min(self.budget.saturating_add(tokens as u64), self.size);
        }
    }

    /// Attempts to consume `tokens` from the bucket and returns whether the action succeeded.
    pub fn reduce(&mut self, mut tokens: u64) -> BucketReduction {
        // First things first: consume the one-time-burst budget.
        if self.one_time_burst > 0 {
            // We still have burst budget for *all* tokens requests.
            if self.one_time_burst >= tokens {
                self.one_time_burst -= tokens;
                self.last_update = Instant::now();
                // No need to continue to the refill process, we still have burst budget to consume
                // from.
                return BucketReduction::Success;
            } else {
                // We still have burst budget for *some* of the tokens requests.
                // The tokens left unfulfilled will be consumed from current `self.budget`.
                tokens -= self.one_time_burst;
                self.one_time_burst = 0;
            }
        }
        if tokens > self.budget {
            // Hit the bucket bottom, let's auto-replenish and try again.
            self.auto_replenish();
            // This operation requests a bandwidth higher than the bucket size
            if tokens > self.size {
                crate::logger::error!(
                    "Consumed {} tokens from bucket of size {}",
                    tokens,
                    self.size
                );
                // Empty the bucket and report an overconsumption of
                // (remaining tokens / size) times larger than the bucket size
                tokens -= self.budget;
                self.budget = 0;
                return BucketReduction::OverConsumption(tokens as f64 / self.size as f64);
            }
            if tokens > self.budget {
                // Still not enough tokens, consume() fails, return false.
                return BucketReduction::Failure;
            }
        }
        self.budget -= tokens;
        BucketReduction::Success
    }

    /// "Manually" adds tokens to bucket.
    pub fn force_replenish(&mut self, tokens: u64) {
        // This means we are still during the burst interval.
        // Of course there is a very small chance that the last reduce() also used up burst
        // budget which should now be replenished, but for performance and code-complexity
        // reasons we're just gonna let that slide since it's practically inconsequential.
        if self.one_time_burst > 0 {
            self.one_time_burst = std::cmp::min(
                self.one_time_burst.saturating_add(tokens),
                self.initial_one_time_burst,
            );
            return;
        }
        self.budget = std::cmp::min(self.budget.saturating_add(tokens), self.size);
    }

    /// Returns the capacity of the token bucket.
    pub fn capacity(&self) -> u64 {
        self.size
    }

    /// Returns the remaining one time burst budget.
    pub fn one_time_burst(&self) -> u64 {
        self.one_time_burst
    }

    /// Returns the time in milliseconds required to completely fill the bucket.
    pub fn refill_time_ms(&self) -> u64 {
        self.refill_time
    }

    /// Returns the current budget (one time burst allowance notwithstanding).
    pub fn budget(&self) -> u64 {
        self.budget
    }

    /// Returns the initially configured one time burst budget.
    pub fn initial_one_time_burst(&self) -> u64 {
        self.initial_one_time_burst
    }
}
/// Enum that describes the type of token used.
///
/// Selects which of the two `RateLimiter` buckets an operation draws from.
#[derive(Debug)]
pub enum TokenType {
    /// Token type used for bandwidth limiting.
    Bytes,
    /// Token type used for operations/second limiting.
    Ops,
}
/// Enum that describes the type of token bucket update.
///
/// Consumed by `RateLimiter::update_buckets` to patch each bucket independently.
#[derive(Debug)]
pub enum BucketUpdate {
    /// No Update - same as before.
    None,
    /// Rate Limiting is disabled on this bucket.
    Disabled,
    /// Rate Limiting enabled with updated bucket.
    Update(TokenBucket),
}
/// Rate Limiter that works on both bandwidth and ops/s limiting.
///
/// Bandwidth (bytes/s) and ops/s limiting can be used at the same time or individually.
///
/// Implementation uses a single timer through TimerFd to refresh either or
/// both token buckets.
///
/// Its internal buckets are 'passively' replenished as they're being used (as
/// part of `consume()` operations).
/// A timer is enabled and used to 'actively' replenish the token buckets when
/// limiting is in effect and `consume()` operations are disabled.
///
/// RateLimiters will generate events on the FDs provided by their `AsRawFd` trait
/// implementation. These events are meant to be consumed by the user of this struct.
/// On each such event, the user must call the `event_handler()` method.
pub struct RateLimiter {
    // Bytes/s bucket; `None` disables bandwidth limiting.
    bandwidth: Option<TokenBucket>,
    // Ops/s bucket; `None` disables operation limiting.
    ops: Option<TokenBucket>,
    timer_fd: TimerFd,
    // Internal flag that quickly determines timer state.
    timer_active: bool,
}
impl PartialEq for RateLimiter {
    // Equality compares only the bucket configurations/states; `timer_fd` and
    // `timer_active` are excluded from the comparison.
    fn eq(&self, other: &RateLimiter) -> bool {
        self.bandwidth == other.bandwidth && self.ops == other.ops
    }
}
impl fmt::Debug for RateLimiter {
    // Manual impl; `timer_fd` and `timer_active` are omitted from the output.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "RateLimiter {{ bandwidth: {:?}, ops: {:?} }}",
            self.bandwidth, self.ops
        )
    }
}
impl RateLimiter {
    /// Creates a new Rate Limiter that can limit on both bytes/s and ops/s.
    ///
    /// # Arguments
    ///
    /// * `bytes_total_capacity` - the total capacity of the `TokenType::Bytes` token bucket.
    /// * `bytes_one_time_burst` - initial extra credit on top of `bytes_total_capacity`, that does
    ///   not replenish and which can be used for an initial burst of data.
    /// * `bytes_complete_refill_time_ms` - number of milliseconds for the `TokenType::Bytes` token
    ///   bucket to go from zero Bytes to `bytes_total_capacity` Bytes.
    /// * `ops_total_capacity` - the total capacity of the `TokenType::Ops` token bucket.
    /// * `ops_one_time_burst` - initial extra credit on top of `ops_total_capacity`, that does not
    ///   replenish and which can be used for an initial burst of data.
    /// * `ops_complete_refill_time_ms` - number of milliseconds for the `TokenType::Ops` token
    ///   bucket to go from zero Ops to `ops_total_capacity` Ops.
    ///
    /// If either bytes/ops *size* or *refill_time* are **zero**, the limiter
    /// is **disabled** for that respective token type.
    ///
    /// # Errors
    ///
    /// If the timerfd creation fails, an error is returned.
    pub fn new(
        bytes_total_capacity: u64,
        bytes_one_time_burst: u64,
        bytes_complete_refill_time_ms: u64,
        ops_total_capacity: u64,
        ops_one_time_burst: u64,
        ops_complete_refill_time_ms: u64,
    ) -> io::Result<Self> {
        // `TokenBucket::new` yields `None` for zero size/refill-time, which encodes
        // "limiting disabled" for that token type.
        let bytes_token_bucket = TokenBucket::new(
            bytes_total_capacity,
            bytes_one_time_burst,
            bytes_complete_refill_time_ms,
        );
        let ops_token_bucket = TokenBucket::new(
            ops_total_capacity,
            ops_one_time_burst,
            ops_complete_refill_time_ms,
        );
        // We'll need a timer_fd, even if our current config effectively disables rate limiting,
        // because `Self::update_buckets()` might re-enable it later, and we might be
        // seccomp-blocked from creating the timer_fd at that time.
        let timer_fd = TimerFd::new();
        Ok(RateLimiter {
            bandwidth: bytes_token_bucket,
            ops: ops_token_bucket,
            timer_fd,
            timer_active: false,
        })
    }

    // Arm the timer of the rate limiter with the provided `TimerState`.
    fn activate_timer(&mut self, one_shot_duration: Duration) {
        // Register the timer; don't care about its previous state
        self.timer_fd.arm(one_shot_duration, None);
        self.timer_active = true;
    }

    /// Attempts to consume tokens and returns whether that is possible.
    ///
    /// If rate limiting is disabled on provided `token_type`, this function will always succeed.
    pub fn consume(&mut self, tokens: u64, token_type: TokenType) -> bool {
        // If the timer is active, we can't consume tokens from any bucket and the function fails.
        if self.timer_active {
            return false;
        }
        // Identify the required token bucket.
        let token_bucket = match token_type {
            TokenType::Bytes => self.bandwidth.as_mut(),
            TokenType::Ops => self.ops.as_mut(),
        };
        // Try to consume from the token bucket.
        if let Some(bucket) = token_bucket {
            let refill_time = bucket.refill_time_ms();
            match bucket.reduce(tokens) {
                // When we report budget is over, there will be no further calls here,
                // register a timer to replenish the bucket and resume processing;
                // make sure there is only one running timer for this limiter.
                BucketReduction::Failure => {
                    if !self.timer_active {
                        self.activate_timer(REFILL_TIMER_DURATION);
                    }
                    false
                }
                // The operation succeeded and further calls can be made.
                BucketReduction::Success => true,
                // The operation succeeded as the tokens have been consumed
                // but the timer still needs to be armed.
                BucketReduction::OverConsumption(ratio) => {
                    // The operation "borrowed" a number of tokens `ratio` times
                    // greater than the size of the bucket, and since it takes
                    // `refill_time` milliseconds to fill an empty bucket, in
                    // order to enforce the bandwidth limit we need to prevent
                    // further calls to the rate limiter for
                    // `ratio * refill_time` milliseconds.
                    // The conversion should be safe because the ratio is positive.
                    #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)]
                    self.activate_timer(Duration::from_millis((ratio * refill_time as f64) as u64));
                    true
                }
            }
        } else {
            // If bucket is not present rate limiting is disabled on token type,
            // consume() will always succeed.
            true
        }
    }

    /// Adds tokens of `token_type` to their respective bucket.
    ///
    /// Can be used to *manually* add tokens to a bucket. Useful for reverting a
    /// `consume()` if needed.
    pub fn manual_replenish(&mut self, tokens: u64, token_type: TokenType) {
        // Identify the required token bucket.
        let token_bucket = match token_type {
            TokenType::Bytes => self.bandwidth.as_mut(),
            TokenType::Ops => self.ops.as_mut(),
        };
        // Add tokens to the token bucket.
        if let Some(bucket) = token_bucket {
            bucket.force_replenish(tokens);
        }
    }

    /// Returns whether this rate limiter is blocked.
    ///
    /// The limiter 'blocks' when a `consume()` operation fails because there was not enough
    /// budget for it.
    /// An event will be generated on the exported FD when the limiter 'unblocks'.
    pub fn is_blocked(&self) -> bool {
        self.timer_active
    }

    /// This function needs to be called every time there is an event on the
    /// FD provided by this object's `AsRawFd` trait implementation.
    ///
    /// # Errors
    ///
    /// If the rate limiter is disabled or is not blocked, an error is returned.
    pub fn event_handler(&mut self) -> Result<(), RateLimiterError> {
        // A read of 0 expirations means the timer never fired: spurious wakeup.
        match self.timer_fd.read() {
            0 => Err(RateLimiterError::SpuriousRateLimiterEvent),
            _ => {
                self.timer_active = false;
                Ok(())
            }
        }
    }

    /// Updates the parameters of the token buckets associated with this RateLimiter.
    // TODO: Please note that, right now, the buckets become full after being updated.
    pub fn update_buckets(&mut self, bytes: BucketUpdate, ops: BucketUpdate) {
        match bytes {
            BucketUpdate::Disabled => self.bandwidth = None,
            BucketUpdate::Update(tb) => self.bandwidth = Some(tb),
            BucketUpdate::None => (),
        };
        match ops {
            BucketUpdate::Disabled => self.ops = None,
            BucketUpdate::Update(tb) => self.ops = Some(tb),
            BucketUpdate::None => (),
        };
    }

    /// Returns an immutable view of the inner bandwidth token bucket.
    pub fn bandwidth(&self) -> Option<&TokenBucket> {
        self.bandwidth.as_ref()
    }

    /// Returns an immutable view of the inner ops token bucket.
    pub fn ops(&self) -> Option<&TokenBucket> {
        self.ops.as_ref()
    }
}
impl AsRawFd for RateLimiter {
    /// Provides a FD which needs to be monitored for POLLIN events.
    ///
    /// This object's `event_handler()` method must be called on such events.
    ///
    /// Will return a negative value if rate limiting is disabled on both
    /// token types.
    // NOTE(review): `new()` now creates the timer_fd unconditionally, so the
    // negative-value case above looks stale — confirm and update the doc.
    fn as_raw_fd(&self) -> RawFd {
        self.timer_fd.as_raw_fd()
    }
}
impl Default for RateLimiter {
    /// Default RateLimiter is a no-op limiter with infinite budget.
    fn default() -> Self {
        // Safe to unwrap since this will not attempt to create timer_fd.
        // NOTE(review): `new()` does create a timer_fd unconditionally these days,
        // but it still always returns `Ok`, so the expect cannot fire — the
        // comment above looks stale; confirm.
        RateLimiter::new(0, 0, 0, 0, 0, 0).expect("Failed to build default RateLimiter")
    }
}
#[cfg(kani)]
#[allow(dead_code)] // Avoid warning when using stubs.
mod verification {
use std::time::Instant;
use super::*;
mod stubs {
use std::time::Instant;
use crate::rate_limiter::TokenBucket;
// On Unix, the Rust Standard Library defines Instants as
//
// struct Instance(struct inner::Instant {
// t: struct Timespec {
// tv_sec: i64,
// tv_nsec: struct Nanoseconds(u32),
// }
// }
//
// This is not really repr-compatible with the below, as the structs (apart from
// `Nanoseconds`) are repr(Rust), but currently this seems to work.
#[repr(C)]
struct InstantStub {
tv_sec: i64,
tv_nsec: u32,
}
// The last value returned by this stub, in nano seconds. We keep these variables separately
// for Kani performance reasons (just counting nanos and then doing division/modulo
// to get seconds/nanos is slow as those operations are very difficult for Kani's
// underlying SAT solvers).
static mut LAST_SECONDS: i64 = 0;
static mut LAST_NANOS: u32 = 0;
/// Stubs out `std::time::Instant::now` to return non-deterministic instances that are
/// non-decreasing. The first value produced by this stub will always be 0. This is
/// because generally harnesses only care about the delta between instants i1 and i2, which
/// is arbitrary as long as at least one of i1, i2 is non-deterministic. Therefore,
/// hardcoding one of the instances to be 0 brings a performance improvement. Should
/// a harness loose generality due to the first Instant::now() call returning 0, add a
/// dummy call to Instant::now() to the top of the harness to consume the 0 value. All
/// subsequent calls will then result in non-deterministic values.
fn instant_now() -> Instant {
// Instants are non-decreasing.
// See https://doc.rust-lang.org/std/time/struct.Instant.html.
// upper bound on seconds to prevent scenarios involving clock overflow.
let next_seconds = kani::any_where(|n| *n >= unsafe { LAST_SECONDS });
let next_nanos = kani::any_where(|n| *n < 1_000_000_000); // rustc intrinsic bound
if next_seconds == unsafe { LAST_SECONDS } {
kani::assume(next_nanos >= unsafe { LAST_NANOS });
}
let to_return = next_instant_now();
unsafe {
LAST_SECONDS = next_seconds;
LAST_NANOS = next_nanos;
}
to_return
}
pub(super) fn next_instant_now() -> Instant {
let stub = InstantStub {
tv_sec: unsafe { LAST_SECONDS },
tv_nsec: unsafe { LAST_NANOS },
};
// In normal rust code, this would not be safe, as the compiler can re-order the fields
// However, kani will never run any transformations on the code, so this is safe. This
// is because kani doesn't use rustc/llvm to compile down to bytecode, but instead
// transpiles unoptimized rust MIR to goto-programs, which are then fed to CMBC.
unsafe { std::mem::transmute(stub) }
}
/// Stubs out `TokenBucket::auto_replenish` by simply filling up the bucket by a
/// non-deterministic amount.
fn token_bucket_auto_replenish(this: &mut TokenBucket) {
this.budget += kani::any_where::<u64, _>(|&n| n <= this.size - this.budget);
}
}
impl TokenBucket {
/// Functions checking that the general invariants of a TokenBucket are upheld
fn is_valid(&self) -> bool {
self.size != 0
&& self.refill_time != 0
// The token budget can never exceed the bucket's size
&& self.budget <= self.size
// The burst budget never exceeds its initial value
&& self.one_time_burst <= self.initial_one_time_burst
// While burst budget is available, no tokens from the normal budget are consumed.
&& (self.one_time_burst == 0 || self.budget == self.size)
}
}
impl kani::Arbitrary for TokenBucket {
fn any() -> TokenBucket {
let bucket = TokenBucket::new(kani::any(), kani::any(), kani::any());
kani::assume(bucket.is_some());
let mut bucket = bucket.unwrap();
// Adjust the budgets non-deterministically to simulate that the bucket has been "in
// use" already
bucket.budget = kani::any();
bucket.one_time_burst = kani::any();
kani::assume(bucket.is_valid());
bucket
}
}
#[kani::proof]
#[kani::stub(std::time::Instant::now, stubs::instant_now)]
fn verify_instant_stub_non_decreasing() {
let early = Instant::now();
let late = Instant::now();
assert!(early <= late);
}
// Euclid algorithm has runtime O(log(min(x,y))) -> kani::unwind(log(MAX)) should be enough.
#[kani::proof_for_contract(gcd)]
#[kani::unwind(64)]
#[kani::solver(cadical)]
fn gcd_contract_harness() {
const MAX: u64 = 64;
let x = kani::any_where(|&x| x < MAX);
let y = kani::any_where(|&y| y < MAX);
let gcd = super::gcd(x, y);
// Most assertions are unnecessary as they are proved as part of the
// contract. However for simplification the contract only enforces that
// the result is *a* divisor, not necessarily the smallest one, so we
// check that here manually.
if gcd != 0 {
let w = kani::any_where(|&w| w > 0 && x % w == 0 && y % w == 0);
assert!(gcd >= w);
}
}
#[kani::proof]
#[kani::stub(std::time::Instant::now, stubs::instant_now)]
#[kani::stub_verified(gcd)]
#[kani::solver(cadical)]
fn verify_token_bucket_new() {
let size = kani::any();
let one_time_burst = kani::any();
let complete_refill_time_ms = kani::any();
// Checks if the `TokenBucket` is created with invalid inputs, the result is always `None`.
match TokenBucket::new(size, one_time_burst, complete_refill_time_ms) {
None => assert!(
size == 0
|| complete_refill_time_ms == 0
|| complete_refill_time_ms > u64::MAX / NANOSEC_IN_ONE_MILLISEC
),
Some(bucket) => assert!(bucket.is_valid()),
}
}
#[kani::proof]
#[kani::unwind(1)] // enough to unwind the recursion at `Timespec::sub_timespec`
#[kani::stub(std::time::Instant::now, stubs::instant_now)]
#[kani::stub_verified(gcd)]
fn verify_token_bucket_auto_replenish() {
const MAX_BUCKET_SIZE: u64 = 15;
const MAX_REFILL_TIME: u64 = 15;
// Create a non-deterministic `TokenBucket`. This internally calls `Instant::now()`, which
// is stubbed to always return 0 on its first call. We can make this simplification
// here, as `auto_replenish` only cares about the time delta between two consecutive
// calls. This speeds up the verification significantly.
let size = kani::any_where(|n| *n < MAX_BUCKET_SIZE && *n != 0);
let complete_refill_time_ms = kani::any_where(|n| *n < MAX_REFILL_TIME && *n != 0);
// `auto_replenish` doesn't use `one_time_burst`
let mut bucket: TokenBucket = TokenBucket::new(size, 0, complete_refill_time_ms).unwrap();
bucket.auto_replenish();
assert!(bucket.is_valid());
}
#[kani::proof]
#[kani::stub(std::time::Instant::now, stubs::instant_now)]
#[kani::stub(TokenBucket::auto_replenish, stubs::token_bucket_auto_replenish)]
#[kani::stub_verified(gcd)]
#[kani::solver(cadical)]
fn verify_token_bucket_reduce() {
let mut token_bucket: TokenBucket = kani::any();
let old_token_bucket = token_bucket.clone();
let tokens = kani::any();
let result = token_bucket.reduce(tokens);
assert!(token_bucket.is_valid());
assert!(token_bucket.one_time_burst <= old_token_bucket.one_time_burst);
// Initial burst always gets used up before budget. Read assertion as implication, i.e.,
// `token_bucket.budget != old_token_bucket.budget => token_bucket.one_time_burst == 0`.
assert!(token_bucket.budget == old_token_bucket.budget || token_bucket.one_time_burst == 0);
// If reduction failed, bucket state should not change.
if result == BucketReduction::Failure {
// In case of a failure, no budget should have been consumed. However, since `reduce`
// attempts to call `auto_replenish`, the budget could actually have
// increased.
assert!(token_bucket.budget >= old_token_bucket.budget);
assert!(token_bucket.one_time_burst == old_token_bucket.one_time_burst);
// Ensure that it is possible to trigger the BucketReduction::Failure case at all.
// kani::cover makes verification fail if no possible execution path reaches
// this line.
kani::cover!();
}
}
#[kani::proof]
#[kani::stub(std::time::Instant::now, stubs::instant_now)]
#[kani::stub_verified(gcd)]
#[kani::stub(TokenBucket::auto_replenish, stubs::token_bucket_auto_replenish)]
fn verify_token_bucket_force_replenish() {
let mut token_bucket: TokenBucket = kani::any();
token_bucket.reduce(kani::any());
let reduced_budget = token_bucket.budget;
let reduced_burst = token_bucket.one_time_burst;
let to_replenish = kani::any();
token_bucket.force_replenish(to_replenish);
assert!(token_bucket.is_valid());
assert!(token_bucket.budget >= reduced_budget);
assert!(token_bucket.one_time_burst >= reduced_burst);
}
}
#[cfg(test)]
pub(crate) mod tests {
use std::thread;
use std::time::Duration;
use super::*;
// Define custom refill interval to be a bit bigger. This will help
// in tests which wait for a limiter refill in 2 stages. This will make it so
// second wait will always result in the limiter being refilled. Otherwise
// there is a chance for a race condition between limiter refilling and limiter
// checking.
const TEST_REFILL_TIMER_DURATION: Duration = Duration::from_millis(110);
impl TokenBucket {
// Resets the token bucket: budget set to max capacity and last-updated set to now.
fn reset(&mut self) {
self.budget = self.size;
self.last_update = Instant::now();
}
fn get_last_update(&self) -> &Instant {
&self.last_update
}
fn get_processed_capacity(&self) -> u64 {
self.processed_capacity
}
fn get_processed_refill_time(&self) -> u64 {
self.processed_refill_time
}
// After a restore, we cannot be certain that the last_update field has the same value.
pub(crate) fn partial_eq(&self, other: &TokenBucket) -> bool {
(other.capacity() == self.capacity())
&& (other.one_time_burst() == self.one_time_burst())
&& (other.refill_time_ms() == self.refill_time_ms())
&& (other.budget() == self.budget())
}
}
impl RateLimiter {
fn get_token_bucket(&self, token_type: TokenType) -> Option<&TokenBucket> {
match token_type {
TokenType::Bytes => self.bandwidth.as_ref(),
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | true |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/test_utils/mod.rs | src/vmm/src/test_utils/mod.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![allow(missing_docs)]
use std::sync::{Arc, Mutex};
use vm_memory::{GuestAddress, GuestRegionCollection};
use vmm_sys_util::tempdir::TempDir;
use crate::builder::build_microvm_for_boot;
use crate::resources::VmResources;
use crate::seccomp::get_empty_filters;
use crate::test_utils::mock_resources::{MockBootSourceConfig, MockVmConfig, MockVmResources};
use crate::vmm_config::boot_source::BootSourceConfig;
use crate::vmm_config::instance_info::InstanceInfo;
use crate::vmm_config::machine_config::HugePageConfig;
use crate::vmm_config::memory_hotplug::MemoryHotplugConfig;
use crate::vstate::memory::{self, GuestMemoryMmap, GuestRegionMmap, GuestRegionMmapExt};
use crate::{EventManager, Vmm};
pub mod mock_resources;
/// Creates a [`GuestMemoryMmap`] with a single region of the given size starting at guest
/// physical address 0 and without dirty tracking.
pub fn single_region_mem(region_size: usize) -> GuestMemoryMmap {
single_region_mem_at(0, region_size)
}
pub fn single_region_mem_raw(region_size: usize) -> Vec<GuestRegionMmap> {
single_region_mem_at_raw(0, region_size)
}
/// Creates a [`GuestMemoryMmap`] with a single region of the given size starting at the given
/// guest physical address `at` and without dirty tracking.
pub fn single_region_mem_at(at: u64, size: usize) -> GuestMemoryMmap {
multi_region_mem(&[(GuestAddress(at), size)])
}
pub fn single_region_mem_at_raw(at: u64, size: usize) -> Vec<GuestRegionMmap> {
multi_region_mem_raw(&[(GuestAddress(at), size)])
}
/// Creates a [`GuestMemoryMmap`] with multiple regions and without dirty page tracking.
pub fn multi_region_mem(regions: &[(GuestAddress, usize)]) -> GuestMemoryMmap {
GuestRegionCollection::from_regions(
memory::anonymous(regions.iter().copied(), false, HugePageConfig::None)
.expect("Cannot initialize memory")
.into_iter()
.map(|region| GuestRegionMmapExt::dram_from_mmap_region(region, 0))
.collect(),
)
.unwrap()
}
pub fn multi_region_mem_raw(regions: &[(GuestAddress, usize)]) -> Vec<GuestRegionMmap> {
memory::anonymous(regions.iter().copied(), false, HugePageConfig::None)
.expect("Cannot initialize memory")
}
/// Creates a [`GuestMemoryMmap`] of the given size with the contained regions laid out in
/// accordance with the requirements of the architecture on which the tests are being run.
pub fn arch_mem(mem_size_bytes: usize) -> GuestMemoryMmap {
multi_region_mem(&crate::arch::arch_memory_regions(mem_size_bytes))
}
pub fn arch_mem_raw(mem_size_bytes: usize) -> Vec<GuestRegionMmap> {
multi_region_mem_raw(&crate::arch::arch_memory_regions(mem_size_bytes))
}
pub fn create_vmm(
_kernel_image: Option<&str>,
is_diff: bool,
boot_microvm: bool,
pci_enabled: bool,
memory_hotplug_enabled: bool,
) -> (Arc<Mutex<Vmm>>, EventManager) {
let mut event_manager = EventManager::new().unwrap();
let empty_seccomp_filters = get_empty_filters();
let boot_source_cfg = MockBootSourceConfig::new().with_default_boot_args();
#[cfg(target_arch = "aarch64")]
let boot_source_cfg: BootSourceConfig = boot_source_cfg.into();
#[cfg(target_arch = "x86_64")]
let boot_source_cfg: BootSourceConfig = match _kernel_image {
Some(kernel) => boot_source_cfg.with_kernel(kernel).into(),
None => boot_source_cfg.into(),
};
let mock_vm_res = MockVmResources::new().with_boot_source(boot_source_cfg);
let mut resources: VmResources = if is_diff {
mock_vm_res
.with_vm_config(MockVmConfig::new().with_dirty_page_tracking().into())
.into()
} else {
mock_vm_res.into()
};
resources.pci_enabled = pci_enabled;
if memory_hotplug_enabled {
resources.memory_hotplug = Some(MemoryHotplugConfig {
total_size_mib: 1024,
block_size_mib: 2,
slot_size_mib: 128,
});
}
let vmm = build_microvm_for_boot(
&InstanceInfo::default(),
&resources,
&mut event_manager,
&empty_seccomp_filters,
)
.unwrap();
if boot_microvm {
vmm.lock().unwrap().resume_vm().unwrap();
}
(vmm, event_manager)
}
pub fn default_vmm(kernel_image: Option<&str>) -> (Arc<Mutex<Vmm>>, EventManager) {
create_vmm(kernel_image, false, true, false, false)
}
pub fn default_vmm_no_boot(kernel_image: Option<&str>) -> (Arc<Mutex<Vmm>>, EventManager) {
create_vmm(kernel_image, false, false, false, false)
}
pub fn dirty_tracking_vmm(kernel_image: Option<&str>) -> (Arc<Mutex<Vmm>>, EventManager) {
create_vmm(kernel_image, true, true, false, false)
}
#[allow(clippy::undocumented_unsafe_blocks)]
#[allow(clippy::cast_possible_truncation)]
pub fn create_tmp_socket() -> (TempDir, String) {
let tmp_dir = TempDir::new().unwrap();
let tmp_dir_path_str = tmp_dir.as_path().to_str().unwrap();
let tmp_socket_path = format!("{tmp_dir_path_str}/tmp_socket");
unsafe {
let socketfd = libc::socket(libc::AF_UNIX, libc::SOCK_STREAM, 0);
if socketfd < 0 {
panic!("Cannot create socket");
}
let mut socket_addr = libc::sockaddr_un {
sun_family: libc::AF_UNIX as u16,
sun_path: [0; 108],
};
std::ptr::copy(
tmp_socket_path.as_ptr().cast(),
socket_addr.sun_path.as_mut_ptr(),
tmp_socket_path.len(),
);
let bind = libc::bind(
socketfd,
(&socket_addr as *const libc::sockaddr_un).cast(),
std::mem::size_of::<libc::sockaddr_un>() as u32,
);
if bind < 0 {
panic!("Cannot bind socket");
}
let listen = libc::listen(socketfd, 1);
if listen < 0 {
panic!("Cannot listen on socket");
}
}
(tmp_dir, tmp_socket_path)
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/test_utils/mock_resources/mod.rs | src/vmm/src/test_utils/mock_resources/mod.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![allow(missing_docs)]
use std::path::PathBuf;
use crate::cpu_config::templates::CustomCpuTemplate;
use crate::resources::VmResources;
use crate::vmm_config::boot_source::BootSourceConfig;
use crate::vmm_config::machine_config::{MachineConfig, MachineConfigUpdate};
pub const DEFAULT_BOOT_ARGS: &str = "reboot=k panic=1 pci=off";
#[cfg(target_arch = "x86_64")]
pub const DEFAULT_KERNEL_IMAGE: &str = "test_elf.bin";
#[cfg(target_arch = "aarch64")]
pub const DEFAULT_KERNEL_IMAGE: &str = "test_pe.bin";
#[cfg(target_arch = "x86_64")]
pub const NOISY_KERNEL_IMAGE: &str = "test_noisy_elf.bin";
#[cfg(target_arch = "aarch64")]
pub const NOISY_KERNEL_IMAGE: &str = "test_pe.bin";
pub fn kernel_image_path(kernel_image: Option<&str>) -> String {
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
path.push("src/test_utils/mock_resources");
path.push(kernel_image.unwrap_or(DEFAULT_KERNEL_IMAGE));
path.as_os_str().to_str().unwrap().to_string()
}
macro_rules! generate_from {
($src_type: ty, $dst_type: ty) => {
impl From<$src_type> for $dst_type {
fn from(src: $src_type) -> $dst_type {
src.0
}
}
};
}
#[derive(Debug)]
pub struct MockBootSourceConfig(BootSourceConfig);
impl MockBootSourceConfig {
pub fn new() -> MockBootSourceConfig {
MockBootSourceConfig(BootSourceConfig {
kernel_image_path: kernel_image_path(None),
initrd_path: None,
boot_args: None,
})
}
pub fn with_default_boot_args(mut self) -> Self {
self.0.boot_args = Some(DEFAULT_BOOT_ARGS.to_string());
self
}
#[cfg(target_arch = "x86_64")]
pub fn with_kernel(mut self, kernel_image: &str) -> Self {
self.0.kernel_image_path = kernel_image_path(Some(kernel_image));
self
}
}
impl Default for MockBootSourceConfig {
fn default() -> Self {
Self::new()
}
}
#[derive(Debug, Default)]
pub struct MockVmResources(VmResources);
impl MockVmResources {
pub fn new() -> MockVmResources {
MockVmResources::default()
}
pub fn with_boot_source(mut self, boot_source_cfg: BootSourceConfig) -> Self {
self.0.build_boot_source(boot_source_cfg).unwrap();
self
}
pub fn with_vm_config(mut self, vm_config: MachineConfig) -> Self {
let machine_config = MachineConfigUpdate::from(vm_config);
self.0.update_machine_config(&machine_config).unwrap();
self
}
pub fn set_cpu_template(&mut self, cpu_template: CustomCpuTemplate) {
self.0.machine_config.set_custom_cpu_template(cpu_template);
}
}
#[derive(Debug, Default)]
pub struct MockVmConfig(MachineConfig);
impl MockVmConfig {
pub fn new() -> MockVmConfig {
MockVmConfig::default()
}
pub fn with_dirty_page_tracking(mut self) -> Self {
self.0.track_dirty_pages = true;
self
}
}
generate_from!(MockBootSourceConfig, BootSourceConfig);
generate_from!(MockVmResources, VmResources);
generate_from!(MockVmConfig, MachineConfig);
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/device_manager/persist.rs | src/vmm/src/device_manager/persist.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Provides functionality for saving/restoring the MMIO device manager and its devices.
use std::fmt::{self, Debug};
use std::sync::{Arc, Mutex};
use event_manager::{MutEventSubscriber, SubscriberOps};
use log::{error, warn};
use serde::{Deserialize, Serialize};
use super::acpi::ACPIDeviceManager;
use super::mmio::*;
#[cfg(target_arch = "aarch64")]
use crate::arch::DeviceType;
use crate::device_manager::acpi::ACPIDeviceError;
#[cfg(target_arch = "x86_64")]
use crate::devices::acpi::vmclock::{VmClock, VmClockState};
use crate::devices::acpi::vmgenid::{VMGenIDState, VmGenId};
#[cfg(target_arch = "aarch64")]
use crate::devices::legacy::RTCDevice;
use crate::devices::virtio::ActivateError;
use crate::devices::virtio::balloon::persist::{BalloonConstructorArgs, BalloonState};
use crate::devices::virtio::balloon::{Balloon, BalloonError};
use crate::devices::virtio::block::BlockError;
use crate::devices::virtio::block::device::Block;
use crate::devices::virtio::block::persist::{BlockConstructorArgs, BlockState};
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::generated::virtio_ids;
use crate::devices::virtio::mem::VirtioMem;
use crate::devices::virtio::mem::persist::{
VirtioMemConstructorArgs, VirtioMemPersistError, VirtioMemState,
};
use crate::devices::virtio::net::Net;
use crate::devices::virtio::net::persist::{
NetConstructorArgs, NetPersistError as NetError, NetState,
};
use crate::devices::virtio::persist::{MmioTransportConstructorArgs, MmioTransportState};
use crate::devices::virtio::pmem::device::Pmem;
use crate::devices::virtio::pmem::persist::{
PmemConstructorArgs, PmemPersistError as PmemError, PmemState,
};
use crate::devices::virtio::rng::Entropy;
use crate::devices::virtio::rng::persist::{
EntropyConstructorArgs, EntropyPersistError as EntropyError, EntropyState,
};
use crate::devices::virtio::transport::mmio::{IrqTrigger, MmioTransport};
use crate::devices::virtio::vsock::persist::{
VsockConstructorArgs, VsockState, VsockUdsConstructorArgs,
};
use crate::devices::virtio::vsock::{Vsock, VsockError, VsockUnixBackend, VsockUnixBackendError};
use crate::mmds::data_store::MmdsVersion;
use crate::resources::VmResources;
use crate::snapshot::Persist;
use crate::vmm_config::memory_hotplug::MemoryHotplugConfig;
use crate::vmm_config::mmds::MmdsConfigError;
use crate::vstate::bus::BusError;
use crate::vstate::memory::GuestMemoryMmap;
use crate::{EventManager, Vm};
/// Errors for (de)serialization of the MMIO device manager.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum DevicePersistError {
/// Balloon: {0}
Balloon(#[from] BalloonError),
/// Block: {0}
Block(#[from] BlockError),
/// Device manager: {0}
DeviceManager(#[from] super::mmio::MmioError),
/// Mmio transport
MmioTransport,
/// Bus error: {0}
Bus(#[from] BusError),
#[cfg(target_arch = "aarch64")]
/// Legacy: {0}
Legacy(#[from] std::io::Error),
/// Net: {0}
Net(#[from] NetError),
/// Vsock: {0}
Vsock(#[from] VsockError),
/// VsockUnixBackend: {0}
VsockUnixBackend(#[from] VsockUnixBackendError),
/// MmdsConfig: {0}
MmdsConfig(#[from] MmdsConfigError),
/// Entropy: {0}
Entropy(#[from] EntropyError),
/// Pmem: {0}
Pmem(#[from] PmemError),
/// virtio-mem: {0}
VirtioMem(#[from] VirtioMemPersistError),
/// Could not activate device: {0}
DeviceActivation(#[from] ActivateError),
}
/// Holds the state of a MMIO VirtIO device
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VirtioDeviceState<T> {
/// Device identifier.
pub device_id: String,
/// Device state.
pub device_state: T,
/// Mmio transport state.
pub transport_state: MmioTransportState,
/// VmmResources.
pub device_info: MMIODeviceInfo,
}
/// Holds the state of a legacy device connected to the MMIO space.
#[cfg(target_arch = "aarch64")]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConnectedLegacyState {
/// Device identifier.
pub type_: DeviceType,
/// VmmResources.
pub device_info: MMIODeviceInfo,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MmdsState {
pub version: MmdsVersion,
pub imds_compat: bool,
}
/// Holds the device states.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct DeviceStates {
#[cfg(target_arch = "aarch64")]
// State of legacy devices in MMIO space.
pub legacy_devices: Vec<ConnectedLegacyState>,
/// Block device states.
pub block_devices: Vec<VirtioDeviceState<BlockState>>,
/// Net device states.
pub net_devices: Vec<VirtioDeviceState<NetState>>,
/// Vsock device state.
pub vsock_device: Option<VirtioDeviceState<VsockState>>,
/// Balloon device state.
pub balloon_device: Option<VirtioDeviceState<BalloonState>>,
/// Mmds version.
pub mmds: Option<MmdsState>,
/// Entropy device state.
pub entropy_device: Option<VirtioDeviceState<EntropyState>>,
/// Pmem device states.
pub pmem_devices: Vec<VirtioDeviceState<PmemState>>,
/// Memory device state.
pub memory_device: Option<VirtioDeviceState<VirtioMemState>>,
}
pub struct MMIODevManagerConstructorArgs<'a> {
pub mem: &'a GuestMemoryMmap,
pub vm: &'a Arc<Vm>,
pub event_manager: &'a mut EventManager,
pub vm_resources: &'a mut VmResources,
pub instance_id: &'a str,
}
impl fmt::Debug for MMIODevManagerConstructorArgs<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("MMIODevManagerConstructorArgs")
.field("mem", &self.mem)
.field("vm", &self.vm)
.field("event_manager", &"?")
.field("for_each_restored_device", &"?")
.field("vm_resources", &self.vm_resources)
.field("instance_id", &self.instance_id)
.finish()
}
}
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct ACPIDeviceManagerState {
vmgenid: VMGenIDState,
#[cfg(target_arch = "x86_64")]
vmclock: VmClockState,
}
impl<'a> Persist<'a> for ACPIDeviceManager {
type State = ACPIDeviceManagerState;
type ConstructorArgs = &'a Vm;
type Error = ACPIDeviceError;
fn save(&self) -> Self::State {
ACPIDeviceManagerState {
vmgenid: self.vmgenid.save(),
#[cfg(target_arch = "x86_64")]
vmclock: self.vmclock.save(),
}
}
fn restore(vm: Self::ConstructorArgs, state: &Self::State) -> Result<Self, Self::Error> {
let acpi_devices = ACPIDeviceManager {
// Safe to unwrap() here, this will never return an error.
vmgenid: VmGenId::restore((), &state.vmgenid).unwrap(),
// Safe to unwrap() here, this will never return an error.
#[cfg(target_arch = "x86_64")]
vmclock: VmClock::restore(vm.guest_memory(), &state.vmclock).unwrap(),
};
acpi_devices.attach_vmgenid(vm)?;
Ok(acpi_devices)
}
}
impl<'a> Persist<'a> for MMIODeviceManager {
type State = DeviceStates;
type ConstructorArgs = MMIODevManagerConstructorArgs<'a>;
type Error = DevicePersistError;
fn save(&self) -> Self::State {
let mut states = DeviceStates::default();
#[cfg(target_arch = "aarch64")]
{
if let Some(device) = &self.serial {
states.legacy_devices.push(ConnectedLegacyState {
type_: DeviceType::Serial,
device_info: device.resources,
});
}
if let Some(device) = &self.rtc {
states.legacy_devices.push(ConnectedLegacyState {
type_: DeviceType::Rtc,
device_info: device.resources,
});
}
}
let _: Result<(), ()> = self.for_each_virtio_device(|_, devid, device| {
let mmio_transport_locked = device.inner.lock().expect("Poisoned lock");
let transport_state = mmio_transport_locked.save();
let device_info = device.resources;
let device_id = devid.clone();
let mut locked_device = mmio_transport_locked.locked_device();
match locked_device.device_type() {
virtio_ids::VIRTIO_ID_BALLOON => {
let device_state = locked_device
.as_any()
.downcast_ref::<Balloon>()
.unwrap()
.save();
states.balloon_device = Some(VirtioDeviceState {
device_id,
device_state,
transport_state,
device_info,
});
}
// Both virtio-block and vhost-user-block share same device type.
virtio_ids::VIRTIO_ID_BLOCK => {
let block = locked_device.as_mut_any().downcast_mut::<Block>().unwrap();
if block.is_vhost_user() {
warn!(
"Skipping vhost-user-block device. VhostUserBlock does not support \
snapshotting yet"
);
} else {
block.prepare_save();
let device_state = block.save();
states.block_devices.push(VirtioDeviceState {
device_id,
device_state,
transport_state,
device_info,
});
}
}
virtio_ids::VIRTIO_ID_NET => {
let net = locked_device.as_mut_any().downcast_mut::<Net>().unwrap();
if let (Some(mmds_ns), None) = (net.mmds_ns.as_ref(), states.mmds.as_ref()) {
let mmds_guard = mmds_ns.mmds.lock().expect("Poisoned lock");
states.mmds = Some(MmdsState {
version: mmds_guard.version(),
imds_compat: mmds_guard.imds_compat(),
});
}
net.prepare_save();
let device_state = net.save();
states.net_devices.push(VirtioDeviceState {
device_id,
device_state,
transport_state,
device_info,
});
}
virtio_ids::VIRTIO_ID_VSOCK => {
let vsock = locked_device
.as_mut_any()
// Currently, VsockUnixBackend is the only implementation of VsockBackend.
.downcast_mut::<Vsock<VsockUnixBackend>>()
.unwrap();
// Send Transport event to reset connections if device
// is activated.
if vsock.is_activated() {
vsock.send_transport_reset_event().unwrap_or_else(|err| {
error!("Failed to send reset transport event: {:?}", err);
});
}
// Save state after potential notification to the guest. This
// way we save changes to the queue the notification can cause.
let device_state = VsockState {
backend: vsock.backend().save(),
frontend: vsock.save(),
};
states.vsock_device = Some(VirtioDeviceState {
device_id,
device_state,
transport_state,
device_info,
});
}
virtio_ids::VIRTIO_ID_RNG => {
let entropy = locked_device
.as_mut_any()
.downcast_mut::<Entropy>()
.unwrap();
let device_state = entropy.save();
states.entropy_device = Some(VirtioDeviceState {
device_id,
device_state,
transport_state,
device_info,
});
}
virtio_ids::VIRTIO_ID_PMEM => {
let pmem = locked_device.as_mut_any().downcast_mut::<Pmem>().unwrap();
let device_state = pmem.save();
states.pmem_devices.push(VirtioDeviceState {
device_id,
device_state,
transport_state,
device_info,
})
}
virtio_ids::VIRTIO_ID_MEM => {
let mem = locked_device
.as_mut_any()
.downcast_mut::<VirtioMem>()
.unwrap();
let device_state = mem.save();
states.memory_device = Some(VirtioDeviceState {
device_id,
device_state,
transport_state,
device_info,
});
}
_ => unreachable!(),
};
Ok(())
});
states
}
fn restore(
constructor_args: Self::ConstructorArgs,
state: &Self::State,
) -> Result<Self, Self::Error> {
let mut dev_manager = MMIODeviceManager::new();
let mem = constructor_args.mem;
let vm = constructor_args.vm;
#[cfg(target_arch = "aarch64")]
{
for state in &state.legacy_devices {
if state.type_ == DeviceType::Serial {
let serial = crate::DeviceManager::setup_serial_device(
constructor_args.event_manager,
constructor_args.vm_resources.serial_out_path.as_ref(),
)?;
dev_manager.register_mmio_serial(vm, serial, Some(state.device_info))?;
}
if state.type_ == DeviceType::Rtc {
let rtc = Arc::new(Mutex::new(RTCDevice::new()));
dev_manager.register_mmio_rtc(vm, rtc, Some(state.device_info))?;
}
}
}
let mut restore_helper = |device: Arc<Mutex<dyn VirtioDevice>>,
activated: bool,
is_vhost_user: bool,
as_subscriber: Arc<Mutex<dyn MutEventSubscriber>>,
id: &String,
state: &MmioTransportState,
device_info: &MMIODeviceInfo,
event_manager: &mut EventManager|
-> Result<(), Self::Error> {
let interrupt = Arc::new(IrqTrigger::new());
let restore_args = MmioTransportConstructorArgs {
mem: mem.clone(),
interrupt: interrupt.clone(),
device: device.clone(),
is_vhost_user,
};
let mmio_transport = Arc::new(Mutex::new(
MmioTransport::restore(restore_args, state)
.map_err(|()| DevicePersistError::MmioTransport)?,
));
dev_manager.register_mmio_virtio(
vm,
id.clone(),
MMIODevice {
resources: *device_info,
inner: mmio_transport,
},
)?;
if activated {
device
.lock()
.expect("Poisoned lock")
.activate(mem.clone(), interrupt)?;
}
event_manager.add_subscriber(as_subscriber);
Ok(())
};
if let Some(balloon_state) = &state.balloon_device {
let device = Arc::new(Mutex::new(Balloon::restore(
BalloonConstructorArgs { mem: mem.clone() },
&balloon_state.device_state,
)?));
constructor_args
.vm_resources
.balloon
.set_device(device.clone());
restore_helper(
device.clone(),
balloon_state.device_state.virtio_state.activated,
false,
device,
&balloon_state.device_id,
&balloon_state.transport_state,
&balloon_state.device_info,
constructor_args.event_manager,
)?;
}
for block_state in &state.block_devices {
let device = Arc::new(Mutex::new(Block::restore(
BlockConstructorArgs { mem: mem.clone() },
&block_state.device_state,
)?));
constructor_args
.vm_resources
.block
.add_virtio_device(device.clone());
restore_helper(
device.clone(),
block_state.device_state.is_activated(),
false,
device,
&block_state.device_id,
&block_state.transport_state,
&block_state.device_info,
constructor_args.event_manager,
)?;
}
// Initialize MMDS if MMDS state is included.
if let Some(mmds) = &state.mmds {
constructor_args.vm_resources.set_mmds_basic_config(
mmds.version,
mmds.imds_compat,
constructor_args.instance_id,
)?;
}
for net_state in &state.net_devices {
let device = Arc::new(Mutex::new(Net::restore(
NetConstructorArgs {
mem: mem.clone(),
mmds: constructor_args
.vm_resources
.mmds
.as_ref()
// Clone the Arc reference.
.cloned(),
},
&net_state.device_state,
)?));
constructor_args
.vm_resources
.net_builder
.add_device(device.clone());
restore_helper(
device.clone(),
net_state.device_state.virtio_state.activated,
false,
device,
&net_state.device_id,
&net_state.transport_state,
&net_state.device_info,
constructor_args.event_manager,
)?;
}
if let Some(vsock_state) = &state.vsock_device {
let ctor_args = VsockUdsConstructorArgs {
cid: vsock_state.device_state.frontend.cid,
};
let backend = VsockUnixBackend::restore(ctor_args, &vsock_state.device_state.backend)?;
let device = Arc::new(Mutex::new(Vsock::restore(
VsockConstructorArgs {
mem: mem.clone(),
backend,
},
&vsock_state.device_state.frontend,
)?));
constructor_args
.vm_resources
.vsock
.set_device(device.clone());
restore_helper(
device.clone(),
vsock_state.device_state.frontend.virtio_state.activated,
false,
device,
&vsock_state.device_id,
&vsock_state.transport_state,
&vsock_state.device_info,
constructor_args.event_manager,
)?;
}
if let Some(entropy_state) = &state.entropy_device {
let ctor_args = EntropyConstructorArgs { mem: mem.clone() };
let device = Arc::new(Mutex::new(Entropy::restore(
ctor_args,
&entropy_state.device_state,
)?));
constructor_args
.vm_resources
.entropy
.set_device(device.clone());
restore_helper(
device.clone(),
entropy_state.device_state.virtio_state.activated,
false,
device,
&entropy_state.device_id,
&entropy_state.transport_state,
&entropy_state.device_info,
constructor_args.event_manager,
)?;
}
for pmem_state in &state.pmem_devices {
let device = Arc::new(Mutex::new(Pmem::restore(
PmemConstructorArgs {
mem,
vm: vm.as_ref(),
},
&pmem_state.device_state,
)?));
constructor_args
.vm_resources
.pmem
.add_device(device.clone());
restore_helper(
device.clone(),
pmem_state.device_state.virtio_state.activated,
false,
device,
&pmem_state.device_id,
&pmem_state.transport_state,
&pmem_state.device_info,
constructor_args.event_manager,
)?;
}
if let Some(memory_state) = &state.memory_device {
let ctor_args = VirtioMemConstructorArgs::new(Arc::clone(vm));
let device = VirtioMem::restore(ctor_args, &memory_state.device_state)?;
constructor_args.vm_resources.memory_hotplug = Some(MemoryHotplugConfig {
total_size_mib: device.total_size_mib(),
block_size_mib: device.block_size_mib(),
slot_size_mib: device.slot_size_mib(),
});
let arcd_device = Arc::new(Mutex::new(device));
restore_helper(
arcd_device.clone(),
memory_state.device_state.virtio_state.activated,
false,
arcd_device,
&memory_state.device_id,
&memory_state.transport_state,
&memory_state.device_info,
constructor_args.event_manager,
)?;
}
Ok(dev_manager)
}
}
#[cfg(test)]
mod tests {
use vmm_sys_util::tempfile::TempFile;
use super::*;
use crate::builder::tests::*;
use crate::device_manager;
use crate::devices::virtio::block::CacheType;
use crate::resources::VmmConfig;
use crate::snapshot::Snapshot;
use crate::vmm_config::balloon::BalloonDeviceConfig;
use crate::vmm_config::entropy::EntropyDeviceConfig;
use crate::vmm_config::memory_hotplug::MemoryHotplugConfig;
use crate::vmm_config::net::NetworkInterfaceConfig;
use crate::vmm_config::pmem::PmemConfig;
use crate::vmm_config::vsock::VsockDeviceConfig;
impl<T> PartialEq for VirtioDeviceState<T> {
fn eq(&self, other: &VirtioDeviceState<T>) -> bool {
// Actual device state equality is checked by the device's tests.
self.transport_state == other.transport_state && self.device_info == other.device_info
}
}
impl PartialEq for DeviceStates {
fn eq(&self, other: &DeviceStates) -> bool {
self.balloon_device == other.balloon_device
&& self.block_devices == other.block_devices
&& self.net_devices == other.net_devices
&& self.vsock_device == other.vsock_device
&& self.entropy_device == other.entropy_device
&& self.memory_device == other.memory_device
}
}
impl<T> PartialEq for MMIODevice<T> {
fn eq(&self, other: &Self) -> bool {
self.resources == other.resources
}
}
impl PartialEq for MMIODeviceManager {
fn eq(&self, other: &MMIODeviceManager) -> bool {
// We only care about the device hashmap.
if self.virtio_devices.len() != other.virtio_devices.len() {
return false;
}
for (key, val) in &self.virtio_devices {
match other.virtio_devices.get(key) {
Some(other_val) if val == other_val => continue,
_ => return false,
}
}
self.boot_timer == other.boot_timer
}
}
#[test]
fn test_device_manager_persistence() {
let mut buf = vec![0; 65536];
// These need to survive so the restored blocks find them.
let _block_files;
let _pmem_files;
let mut tmp_sock_file = TempFile::new().unwrap();
tmp_sock_file.remove().unwrap();
// Set up a vmm with one of each device, and get the serialized DeviceStates.
{
let mut event_manager = EventManager::new().expect("Unable to create EventManager");
let mut vmm = default_vmm();
let mut cmdline = default_kernel_cmdline();
// Add a balloon device.
let balloon_cfg = BalloonDeviceConfig {
amount_mib: 123,
deflate_on_oom: false,
stats_polling_interval_s: 1,
free_page_hinting: false,
free_page_reporting: false,
};
insert_balloon_device(&mut vmm, &mut cmdline, &mut event_manager, balloon_cfg);
// Add a block device.
let drive_id = String::from("root");
let block_configs = vec![CustomBlockConfig::new(
drive_id,
true,
None,
true,
CacheType::Unsafe,
)];
_block_files =
insert_block_devices(&mut vmm, &mut cmdline, &mut event_manager, block_configs);
// Add a net device.
let network_interface = NetworkInterfaceConfig {
iface_id: String::from("netif"),
host_dev_name: String::from("hostname"),
guest_mac: None,
rx_rate_limiter: None,
tx_rate_limiter: None,
};
insert_net_device_with_mmds(
&mut vmm,
&mut cmdline,
&mut event_manager,
network_interface,
MmdsVersion::V2,
);
// Add a vsock device.
let vsock_dev_id = "vsock";
let vsock_config = VsockDeviceConfig {
vsock_id: Some(vsock_dev_id.to_string()),
guest_cid: 3,
uds_path: tmp_sock_file.as_path().to_str().unwrap().to_string(),
};
insert_vsock_device(&mut vmm, &mut cmdline, &mut event_manager, vsock_config);
// Add an entropy device.
let entropy_config = EntropyDeviceConfig::default();
insert_entropy_device(&mut vmm, &mut cmdline, &mut event_manager, entropy_config);
// Add a pmem device.
let pmem_id = String::from("pmem");
let pmem_configs = vec![PmemConfig {
id: pmem_id,
path_on_host: "".into(),
root_device: true,
read_only: true,
}];
_pmem_files =
insert_pmem_devices(&mut vmm, &mut cmdline, &mut event_manager, pmem_configs);
let memory_hotplug_config = MemoryHotplugConfig {
total_size_mib: 1024,
block_size_mib: 2,
slot_size_mib: 128,
};
insert_virtio_mem_device(
&mut vmm,
&mut cmdline,
&mut event_manager,
memory_hotplug_config,
);
Snapshot::new(vmm.device_manager.save())
.save(&mut buf.as_mut_slice())
.unwrap();
}
tmp_sock_file.remove().unwrap();
let mut event_manager = EventManager::new().expect("Unable to create EventManager");
let vmm = default_vmm();
let device_manager_state: device_manager::DevicesState =
Snapshot::load_without_crc_check(buf.as_slice())
.unwrap()
.data;
let vm_resources = &mut VmResources::default();
let restore_args = MMIODevManagerConstructorArgs {
mem: vmm.vm.guest_memory(),
vm: &vmm.vm,
event_manager: &mut event_manager,
vm_resources,
instance_id: "microvm-id",
};
let _restored_dev_manager =
MMIODeviceManager::restore(restore_args, &device_manager_state.mmio_state).unwrap();
let expected_vm_resources = format!(
r#"{{
"balloon": {{
"amount_mib": 123,
"deflate_on_oom": false,
"stats_polling_interval_s": 1,
"free_page_hinting": false,
"free_page_reporting": false
}},
"drives": [
{{
"drive_id": "root",
"partuuid": null,
"is_root_device": true,
"cache_type": "Unsafe",
"is_read_only": true,
"path_on_host": "{}",
"rate_limiter": null,
"io_engine": "Sync",
"socket": null
}}
],
"boot-source": {{
"kernel_image_path": "",
"initrd_path": null,
"boot_args": null
}},
"cpu-config": null,
"logger": null,
"machine-config": {{
"vcpu_count": 1,
"mem_size_mib": 128,
"smt": false,
"track_dirty_pages": false,
"huge_pages": "None"
}},
"metrics": null,
"mmds-config": {{
"version": "V2",
"network_interfaces": [
"netif"
],
"ipv4_address": "169.254.169.254",
"imds_compat": false
}},
"network-interfaces": [
{{
"iface_id": "netif",
"host_dev_name": "hostname",
"guest_mac": null,
"rx_rate_limiter": null,
"tx_rate_limiter": null
}}
],
"vsock": {{
"guest_cid": 3,
"uds_path": "{}"
}},
"entropy": {{
"rate_limiter": null
}},
"pmem": [
{{
"id": "pmem",
"path_on_host": "{}",
"root_device": true,
"read_only": true
}}
],
"memory-hotplug": {{
"total_size_mib": 1024,
"block_size_mib": 2,
"slot_size_mib": 128
}}
}}"#,
_block_files.last().unwrap().as_path().to_str().unwrap(),
tmp_sock_file.as_path().to_str().unwrap(),
_pmem_files.last().unwrap().as_path().to_str().unwrap(),
);
assert_eq!(
vm_resources
.mmds
.as_ref()
.unwrap()
.lock()
.unwrap()
.version(),
MmdsVersion::V2
);
assert_eq!(
device_manager_state.mmio_state.mmds.unwrap().version,
MmdsVersion::V2
);
assert_eq!(
expected_vm_resources,
serde_json::to_string_pretty(&VmmConfig::from(&*vm_resources)).unwrap()
);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/device_manager/acpi.rs | src/vmm/src/device_manager/acpi.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use acpi_tables::{Aml, aml};
use vm_memory::GuestMemoryError;
use crate::Vm;
#[cfg(target_arch = "x86_64")]
use crate::devices::acpi::vmclock::VmClock;
use crate::devices::acpi::vmgenid::VmGenId;
use crate::vstate::resources::ResourceAllocator;
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum ACPIDeviceError {
/// Could not register GSI with KVM: {0}
RegisterIrq(#[from] kvm_ioctls::Error),
/// Could not write to guest memory: {0}
WriteGuestMemory(#[from] GuestMemoryError),
}
#[derive(Debug)]
pub struct ACPIDeviceManager {
/// VMGenID device
pub vmgenid: VmGenId,
/// VMclock device
#[cfg(target_arch = "x86_64")]
pub vmclock: VmClock,
}
impl ACPIDeviceManager {
/// Create a new ACPIDeviceManager object
pub fn new(resource_allocator: &mut ResourceAllocator) -> Self {
ACPIDeviceManager {
vmgenid: VmGenId::new(resource_allocator),
#[cfg(target_arch = "x86_64")]
vmclock: VmClock::new(resource_allocator),
}
}
pub fn attach_vmgenid(&self, vm: &Vm) -> Result<(), ACPIDeviceError> {
vm.register_irq(&self.vmgenid.interrupt_evt, self.vmgenid.gsi)?;
self.vmgenid.activate(vm.guest_memory())?;
Ok(())
}
#[cfg(target_arch = "x86_64")]
pub fn attach_vmclock(&self, vm: &Vm) -> Result<(), ACPIDeviceError> {
self.vmclock.activate(vm.guest_memory())?;
Ok(())
}
}
impl Aml for ACPIDeviceManager {
fn append_aml_bytes(&self, v: &mut Vec<u8>) -> Result<(), aml::AmlError> {
// AML for [`VmGenId`] device.
self.vmgenid.append_aml_bytes(v)?;
// AML for [`VmClock`] device.
#[cfg(target_arch = "x86_64")]
self.vmclock.append_aml_bytes(v)?;
// Create the AML for the GED interrupt handler
aml::Device::new(
"_SB_.GED_".try_into()?,
vec![
&aml::Name::new("_HID".try_into()?, &"ACPI0013")?,
&aml::Name::new(
"_CRS".try_into()?,
&aml::ResourceTemplate::new(vec![&aml::Interrupt::new(
true,
true,
false,
false,
self.vmgenid.gsi,
)]),
)?,
&aml::Method::new(
"_EVT".try_into()?,
1,
true,
vec![&aml::If::new(
// We know that the maximum IRQ number fits in a u8. We have up to
// 32 IRQs in x86 and up to 128 in
// ARM (look into
// `vmm::crate::arch::layout::GSI_LEGACY_END`)
#[allow(clippy::cast_possible_truncation)]
&aml::Equal::new(&aml::Arg(0), &(self.vmgenid.gsi as u8)),
vec![&aml::Notify::new(
&aml::Path::new("\\_SB_.VGEN")?,
&0x80usize,
)],
)],
),
],
)
.append_aml_bytes(v)
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/device_manager/pci_mngr.rs | src/vmm/src/device_manager/pci_mngr.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::collections::HashMap;
use std::fmt::Debug;
use std::ops::DerefMut;
use std::sync::{Arc, Mutex};
use event_manager::{MutEventSubscriber, SubscriberOps};
use log::{debug, error, warn};
use serde::{Deserialize, Serialize};
use super::persist::MmdsState;
use crate::devices::pci::PciSegment;
use crate::devices::virtio::balloon::Balloon;
use crate::devices::virtio::balloon::persist::{BalloonConstructorArgs, BalloonState};
use crate::devices::virtio::block::device::Block;
use crate::devices::virtio::block::persist::{BlockConstructorArgs, BlockState};
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::generated::virtio_ids;
use crate::devices::virtio::mem::VirtioMem;
use crate::devices::virtio::mem::persist::{VirtioMemConstructorArgs, VirtioMemState};
use crate::devices::virtio::net::Net;
use crate::devices::virtio::net::persist::{NetConstructorArgs, NetState};
use crate::devices::virtio::pmem::device::Pmem;
use crate::devices::virtio::pmem::persist::{PmemConstructorArgs, PmemState};
use crate::devices::virtio::rng::Entropy;
use crate::devices::virtio::rng::persist::{EntropyConstructorArgs, EntropyState};
use crate::devices::virtio::transport::pci::device::{
CAPABILITY_BAR_SIZE, VirtioPciDevice, VirtioPciDeviceError, VirtioPciDeviceState,
};
use crate::devices::virtio::vsock::persist::{
VsockConstructorArgs, VsockState, VsockUdsConstructorArgs,
};
use crate::devices::virtio::vsock::{Vsock, VsockUnixBackend};
use crate::pci::bus::PciRootError;
use crate::resources::VmResources;
use crate::snapshot::Persist;
use crate::vmm_config::memory_hotplug::MemoryHotplugConfig;
use crate::vmm_config::mmds::MmdsConfigError;
use crate::vstate::bus::BusError;
use crate::vstate::interrupts::InterruptError;
use crate::vstate::memory::GuestMemoryMmap;
use crate::{EventManager, Vm};
#[derive(Debug, Default)]
pub struct PciDevices {
/// PCIe segment of the VMM, if PCI is enabled. We currently support a single PCIe segment.
pub pci_segment: Option<PciSegment>,
/// All VirtIO PCI devices of the system
pub virtio_devices: HashMap<(u32, String), Arc<Mutex<VirtioPciDevice>>>,
}
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum PciManagerError {
/// Resource allocation error: {0}
ResourceAllocation(#[from] vm_allocator::Error),
/// Bus error: {0}
Bus(#[from] BusError),
/// PCI root error: {0}
PciRoot(#[from] PciRootError),
/// MSI error: {0}
Msi(#[from] InterruptError),
/// VirtIO PCI device error: {0}
VirtioPciDevice(#[from] VirtioPciDeviceError),
/// KVM error: {0}
Kvm(#[from] vmm_sys_util::errno::Error),
/// MMDS error: {0}
Mmds(#[from] MmdsConfigError),
}
impl PciDevices {
pub fn new() -> Self {
Default::default()
}
pub fn attach_pci_segment(&mut self, vm: &Arc<Vm>) -> Result<(), PciManagerError> {
// We only support a single PCIe segment. Calling this function twice is a Firecracker
// internal error.
assert!(self.pci_segment.is_none());
// Currently we don't assign any IRQs to PCI devices. We will be using MSI-X interrupts
// only.
let pci_segment = PciSegment::new(0, vm, &[0u8; 32])?;
self.pci_segment = Some(pci_segment);
Ok(())
}
fn register_bars_with_bus(
vm: &Vm,
virtio_device: &Arc<Mutex<VirtioPciDevice>>,
) -> Result<(), PciManagerError> {
let virtio_device_locked = virtio_device.lock().expect("Poisoned lock");
debug!(
"Inserting MMIO BAR region: {:#x}:{:#x}",
virtio_device_locked.bar_address, CAPABILITY_BAR_SIZE
);
vm.common.mmio_bus.insert(
virtio_device.clone(),
virtio_device_locked.bar_address,
CAPABILITY_BAR_SIZE,
)?;
Ok(())
}
pub(crate) fn attach_pci_virtio_device<
T: 'static + VirtioDevice + MutEventSubscriber + Debug,
>(
&mut self,
vm: &Arc<Vm>,
id: String,
device: Arc<Mutex<T>>,
) -> Result<(), PciManagerError> {
// We should only be reaching this point if PCI is enabled
let pci_segment = self.pci_segment.as_ref().unwrap();
let pci_device_bdf = pci_segment.next_device_bdf()?;
debug!("Allocating BDF: {pci_device_bdf:?} for device");
let mem = vm.guest_memory().clone();
let device_type: u32 = device.lock().expect("Poisoned lock").device_type();
// Allocate one MSI vector per queue, plus one for configuration
let msix_num =
u16::try_from(device.lock().expect("Poisoned lock").queues().len() + 1).unwrap();
let msix_vectors = Vm::create_msix_group(vm.clone(), msix_num)?;
// Create the transport
let mut virtio_device = VirtioPciDevice::new(
id.clone(),
mem,
device,
Arc::new(msix_vectors),
pci_device_bdf.into(),
)?;
// Allocate bars
let mut resource_allocator_lock = vm.resource_allocator();
let resource_allocator = resource_allocator_lock.deref_mut();
virtio_device.allocate_bars(&mut resource_allocator.mmio64_memory);
let virtio_device = Arc::new(Mutex::new(virtio_device));
pci_segment
.pci_bus
.lock()
.expect("Poisoned lock")
.add_device(pci_device_bdf.device() as u32, virtio_device.clone());
self.virtio_devices
.insert((device_type, id.clone()), virtio_device.clone());
Self::register_bars_with_bus(vm, &virtio_device)?;
virtio_device
.lock()
.expect("Poisoned lock")
.register_notification_ioevent(vm)?;
Ok(())
}
fn restore_pci_device<T: 'static + VirtioDevice + MutEventSubscriber + Debug>(
&mut self,
vm: &Arc<Vm>,
device: Arc<Mutex<T>>,
device_id: &str,
transport_state: &VirtioPciDeviceState,
event_manager: &mut EventManager,
) -> Result<(), PciManagerError> {
// We should only be reaching this point if PCI is enabled
let pci_segment = self.pci_segment.as_ref().unwrap();
let device_type: u32 = device.lock().expect("Poisoned lock").device_type();
let virtio_device = Arc::new(Mutex::new(VirtioPciDevice::new_from_state(
device_id.to_string(),
vm,
device.clone(),
transport_state.clone(),
)?));
pci_segment
.pci_bus
.lock()
.expect("Poisoned lock")
.add_device(
transport_state.pci_device_bdf.device() as u32,
virtio_device.clone(),
);
self.virtio_devices
.insert((device_type, device_id.to_string()), virtio_device.clone());
Self::register_bars_with_bus(vm, &virtio_device)?;
virtio_device
.lock()
.expect("Poisoned lock")
.register_notification_ioevent(vm)?;
event_manager.add_subscriber(device);
Ok(())
}
/// Gets the specified device.
pub fn get_virtio_device(
&self,
device_type: u32,
device_id: &str,
) -> Option<&Arc<Mutex<VirtioPciDevice>>> {
self.virtio_devices
.get(&(device_type, device_id.to_string()))
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VirtioDeviceState<T> {
/// Device identifier
pub device_id: String,
/// Device BDF
pub pci_device_bdf: u32,
/// Device state
pub device_state: T,
/// Transport state
pub transport_state: VirtioPciDeviceState,
}
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct PciDevicesState {
/// Whether PCI is enabled
pub pci_enabled: bool,
/// Block device states.
pub block_devices: Vec<VirtioDeviceState<BlockState>>,
/// Net device states.
pub net_devices: Vec<VirtioDeviceState<NetState>>,
/// Vsock device state.
pub vsock_device: Option<VirtioDeviceState<VsockState>>,
/// Balloon device state.
pub balloon_device: Option<VirtioDeviceState<BalloonState>>,
/// Mmds state.
pub mmds: Option<MmdsState>,
/// Entropy device state.
pub entropy_device: Option<VirtioDeviceState<EntropyState>>,
/// Pmem device states.
pub pmem_devices: Vec<VirtioDeviceState<PmemState>>,
/// Memory device state.
pub memory_device: Option<VirtioDeviceState<VirtioMemState>>,
}
pub struct PciDevicesConstructorArgs<'a> {
pub vm: &'a Arc<Vm>,
pub mem: &'a GuestMemoryMmap,
pub vm_resources: &'a mut VmResources,
pub instance_id: &'a str,
pub event_manager: &'a mut EventManager,
}
impl<'a> Debug for PciDevicesConstructorArgs<'a> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("PciDevicesConstructorArgs")
.field("vm", &self.vm)
.field("mem", &self.mem)
.field("vm_resources", &self.vm_resources)
.field("instance_id", &self.instance_id)
.finish()
}
}
impl<'a> Persist<'a> for PciDevices {
type State = PciDevicesState;
type ConstructorArgs = PciDevicesConstructorArgs<'a>;
type Error = PciManagerError;
fn save(&self) -> Self::State {
let mut state = PciDevicesState::default();
if self.pci_segment.is_some() {
state.pci_enabled = true;
} else {
return state;
}
for pci_dev in self.virtio_devices.values() {
let locked_pci_dev = pci_dev.lock().expect("Poisoned lock");
let transport_state = locked_pci_dev.state();
let virtio_dev = locked_pci_dev.virtio_device();
let mut locked_virtio_dev = virtio_dev.lock().expect("Poisoned lock");
let pci_device_bdf = transport_state.pci_device_bdf.into();
match locked_virtio_dev.device_type() {
virtio_ids::VIRTIO_ID_BALLOON => {
let balloon_device = locked_virtio_dev
.as_any()
.downcast_ref::<Balloon>()
.unwrap();
let device_state = balloon_device.save();
state.balloon_device = Some(VirtioDeviceState {
device_id: balloon_device.id().to_string(),
pci_device_bdf,
device_state,
transport_state,
});
}
virtio_ids::VIRTIO_ID_BLOCK => {
let block_dev = locked_virtio_dev
.as_mut_any()
.downcast_mut::<Block>()
.unwrap();
if block_dev.is_vhost_user() {
warn!(
"Skipping vhost-user-block device. VhostUserBlock does not support \
snapshotting yet"
);
} else {
block_dev.prepare_save();
let device_state = block_dev.save();
state.block_devices.push(VirtioDeviceState {
device_id: block_dev.id().to_string(),
pci_device_bdf,
device_state,
transport_state,
});
}
}
virtio_ids::VIRTIO_ID_NET => {
let net_dev = locked_virtio_dev
.as_mut_any()
.downcast_mut::<Net>()
.unwrap();
if let (Some(mmds_ns), None) = (net_dev.mmds_ns.as_ref(), state.mmds.as_ref()) {
let mmds_guard = mmds_ns.mmds.lock().expect("Poisoned lock");
state.mmds = Some(MmdsState {
version: mmds_guard.version(),
imds_compat: mmds_guard.imds_compat(),
});
}
net_dev.prepare_save();
let device_state = net_dev.save();
state.net_devices.push(VirtioDeviceState {
device_id: net_dev.id().to_string(),
pci_device_bdf,
device_state,
transport_state,
})
}
virtio_ids::VIRTIO_ID_VSOCK => {
let vsock_dev = locked_virtio_dev
.as_mut_any()
// Currently, VsockUnixBackend is the only implementation of VsockBackend.
.downcast_mut::<Vsock<VsockUnixBackend>>()
.unwrap();
// Send Transport event to reset connections if device
// is activated.
if vsock_dev.is_activated() {
vsock_dev
.send_transport_reset_event()
.unwrap_or_else(|err| {
error!("Failed to send reset transport event: {:?}", err);
});
}
// Save state after potential notification to the guest. This
// way we save changes to the queue the notification can cause.
let vsock_state = VsockState {
backend: vsock_dev.backend().save(),
frontend: vsock_dev.save(),
};
state.vsock_device = Some(VirtioDeviceState {
device_id: vsock_dev.id().to_string(),
pci_device_bdf,
device_state: vsock_state,
transport_state,
});
}
virtio_ids::VIRTIO_ID_RNG => {
let rng_dev = locked_virtio_dev
.as_mut_any()
.downcast_mut::<Entropy>()
.unwrap();
let device_state = rng_dev.save();
state.entropy_device = Some(VirtioDeviceState {
device_id: rng_dev.id().to_string(),
pci_device_bdf,
device_state,
transport_state,
})
}
virtio_ids::VIRTIO_ID_PMEM => {
let pmem_dev = locked_virtio_dev
.as_mut_any()
.downcast_mut::<Pmem>()
.unwrap();
let device_state = pmem_dev.save();
state.pmem_devices.push(VirtioDeviceState {
device_id: pmem_dev.config.id.clone(),
pci_device_bdf,
device_state,
transport_state,
});
}
virtio_ids::VIRTIO_ID_MEM => {
let mem_dev = locked_virtio_dev
.as_mut_any()
.downcast_mut::<VirtioMem>()
.unwrap();
let device_state = mem_dev.save();
state.memory_device = Some(VirtioDeviceState {
device_id: mem_dev.id().to_string(),
pci_device_bdf,
device_state,
transport_state,
})
}
_ => unreachable!(),
}
}
state
}
fn restore(
constructor_args: Self::ConstructorArgs,
state: &Self::State,
) -> Result<Self, Self::Error> {
let mem = constructor_args.mem;
let mut pci_devices = PciDevices::new();
if !state.pci_enabled {
return Ok(pci_devices);
}
pci_devices.attach_pci_segment(constructor_args.vm)?;
if let Some(balloon_state) = &state.balloon_device {
let device = Arc::new(Mutex::new(
Balloon::restore(
BalloonConstructorArgs { mem: mem.clone() },
&balloon_state.device_state,
)
.unwrap(),
));
constructor_args
.vm_resources
.balloon
.set_device(device.clone());
pci_devices
.restore_pci_device(
constructor_args.vm,
device,
&balloon_state.device_id,
&balloon_state.transport_state,
constructor_args.event_manager,
)
.unwrap()
}
for block_state in &state.block_devices {
let device = Arc::new(Mutex::new(
Block::restore(
BlockConstructorArgs { mem: mem.clone() },
&block_state.device_state,
)
.unwrap(),
));
constructor_args
.vm_resources
.block
.add_virtio_device(device.clone());
pci_devices
.restore_pci_device(
constructor_args.vm,
device,
&block_state.device_id,
&block_state.transport_state,
constructor_args.event_manager,
)
.unwrap()
}
// Initialize MMDS if MMDS state is included.
if let Some(mmds) = &state.mmds {
constructor_args
.vm_resources
.set_mmds_basic_config(mmds.version, mmds.imds_compat, constructor_args.instance_id)
.unwrap();
} else if state
.net_devices
.iter()
.any(|dev| dev.device_state.mmds_ns.is_some())
{
// If there's at least one network device having an mmds_ns, it means
// that we are restoring from a version that did not persist the `MmdsVersionState`.
// Init with the default.
constructor_args.vm_resources.mmds_or_default()?;
}
for net_state in &state.net_devices {
let device = Arc::new(Mutex::new(
Net::restore(
NetConstructorArgs {
mem: mem.clone(),
mmds: constructor_args
.vm_resources
.mmds
.as_ref()
// Clone the Arc reference.
.cloned(),
},
&net_state.device_state,
)
.unwrap(),
));
constructor_args
.vm_resources
.net_builder
.add_device(device.clone());
pci_devices
.restore_pci_device(
constructor_args.vm,
device,
&net_state.device_id,
&net_state.transport_state,
constructor_args.event_manager,
)
.unwrap()
}
if let Some(vsock_state) = &state.vsock_device {
let ctor_args = VsockUdsConstructorArgs {
cid: vsock_state.device_state.frontend.cid,
};
let backend =
VsockUnixBackend::restore(ctor_args, &vsock_state.device_state.backend).unwrap();
let device = Arc::new(Mutex::new(
Vsock::restore(
VsockConstructorArgs {
mem: mem.clone(),
backend,
},
&vsock_state.device_state.frontend,
)
.unwrap(),
));
constructor_args
.vm_resources
.vsock
.set_device(device.clone());
pci_devices
.restore_pci_device(
constructor_args.vm,
device,
&vsock_state.device_id,
&vsock_state.transport_state,
constructor_args.event_manager,
)
.unwrap()
}
if let Some(entropy_state) = &state.entropy_device {
let ctor_args = EntropyConstructorArgs { mem: mem.clone() };
let device = Arc::new(Mutex::new(
Entropy::restore(ctor_args, &entropy_state.device_state).unwrap(),
));
constructor_args
.vm_resources
.entropy
.set_device(device.clone());
pci_devices
.restore_pci_device(
constructor_args.vm,
device,
&entropy_state.device_id,
&entropy_state.transport_state,
constructor_args.event_manager,
)
.unwrap()
}
for pmem_state in &state.pmem_devices {
let device = Arc::new(Mutex::new(
Pmem::restore(
PmemConstructorArgs {
mem,
vm: constructor_args.vm.as_ref(),
},
&pmem_state.device_state,
)
.unwrap(),
));
constructor_args
.vm_resources
.pmem
.add_device(device.clone());
pci_devices
.restore_pci_device(
constructor_args.vm,
device,
&pmem_state.device_id,
&pmem_state.transport_state,
constructor_args.event_manager,
)
.unwrap()
}
if let Some(memory_device) = &state.memory_device {
let ctor_args = VirtioMemConstructorArgs::new(Arc::clone(constructor_args.vm));
let device = VirtioMem::restore(ctor_args, &memory_device.device_state).unwrap();
constructor_args.vm_resources.memory_hotplug = Some(MemoryHotplugConfig {
total_size_mib: device.total_size_mib(),
block_size_mib: device.block_size_mib(),
slot_size_mib: device.slot_size_mib(),
});
let arcd_device = Arc::new(Mutex::new(device));
pci_devices
.restore_pci_device(
constructor_args.vm,
arcd_device,
&memory_device.device_id,
&memory_device.transport_state,
constructor_args.event_manager,
)
.unwrap()
}
Ok(pci_devices)
}
}
#[cfg(test)]
mod tests {
use vmm_sys_util::tempfile::TempFile;
use super::*;
use crate::builder::tests::*;
use crate::device_manager;
use crate::devices::virtio::block::CacheType;
use crate::mmds::data_store::MmdsVersion;
use crate::resources::VmmConfig;
use crate::snapshot::Snapshot;
use crate::vmm_config::balloon::BalloonDeviceConfig;
use crate::vmm_config::entropy::EntropyDeviceConfig;
use crate::vmm_config::memory_hotplug::MemoryHotplugConfig;
use crate::vmm_config::net::NetworkInterfaceConfig;
use crate::vmm_config::pmem::PmemConfig;
use crate::vmm_config::vsock::VsockDeviceConfig;
#[test]
fn test_device_manager_persistence() {
let mut buf = vec![0; 65536];
// These need to survive so the restored blocks find them.
let _block_files;
let _pmem_files;
let mut tmp_sock_file = TempFile::new().unwrap();
tmp_sock_file.remove().unwrap();
// Set up a vmm with one of each device, and get the serialized DeviceStates.
{
let mut event_manager = EventManager::new().expect("Unable to create EventManager");
let mut vmm = default_vmm();
vmm.device_manager.enable_pci(&vmm.vm).unwrap();
let mut cmdline = default_kernel_cmdline();
// Add a balloon device.
let balloon_cfg = BalloonDeviceConfig {
amount_mib: 123,
deflate_on_oom: false,
stats_polling_interval_s: 1,
free_page_hinting: false,
free_page_reporting: false,
};
insert_balloon_device(&mut vmm, &mut cmdline, &mut event_manager, balloon_cfg);
// Add a block device.
let drive_id = String::from("root");
let block_configs = vec![CustomBlockConfig::new(
drive_id,
true,
None,
true,
CacheType::Unsafe,
)];
_block_files =
insert_block_devices(&mut vmm, &mut cmdline, &mut event_manager, block_configs);
// Add a net device.
let network_interface = NetworkInterfaceConfig {
iface_id: String::from("netif"),
host_dev_name: String::from("hostname"),
guest_mac: None,
rx_rate_limiter: None,
tx_rate_limiter: None,
};
insert_net_device_with_mmds(
&mut vmm,
&mut cmdline,
&mut event_manager,
network_interface,
MmdsVersion::V2,
);
// Add a vsock device.
let vsock_dev_id = "vsock";
let vsock_config = VsockDeviceConfig {
vsock_id: Some(vsock_dev_id.to_string()),
guest_cid: 3,
uds_path: tmp_sock_file.as_path().to_str().unwrap().to_string(),
};
insert_vsock_device(&mut vmm, &mut cmdline, &mut event_manager, vsock_config);
// Add an entropy device.
let entropy_config = EntropyDeviceConfig::default();
insert_entropy_device(&mut vmm, &mut cmdline, &mut event_manager, entropy_config);
// Add a pmem device.
let pmem_id = String::from("pmem");
let pmem_configs = vec![PmemConfig {
id: pmem_id,
path_on_host: "".into(),
root_device: true,
read_only: true,
}];
_pmem_files =
insert_pmem_devices(&mut vmm, &mut cmdline, &mut event_manager, pmem_configs);
let memory_hotplug_config = MemoryHotplugConfig {
total_size_mib: 1024,
block_size_mib: 2,
slot_size_mib: 128,
};
insert_virtio_mem_device(
&mut vmm,
&mut cmdline,
&mut event_manager,
memory_hotplug_config,
);
Snapshot::new(vmm.device_manager.save())
.save(&mut buf.as_mut_slice())
.unwrap();
}
tmp_sock_file.remove().unwrap();
let mut event_manager = EventManager::new().expect("Unable to create EventManager");
// Keep in mind we are re-creating here an empty DeviceManager. Restoring later on
// will create a new PciDevices manager different than vmm.pci_devices. We're doing
// this to avoid restoring the whole Vmm, since what we really need from Vmm is the Vm
// object and calling default_vmm() is the easiest way to create one.
let vmm = default_vmm();
let device_manager_state: device_manager::DevicesState =
Snapshot::load_without_crc_check(buf.as_slice())
.unwrap()
.data;
let vm_resources = &mut VmResources::default();
let restore_args = PciDevicesConstructorArgs {
vm: &vmm.vm,
mem: vmm.vm.guest_memory(),
vm_resources,
instance_id: "microvm-id",
event_manager: &mut event_manager,
};
let _restored_dev_manager =
PciDevices::restore(restore_args, &device_manager_state.pci_state).unwrap();
let expected_vm_resources = format!(
r#"{{
"balloon": {{
"amount_mib": 123,
"deflate_on_oom": false,
"stats_polling_interval_s": 1,
"free_page_hinting": false,
"free_page_reporting": false
}},
"drives": [
{{
"drive_id": "root",
"partuuid": null,
"is_root_device": true,
"cache_type": "Unsafe",
"is_read_only": true,
"path_on_host": "{}",
"rate_limiter": null,
"io_engine": "Sync",
"socket": null
}}
],
"boot-source": {{
"kernel_image_path": "",
"initrd_path": null,
"boot_args": null
}},
"cpu-config": null,
"logger": null,
"machine-config": {{
"vcpu_count": 1,
"mem_size_mib": 128,
"smt": false,
"track_dirty_pages": false,
"huge_pages": "None"
}},
"metrics": null,
"mmds-config": {{
"version": "V2",
"network_interfaces": [
"netif"
],
"ipv4_address": "169.254.169.254",
"imds_compat": false
}},
"network-interfaces": [
{{
"iface_id": "netif",
"host_dev_name": "hostname",
"guest_mac": null,
"rx_rate_limiter": null,
"tx_rate_limiter": null
}}
],
"vsock": {{
"guest_cid": 3,
"uds_path": "{}"
}},
"entropy": {{
"rate_limiter": null
}},
"pmem": [
{{
"id": "pmem",
"path_on_host": "{}",
"root_device": true,
"read_only": true
}}
],
"memory-hotplug": {{
"total_size_mib": 1024,
"block_size_mib": 2,
"slot_size_mib": 128
}}
}}"#,
_block_files.last().unwrap().as_path().to_str().unwrap(),
tmp_sock_file.as_path().to_str().unwrap(),
_pmem_files.last().unwrap().as_path().to_str().unwrap(),
);
assert_eq!(
vm_resources
.mmds
.as_ref()
.unwrap()
.lock()
.unwrap()
.version(),
MmdsVersion::V2
);
assert_eq!(
device_manager_state.pci_state.mmds.unwrap().version,
MmdsVersion::V2
);
assert_eq!(
expected_vm_resources,
serde_json::to_string_pretty(&VmmConfig::from(&*vm_resources)).unwrap()
);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/device_manager/mod.rs | src/vmm/src/device_manager/mod.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::convert::Infallible;
use std::fmt::Debug;
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
use acpi::ACPIDeviceManager;
use event_manager::{MutEventSubscriber, SubscriberOps};
#[cfg(target_arch = "x86_64")]
use legacy::{LegacyDeviceError, PortIODeviceManager};
use linux_loader::loader::Cmdline;
use log::{error, info};
use mmio::{MMIODeviceManager, MmioError};
use pci_mngr::{PciDevices, PciDevicesConstructorArgs, PciManagerError};
use persist::MMIODevManagerConstructorArgs;
use serde::{Deserialize, Serialize};
use utils::time::TimestampUs;
use vmm_sys_util::eventfd::EventFd;
use crate::device_manager::acpi::ACPIDeviceError;
#[cfg(target_arch = "x86_64")]
use crate::devices::legacy::I8042Device;
#[cfg(target_arch = "aarch64")]
use crate::devices::legacy::RTCDevice;
use crate::devices::legacy::serial::SerialOut;
use crate::devices::legacy::{IER_RDA_BIT, IER_RDA_OFFSET, SerialDevice};
use crate::devices::pseudo::BootTimer;
use crate::devices::virtio::device::VirtioDevice;
use crate::devices::virtio::transport::mmio::{IrqTrigger, MmioTransport};
use crate::resources::VmResources;
use crate::snapshot::Persist;
use crate::utils::open_file_write_nonblock;
use crate::vstate::bus::BusError;
use crate::vstate::memory::GuestMemoryMmap;
use crate::{EmulateSerialInitError, EventManager, Vm};
/// ACPI device manager.
pub mod acpi;
/// Legacy Device Manager.
pub mod legacy;
/// Memory Mapped I/O Manager.
pub mod mmio;
/// PCIe device manager
pub mod pci_mngr;
/// Device managers (de)serialization support.
pub mod persist;
#[derive(Debug, thiserror::Error, displaydoc::Display)]
/// Error while creating a new [`DeviceManager`]
pub enum DeviceManagerCreateError {
/// Error with EventFd: {0}
EventFd(#[from] std::io::Error),
#[cfg(target_arch = "x86_64")]
/// Legacy device manager error: {0}
PortIOError(#[from] LegacyDeviceError),
/// Resource allocator error: {0}
ResourceAllocator(#[from] vm_allocator::Error),
}
#[derive(Debug, thiserror::Error, displaydoc::Display)]
/// Error while attaching a VirtIO device
pub enum AttachDeviceError {
/// MMIO transport error: {0}
MmioTransport(#[from] MmioError),
/// Error inserting device in bus: {0}
Bus(#[from] BusError),
/// Error while registering ACPI with KVM: {0}
AttachAcpiDevice(#[from] ACPIDeviceError),
#[cfg(target_arch = "aarch64")]
/// Cmdline error
Cmdline,
#[cfg(target_arch = "aarch64")]
/// Error creating serial device: {0}
CreateSerial(#[from] std::io::Error),
/// Error attach PCI device: {0}
PciTransport(#[from] PciManagerError),
}
#[derive(Debug, thiserror::Error, displaydoc::Display)]
/// Error while searching for a VirtIO device
pub enum FindDeviceError {
/// Device not found
DeviceNotFound,
}
#[derive(Debug)]
/// A manager of all peripheral devices of Firecracker
pub struct DeviceManager {
/// MMIO devices
pub mmio_devices: MMIODeviceManager,
#[cfg(target_arch = "x86_64")]
/// Legacy devices
pub legacy_devices: PortIODeviceManager,
/// ACPI devices
pub acpi_devices: ACPIDeviceManager,
/// PCIe devices
pub pci_devices: PciDevices,
}
impl DeviceManager {
// Adds `O_NONBLOCK` to the stdout flags.
fn set_stdout_nonblocking() {
// SAFETY: Call is safe since parameters are valid.
let flags = unsafe { libc::fcntl(libc::STDOUT_FILENO, libc::F_GETFL, 0) };
if flags < 0 {
error!("Could not get Firecracker stdout flags.");
}
// SAFETY: Call is safe since parameters are valid.
let rc =
unsafe { libc::fcntl(libc::STDOUT_FILENO, libc::F_SETFL, flags | libc::O_NONBLOCK) };
if rc < 0 {
error!("Could not set Firecracker stdout to non-blocking.");
}
}
/// Sets up the serial device.
fn setup_serial_device(
event_manager: &mut EventManager,
output: Option<&PathBuf>,
) -> Result<Arc<Mutex<SerialDevice>>, std::io::Error> {
let (serial_in, serial_out) = match output {
Some(path) => (None, open_file_write_nonblock(path).map(SerialOut::File)?),
None => {
Self::set_stdout_nonblocking();
(Some(std::io::stdin()), SerialOut::Stdout(std::io::stdout()))
}
};
let serial = Arc::new(Mutex::new(SerialDevice::new(serial_in, serial_out)?));
event_manager.add_subscriber(serial.clone());
Ok(serial)
}
#[cfg(target_arch = "x86_64")]
fn create_legacy_devices(
event_manager: &mut EventManager,
vcpus_exit_evt: &EventFd,
vm: &Vm,
serial_output: Option<&PathBuf>,
) -> Result<PortIODeviceManager, DeviceManagerCreateError> {
// Create serial device
let serial = Self::setup_serial_device(event_manager, serial_output)?;
let reset_evt = vcpus_exit_evt
.try_clone()
.map_err(DeviceManagerCreateError::EventFd)?;
// Create keyboard emulator for reset event
let i8042 = Arc::new(Mutex::new(I8042Device::new(reset_evt)?));
// create pio dev manager with legacy devices
let mut legacy_devices = PortIODeviceManager::new(serial, i8042)?;
legacy_devices.register_devices(vm)?;
Ok(legacy_devices)
}
#[cfg_attr(target_arch = "aarch64", allow(unused))]
pub fn new(
event_manager: &mut EventManager,
vcpus_exit_evt: &EventFd,
vm: &Vm,
serial_output: Option<&PathBuf>,
) -> Result<Self, DeviceManagerCreateError> {
#[cfg(target_arch = "x86_64")]
let legacy_devices =
Self::create_legacy_devices(event_manager, vcpus_exit_evt, vm, serial_output)?;
Ok(DeviceManager {
mmio_devices: MMIODeviceManager::new(),
#[cfg(target_arch = "x86_64")]
legacy_devices,
acpi_devices: ACPIDeviceManager::new(&mut vm.resource_allocator()),
pci_devices: PciDevices::new(),
})
}
/// Attaches an MMIO VirtioDevice device to the device manager and event manager.
pub(crate) fn attach_mmio_virtio_device<
T: 'static + VirtioDevice + MutEventSubscriber + Debug,
>(
&mut self,
vm: &Vm,
id: String,
device: Arc<Mutex<T>>,
cmdline: &mut Cmdline,
is_vhost_user: bool,
) -> Result<(), AttachDeviceError> {
let interrupt = Arc::new(IrqTrigger::new());
// The device mutex mustn't be locked here otherwise it will deadlock.
let device =
MmioTransport::new(vm.guest_memory().clone(), interrupt, device, is_vhost_user);
self.mmio_devices
.register_mmio_virtio_for_boot(vm, id, device, cmdline)?;
Ok(())
}
/// Attaches a VirtioDevice device to the device manager and event manager.
pub(crate) fn attach_virtio_device<T: 'static + VirtioDevice + MutEventSubscriber + Debug>(
&mut self,
vm: &Arc<Vm>,
id: String,
device: Arc<Mutex<T>>,
cmdline: &mut Cmdline,
is_vhost_user: bool,
) -> Result<(), AttachDeviceError> {
if self.pci_devices.pci_segment.is_some() {
self.pci_devices.attach_pci_virtio_device(vm, id, device)?;
} else {
self.attach_mmio_virtio_device(vm, id, device, cmdline, is_vhost_user)?;
}
Ok(())
}
/// Attaches a [`BootTimer`] to the VM
pub(crate) fn attach_boot_timer_device(
&mut self,
vm: &Vm,
request_ts: TimestampUs,
) -> Result<(), AttachDeviceError> {
let boot_timer = Arc::new(Mutex::new(BootTimer::new(request_ts)));
self.mmio_devices
.register_mmio_boot_timer(&vm.common.mmio_bus, boot_timer)?;
Ok(())
}
pub(crate) fn attach_vmgenid_device(&mut self, vm: &Vm) -> Result<(), AttachDeviceError> {
self.acpi_devices.attach_vmgenid(vm)?;
Ok(())
}
#[cfg(target_arch = "x86_64")]
pub(crate) fn attach_vmclock_device(&mut self, vm: &Vm) -> Result<(), AttachDeviceError> {
self.acpi_devices.attach_vmclock(vm)?;
Ok(())
}
#[cfg(target_arch = "aarch64")]
pub(crate) fn attach_legacy_devices_aarch64(
&mut self,
vm: &Vm,
event_manager: &mut EventManager,
cmdline: &mut Cmdline,
serial_out_path: Option<&PathBuf>,
) -> Result<(), AttachDeviceError> {
// Serial device setup.
let cmdline_contains_console = cmdline
.as_cstring()
.map_err(|_| AttachDeviceError::Cmdline)?
.into_string()
.map_err(|_| AttachDeviceError::Cmdline)?
.contains("console=");
if cmdline_contains_console {
let serial = Self::setup_serial_device(event_manager, serial_out_path)?;
self.mmio_devices.register_mmio_serial(vm, serial, None)?;
self.mmio_devices.add_mmio_serial_to_cmdline(cmdline)?;
}
let rtc = Arc::new(Mutex::new(RTCDevice::new()));
self.mmio_devices.register_mmio_rtc(vm, rtc, None)?;
Ok(())
}
/// Enables PCIe support for Firecracker devices
pub fn enable_pci(&mut self, vm: &Arc<Vm>) -> Result<(), PciManagerError> {
self.pci_devices.attach_pci_segment(vm)
}
/// Artificially kick VirtIO devices as if they had external events.
pub fn kick_virtio_devices(&self) {
info!("Artificially kick devices");
// Go through MMIO VirtIO devices
let _: Result<(), MmioError> = self.mmio_devices.for_each_virtio_device(|_, _, device| {
let mmio_transport_locked = device.inner.lock().expect("Poisoned lock");
mmio_transport_locked
.device()
.lock()
.expect("Poisoned lock")
.kick();
Ok(())
});
// Go through PCI VirtIO devices
for virtio_pci_device in self.pci_devices.virtio_devices.values() {
virtio_pci_device
.lock()
.expect("Poisoned lock")
.virtio_device()
.lock()
.expect("Poisoned lock")
.kick();
}
}
fn do_mark_virtio_queue_memory_dirty(
device: Arc<Mutex<dyn VirtioDevice>>,
mem: &GuestMemoryMmap,
) {
// SAFETY:
// This should never fail as we mark pages only if device has already been activated,
// and the address validation was already performed on device activation.
let mut locked_device = device.lock().expect("Poisoned lock");
if locked_device.is_activated() {
locked_device.mark_queue_memory_dirty(mem).unwrap()
}
}
/// Mark queue memory dirty for activated VirtIO devices
pub fn mark_virtio_queue_memory_dirty(&self, mem: &GuestMemoryMmap) {
// Go through MMIO VirtIO devices
let _: Result<(), Infallible> = self.mmio_devices.for_each_virtio_device(|_, _, device| {
let mmio_transport_locked = device.inner.lock().expect("Poisoned locked");
Self::do_mark_virtio_queue_memory_dirty(mmio_transport_locked.device(), mem);
Ok(())
});
// Go through PCI VirtIO devices
for device in self.pci_devices.virtio_devices.values() {
let virtio_device = device.lock().expect("Poisoned lock").virtio_device();
Self::do_mark_virtio_queue_memory_dirty(virtio_device, mem);
}
}
/// Get a VirtIO device of type `virtio_type` with ID `device_id`
pub fn get_virtio_device(
&self,
virtio_type: u32,
device_id: &str,
) -> Option<Arc<Mutex<dyn VirtioDevice>>> {
if self.pci_devices.pci_segment.is_some() {
let pci_device = self.pci_devices.get_virtio_device(virtio_type, device_id)?;
Some(
pci_device
.lock()
.expect("Poisoned lock")
.virtio_device()
.clone(),
)
} else {
let mmio_device = self
.mmio_devices
.get_virtio_device(virtio_type, device_id)?;
Some(
mmio_device
.inner
.lock()
.expect("Poisoned lock")
.device()
.clone(),
)
}
}
/// Run fn `f()` for the virtio device matching `virtio_type` and `id`.
pub fn with_virtio_device<T, F, R>(&self, id: &str, f: F) -> Result<R, FindDeviceError>
where
T: VirtioDevice + 'static + Debug,
F: FnOnce(&mut T) -> R,
{
if let Some(device) = self.get_virtio_device(T::const_device_type(), id) {
let mut dev = device.lock().expect("Poisoned lock");
Ok(f(dev
.as_mut_any()
.downcast_mut::<T>()
.expect("Invalid device for a given device type")))
} else {
Err(FindDeviceError::DeviceNotFound)
}
}
}
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
/// State of devices in the system
pub struct DevicesState {
/// MMIO devices state
pub mmio_state: persist::DeviceStates,
/// ACPI devices state
pub acpi_state: persist::ACPIDeviceManagerState,
/// PCI devices state
pub pci_state: pci_mngr::PciDevicesState,
}
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum DevicePersistError {
/// Error restoring MMIO devices: {0}
MmioRestore(#[from] persist::DevicePersistError),
/// Error restoring ACPI devices: {0}
AcpiRestore(#[from] ACPIDeviceError),
/// Error restoring PCI devices: {0}
PciRestore(#[from] PciManagerError),
/// Error notifying VMGenID device: {0}
VmGenidUpdate(#[from] std::io::Error),
/// Error resetting serial console: {0}
SerialRestore(#[from] EmulateSerialInitError),
/// Error inserting device in bus: {0}
Bus(#[from] BusError),
/// Error creating DeviceManager: {0}
DeviceManager(#[from] DeviceManagerCreateError),
}
pub struct DeviceRestoreArgs<'a> {
pub mem: &'a GuestMemoryMmap,
pub vm: &'a Arc<Vm>,
pub event_manager: &'a mut EventManager,
pub vcpus_exit_evt: &'a EventFd,
pub vm_resources: &'a mut VmResources,
pub instance_id: &'a str,
}
impl std::fmt::Debug for DeviceRestoreArgs<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DeviceRestoreArgs")
.field("mem", &self.mem)
.field("vm", &self.vm)
.field("vm_resources", &self.vm_resources)
.field("instance_id", &self.instance_id)
.finish()
}
}
impl<'a> Persist<'a> for DeviceManager {
type State = DevicesState;
type ConstructorArgs = DeviceRestoreArgs<'a>;
type Error = DevicePersistError;
fn save(&self) -> Self::State {
DevicesState {
mmio_state: self.mmio_devices.save(),
acpi_state: self.acpi_devices.save(),
pci_state: self.pci_devices.save(),
}
}
fn restore(
constructor_args: Self::ConstructorArgs,
state: &Self::State,
) -> Result<Self, Self::Error> {
// Setup legacy devices in case of x86
#[cfg(target_arch = "x86_64")]
let legacy_devices = Self::create_legacy_devices(
constructor_args.event_manager,
constructor_args.vcpus_exit_evt,
constructor_args.vm,
constructor_args.vm_resources.serial_out_path.as_ref(),
)?;
// Restore MMIO devices
let mmio_ctor_args = MMIODevManagerConstructorArgs {
mem: constructor_args.mem,
vm: constructor_args.vm,
event_manager: constructor_args.event_manager,
vm_resources: constructor_args.vm_resources,
instance_id: constructor_args.instance_id,
};
let mmio_devices = MMIODeviceManager::restore(mmio_ctor_args, &state.mmio_state)?;
// Restore ACPI devices
let mut acpi_devices = ACPIDeviceManager::restore(constructor_args.vm, &state.acpi_state)?;
acpi_devices.vmgenid.notify_guest()?;
// Restore PCI devices
let pci_ctor_args = PciDevicesConstructorArgs {
vm: constructor_args.vm,
mem: constructor_args.mem,
vm_resources: constructor_args.vm_resources,
instance_id: constructor_args.instance_id,
event_manager: constructor_args.event_manager,
};
let pci_devices = PciDevices::restore(pci_ctor_args, &state.pci_state)?;
let device_manager = DeviceManager {
mmio_devices,
#[cfg(target_arch = "x86_64")]
legacy_devices,
acpi_devices,
pci_devices,
};
// Restore serial.
// We need to do that after we restore mmio devices, otherwise it won't succeed in Aarch64
device_manager.emulate_serial_init()?;
Ok(device_manager)
}
}
impl DeviceManager {
/// Sets RDA bit in serial console
pub fn emulate_serial_init(&self) -> Result<(), EmulateSerialInitError> {
// When restoring from a previously saved state, there is no serial
// driver initialization, therefore the RDA (Received Data Available)
// interrupt is not enabled. Because of that, the driver won't get
// notified of any bytes that we send to the guest. The clean solution
// would be to save the whole serial device state when we do the vm
// serialization. For now we set that bit manually
#[cfg(target_arch = "aarch64")]
{
if let Some(device) = &self.mmio_devices.serial {
let mut device_locked = device.inner.lock().expect("Poisoned lock");
device_locked
.serial
.write(IER_RDA_OFFSET, IER_RDA_BIT)
.map_err(|_| EmulateSerialInitError(std::io::Error::last_os_error()))?;
}
Ok(())
}
#[cfg(target_arch = "x86_64")]
{
let mut serial = self
.legacy_devices
.stdio_serial
.lock()
.expect("Poisoned lock");
serial
.serial
.write(IER_RDA_OFFSET, IER_RDA_BIT)
.map_err(|_| EmulateSerialInitError(std::io::Error::last_os_error()))?;
Ok(())
}
}
}
#[cfg(test)]
pub(crate) mod tests {
use super::*;
#[cfg(target_arch = "aarch64")]
use crate::builder::tests::default_vmm;
use crate::vstate::resources::ResourceAllocator;
pub(crate) fn default_device_manager() -> DeviceManager {
let mut resource_allocator = ResourceAllocator::new();
let mmio_devices = MMIODeviceManager::new();
let acpi_devices = ACPIDeviceManager::new(&mut resource_allocator);
let pci_devices = PciDevices::new();
#[cfg(target_arch = "x86_64")]
let legacy_devices = PortIODeviceManager::new(
Arc::new(Mutex::new(
SerialDevice::new(None, SerialOut::Sink).unwrap(),
)),
Arc::new(Mutex::new(
I8042Device::new(EventFd::new(libc::EFD_NONBLOCK).unwrap()).unwrap(),
)),
)
.unwrap();
DeviceManager {
mmio_devices,
#[cfg(target_arch = "x86_64")]
legacy_devices,
acpi_devices,
pci_devices,
}
}
#[cfg(target_arch = "aarch64")]
#[test]
fn test_attach_legacy_serial() {
let mut vmm = default_vmm();
assert!(vmm.device_manager.mmio_devices.rtc.is_none());
assert!(vmm.device_manager.mmio_devices.serial.is_none());
let mut cmdline = Cmdline::new(4096).unwrap();
let mut event_manager = EventManager::new().unwrap();
vmm.device_manager
.attach_legacy_devices_aarch64(&vmm.vm, &mut event_manager, &mut cmdline, None)
.unwrap();
assert!(vmm.device_manager.mmio_devices.rtc.is_some());
assert!(vmm.device_manager.mmio_devices.serial.is_none());
let mut vmm = default_vmm();
cmdline.insert("console", "/dev/blah").unwrap();
vmm.device_manager
.attach_legacy_devices_aarch64(&vmm.vm, &mut event_manager, &mut cmdline, None)
.unwrap();
assert!(vmm.device_manager.mmio_devices.rtc.is_some());
assert!(vmm.device_manager.mmio_devices.serial.is_some());
assert!(
cmdline
.as_cstring()
.unwrap()
.into_string()
.unwrap()
.contains(&format!(
"earlycon=uart,mmio,0x{:08x}",
vmm.device_manager
.mmio_devices
.serial
.as_ref()
.unwrap()
.resources
.addr
))
);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/device_manager/legacy.rs | src/vmm/src/device_manager/legacy.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
#![cfg(target_arch = "x86_64")]
use std::fmt::Debug;
use std::sync::{Arc, Mutex};
use acpi_tables::aml::AmlError;
use acpi_tables::{Aml, aml};
use libc::EFD_NONBLOCK;
use vm_superio::Serial;
use vmm_sys_util::eventfd::EventFd;
use crate::Vm;
use crate::devices::legacy::serial::SerialOut;
use crate::devices::legacy::{EventFdTrigger, I8042Device, SerialDevice, SerialEventsWrapper};
use crate::vstate::bus::BusError;
/// Errors corresponding to the `PortIODeviceManager`.
#[derive(Debug, derive_more::From, thiserror::Error, displaydoc::Display)]
pub enum LegacyDeviceError {
/// Failed to add legacy device to Bus: {0}
BusError(BusError),
/// Failed to create EventFd: {0}
EventFd(std::io::Error),
}
/// The `PortIODeviceManager` is a wrapper that is used for registering legacy devices
/// on an I/O Bus. It currently manages the uart and i8042 devices.
/// The `LegacyDeviceManger` should be initialized only by using the constructor.
#[derive(Debug)]
pub struct PortIODeviceManager {
// BusDevice::Serial
pub stdio_serial: Arc<Mutex<SerialDevice>>,
// BusDevice::I8042Device
pub i8042: Arc<Mutex<I8042Device>>,
// Communication event on ports 1 & 3.
pub com_evt_1_3: EventFdTrigger,
// Communication event on ports 2 & 4.
pub com_evt_2_4: EventFdTrigger,
// Keyboard event.
pub kbd_evt: EventFd,
}
impl PortIODeviceManager {
/// x86 global system interrupt for communication events on serial ports 1
/// & 3. See
/// <https://en.wikipedia.org/wiki/Interrupt_request_(PC_architecture)>.
const COM_EVT_1_3_GSI: u32 = 4;
/// x86 global system interrupt for communication events on serial ports 2
/// & 4. See
/// <https://en.wikipedia.org/wiki/Interrupt_request_(PC_architecture)>.
const COM_EVT_2_4_GSI: u32 = 3;
/// x86 global system interrupt for keyboard port.
/// See <https://en.wikipedia.org/wiki/Interrupt_request_(PC_architecture)>.
const KBD_EVT_GSI: u32 = 1;
/// Legacy serial port device addresses. See
/// <https://tldp.org/HOWTO/Serial-HOWTO-10.html#ss10.1>.
const SERIAL_PORT_ADDRESSES: [u64; 4] = [0x3f8, 0x2f8, 0x3e8, 0x2e8];
/// Size of legacy serial ports.
const SERIAL_PORT_SIZE: u64 = 0x8;
/// i8042 keyboard data register address. See
/// <https://elixir.bootlin.com/linux/latest/source/drivers/input/serio/i8042-io.h#L41>.
const I8042_KDB_DATA_REGISTER_ADDRESS: u64 = 0x060;
/// i8042 keyboard data register size.
const I8042_KDB_DATA_REGISTER_SIZE: u64 = 0x5;
/// Create a new DeviceManager handling legacy devices (uart, i8042).
pub fn new(
stdio_serial: Arc<Mutex<SerialDevice>>,
i8042: Arc<Mutex<I8042Device>>,
) -> Result<Self, LegacyDeviceError> {
let com_evt_1_3 = stdio_serial
.lock()
.expect("Poisoned lock")
.serial
.interrupt_evt()
.try_clone()?;
let com_evt_2_4 = EventFdTrigger::new(EventFd::new(EFD_NONBLOCK)?);
let kbd_evt = i8042
.lock()
.expect("Poisoned lock")
.kbd_interrupt_evt
.try_clone()?;
Ok(PortIODeviceManager {
stdio_serial,
i8042,
com_evt_1_3,
com_evt_2_4,
kbd_evt,
})
}
/// Register supported legacy devices.
pub fn register_devices(&mut self, vm: &Vm) -> Result<(), LegacyDeviceError> {
let serial_2_4 = Arc::new(Mutex::new(SerialDevice {
serial: Serial::with_events(
self.com_evt_2_4.try_clone()?.try_clone()?,
SerialEventsWrapper {
buffer_ready_event_fd: None,
},
SerialOut::Sink,
),
input: None,
}));
let serial_1_3 = Arc::new(Mutex::new(SerialDevice {
serial: Serial::with_events(
self.com_evt_1_3.try_clone()?.try_clone()?,
SerialEventsWrapper {
buffer_ready_event_fd: None,
},
SerialOut::Sink,
),
input: None,
}));
let io_bus = &vm.pio_bus;
io_bus.insert(
self.stdio_serial.clone(),
Self::SERIAL_PORT_ADDRESSES[0],
Self::SERIAL_PORT_SIZE,
)?;
io_bus.insert(
serial_2_4.clone(),
Self::SERIAL_PORT_ADDRESSES[1],
Self::SERIAL_PORT_SIZE,
)?;
io_bus.insert(
serial_1_3,
Self::SERIAL_PORT_ADDRESSES[2],
Self::SERIAL_PORT_SIZE,
)?;
io_bus.insert(
serial_2_4,
Self::SERIAL_PORT_ADDRESSES[3],
Self::SERIAL_PORT_SIZE,
)?;
io_bus.insert(
self.i8042.clone(),
Self::I8042_KDB_DATA_REGISTER_ADDRESS,
Self::I8042_KDB_DATA_REGISTER_SIZE,
)?;
vm.register_irq(&self.com_evt_1_3, Self::COM_EVT_1_3_GSI)
.map_err(|e| {
LegacyDeviceError::EventFd(std::io::Error::from_raw_os_error(e.errno()))
})?;
vm.register_irq(&self.com_evt_2_4, Self::COM_EVT_2_4_GSI)
.map_err(|e| {
LegacyDeviceError::EventFd(std::io::Error::from_raw_os_error(e.errno()))
})?;
vm.register_irq(&self.kbd_evt, Self::KBD_EVT_GSI)
.map_err(|e| {
LegacyDeviceError::EventFd(std::io::Error::from_raw_os_error(e.errno()))
})?;
Ok(())
}
pub(crate) fn append_aml_bytes(bytes: &mut Vec<u8>) -> Result<(), AmlError> {
// Set up COM devices
let gsi = [
Self::COM_EVT_1_3_GSI,
Self::COM_EVT_2_4_GSI,
Self::COM_EVT_1_3_GSI,
Self::COM_EVT_2_4_GSI,
];
for com in 0u8..4 {
// COM1
aml::Device::new(
format!("_SB_.COM{}", com + 1).as_str().try_into()?,
vec![
&aml::Name::new("_HID".try_into()?, &aml::EisaName::new("PNP0501")?)?,
&aml::Name::new("_UID".try_into()?, &com)?,
&aml::Name::new("_DDN".try_into()?, &format!("COM{}", com + 1))?,
&aml::Name::new(
"_CRS".try_into().unwrap(),
&aml::ResourceTemplate::new(vec![
&aml::Interrupt::new(true, true, false, false, gsi[com as usize]),
&aml::Io::new(
PortIODeviceManager::SERIAL_PORT_ADDRESSES[com as usize]
.try_into()
.unwrap(),
PortIODeviceManager::SERIAL_PORT_ADDRESSES[com as usize]
.try_into()
.unwrap(),
1,
PortIODeviceManager::SERIAL_PORT_SIZE.try_into().unwrap(),
),
]),
)?,
],
)
.append_aml_bytes(bytes)?;
}
// Setup i8042
aml::Device::new(
"_SB_.PS2_".try_into()?,
vec![
&aml::Name::new("_HID".try_into()?, &aml::EisaName::new("PNP0303")?)?,
&aml::Method::new(
"_STA".try_into()?,
0,
false,
vec![&aml::Return::new(&0x0fu8)],
),
&aml::Name::new(
"_CRS".try_into()?,
&aml::ResourceTemplate::new(vec![
&aml::Io::new(
PortIODeviceManager::I8042_KDB_DATA_REGISTER_ADDRESS
.try_into()
.unwrap(),
PortIODeviceManager::I8042_KDB_DATA_REGISTER_ADDRESS
.try_into()
.unwrap(),
1u8,
1u8,
),
// Fake a command port so Linux stops complaining
&aml::Io::new(0x0064, 0x0064, 1u8, 1u8),
&aml::Interrupt::new(true, true, false, false, Self::KBD_EVT_GSI),
]),
)?,
],
)
.append_aml_bytes(bytes)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::vstate::vm::tests::setup_vm_with_memory;
#[test]
fn test_register_legacy_devices() {
let (_, vm) = setup_vm_with_memory(0x1000);
vm.setup_irqchip().unwrap();
let mut ldm = PortIODeviceManager::new(
Arc::new(Mutex::new(SerialDevice {
serial: Serial::with_events(
EventFdTrigger::new(EventFd::new(EFD_NONBLOCK).unwrap()),
SerialEventsWrapper {
buffer_ready_event_fd: None,
},
SerialOut::Sink,
),
input: None,
})),
Arc::new(Mutex::new(
I8042Device::new(EventFd::new(libc::EFD_NONBLOCK).unwrap()).unwrap(),
)),
)
.unwrap();
ldm.register_devices(&vm).unwrap();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/src/device_manager/mmio.rs | src/vmm/src/device_manager/mmio.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::collections::HashMap;
use std::fmt::Debug;
use std::sync::{Arc, Mutex};
#[cfg(target_arch = "x86_64")]
use acpi_tables::{Aml, aml};
use kvm_ioctls::IoEventAddress;
use linux_loader::cmdline as kernel_cmdline;
#[cfg(target_arch = "x86_64")]
use log::debug;
use serde::{Deserialize, Serialize};
use vm_allocator::AllocPolicy;
use crate::Vm;
use crate::arch::BOOT_DEVICE_MEM_START;
#[cfg(target_arch = "aarch64")]
use crate::arch::{RTC_MEM_START, SERIAL_MEM_START};
#[cfg(target_arch = "aarch64")]
use crate::devices::legacy::{RTCDevice, SerialDevice};
use crate::devices::pseudo::BootTimer;
use crate::devices::virtio::transport::mmio::MmioTransport;
use crate::vstate::bus::{Bus, BusError};
#[cfg(target_arch = "x86_64")]
use crate::vstate::memory::GuestAddress;
use crate::vstate::resources::ResourceAllocator;
/// Errors for MMIO device manager.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum MmioError {
/// Failed to allocate requested resource: {0}
Allocator(#[from] vm_allocator::Error),
/// Failed to insert device on the bus: {0}
BusInsert(#[from] BusError),
/// Failed to allocate requested resourc: {0}
Cmdline(#[from] linux_loader::cmdline::Error),
/// Could not create IRQ for MMIO device: {0}
CreateIrq(#[from] std::io::Error),
/// Invalid MMIO IRQ configuration.
InvalidIrqConfig,
/// Failed to register IO event: {0}
RegisterIoEvent(kvm_ioctls::Error),
/// Failed to register irqfd: {0}
RegisterIrqFd(kvm_ioctls::Error),
#[cfg(target_arch = "x86_64")]
/// Failed to create AML code for device
AmlError(#[from] aml::AmlError),
}
/// This represents the size of the mmio device specified to the kernel through ACPI and as a
/// command line option.
/// It has to be larger than 0x100 (the offset where the configuration space starts from
/// the beginning of the memory mapped device registers) + the size of the configuration space
/// Currently hardcoded to 4K.
pub const MMIO_LEN: u64 = 0x1000;
/// Stores the address range and irq allocated to this device.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct MMIODeviceInfo {
/// Mmio address at which the device is registered.
pub addr: u64,
/// Mmio addr range length.
pub len: u64,
/// Used GSI (interrupt line) for the device.
pub gsi: Option<u32>,
}
#[cfg(target_arch = "x86_64")]
fn add_virtio_aml(
dsdt_data: &mut Vec<u8>,
addr: u64,
len: u64,
gsi: u32,
) -> Result<(), aml::AmlError> {
let dev_id = gsi - crate::arch::GSI_LEGACY_START;
debug!(
"acpi: Building AML for VirtIO device _SB_.V{:03}. memory range: {:#010x}:{} gsi: {}",
dev_id, addr, len, gsi
);
aml::Device::new(
format!("V{:03}", dev_id).as_str().try_into()?,
vec![
&aml::Name::new("_HID".try_into()?, &"LNRO0005")?,
&aml::Name::new("_UID".try_into()?, &dev_id)?,
&aml::Name::new("_CCA".try_into()?, &aml::ONE)?,
&aml::Name::new(
"_CRS".try_into()?,
&aml::ResourceTemplate::new(vec![
&aml::Memory32Fixed::new(
true,
addr.try_into().unwrap(),
len.try_into().unwrap(),
),
&aml::Interrupt::new(true, true, false, false, gsi),
]),
)?,
],
)
.append_aml_bytes(dsdt_data)
}
#[derive(Debug, Clone)]
/// A descriptor for MMIO devices
pub struct MMIODevice<T> {
/// MMIO resources allocated to the device
pub(crate) resources: MMIODeviceInfo,
/// The actual device
pub(crate) inner: Arc<Mutex<T>>,
}
/// Manages the complexities of registering a MMIO device.
#[derive(Debug, Default)]
pub struct MMIODeviceManager {
/// VirtIO devices using an MMIO transport layer
pub(crate) virtio_devices: HashMap<(u32, String), MMIODevice<MmioTransport>>,
/// Boot timer device
pub(crate) boot_timer: Option<MMIODevice<BootTimer>>,
#[cfg(target_arch = "aarch64")]
/// Real-Time clock on Aarch64 platforms
pub(crate) rtc: Option<MMIODevice<RTCDevice>>,
#[cfg(target_arch = "aarch64")]
/// Serial device on Aarch64 platforms
pub(crate) serial: Option<MMIODevice<SerialDevice>>,
#[cfg(target_arch = "x86_64")]
// We create the AML byte code for every VirtIO device in the order we build
// it, so that we ensure the root block device is appears first in the DSDT.
// This is needed, so that the root device appears as `/dev/vda` in the guest
// filesystem.
// The alternative would be that we iterate the bus to get the data after all
// of the devices are build. However, iterating the bus won't give us the
// devices in the order they were added.
pub(crate) dsdt_data: Vec<u8>,
}
impl MMIODeviceManager {
/// Create a new DeviceManager handling mmio devices (virtio net, block).
pub fn new() -> MMIODeviceManager {
Default::default()
}
/// Allocates resources for a new device to be added.
fn allocate_mmio_resources(
&mut self,
resource_allocator: &mut ResourceAllocator,
irq_count: u32,
) -> Result<MMIODeviceInfo, MmioError> {
let gsi = match resource_allocator.allocate_gsi_legacy(irq_count)?[..] {
[] => None,
[gsi] => Some(gsi),
_ => return Err(MmioError::InvalidIrqConfig),
};
let device_info = MMIODeviceInfo {
addr: resource_allocator.allocate_32bit_mmio_memory(
MMIO_LEN,
MMIO_LEN,
AllocPolicy::FirstMatch,
)?,
len: MMIO_LEN,
gsi,
};
Ok(device_info)
}
/// Register a virtio-over-MMIO device to be used via MMIO transport at a specific slot.
pub fn register_mmio_virtio(
&mut self,
vm: &Vm,
device_id: String,
device: MMIODevice<MmioTransport>,
) -> Result<(), MmioError> {
// Our virtio devices are currently hardcoded to use a single IRQ.
// Validate that requirement.
let gsi = device.resources.gsi.ok_or(MmioError::InvalidIrqConfig)?;
let identifier;
{
let mmio_device = device.inner.lock().expect("Poisoned lock");
let locked_device = mmio_device.locked_device();
identifier = (locked_device.device_type(), device_id);
for (i, queue_evt) in locked_device.queue_events().iter().enumerate() {
let io_addr = IoEventAddress::Mmio(
device.resources.addr + u64::from(crate::devices::virtio::NOTIFY_REG_OFFSET),
);
vm.fd()
.register_ioevent(queue_evt, &io_addr, u32::try_from(i).unwrap())
.map_err(MmioError::RegisterIoEvent)?;
}
vm.register_irq(&mmio_device.interrupt.irq_evt, gsi)
.map_err(MmioError::RegisterIrqFd)?;
}
vm.common.mmio_bus.insert(
device.inner.clone(),
device.resources.addr,
device.resources.len,
)?;
self.virtio_devices.insert(identifier, device);
Ok(())
}
/// Append a registered virtio-over-MMIO device to the kernel cmdline.
#[cfg(target_arch = "x86_64")]
pub fn add_virtio_device_to_cmdline(
cmdline: &mut kernel_cmdline::Cmdline,
device_info: &MMIODeviceInfo,
) -> Result<(), MmioError> {
// as per doc, [virtio_mmio.]device=<size>@<baseaddr>:<irq> needs to be appended
// to kernel command line for virtio mmio devices to get recognized
// the size parameter has to be transformed to KiB, so dividing hexadecimal value in
// bytes to 1024; further, the '{}' formatting rust construct will automatically
// transform it to decimal
cmdline
.add_virtio_mmio_device(
device_info.len,
GuestAddress(device_info.addr),
device_info.gsi.unwrap(),
None,
)
.map_err(MmioError::Cmdline)
}
/// Allocate slot and register an already created virtio-over-MMIO device. Also adds the device
/// to the boot cmdline.
///
/// `_cmdline` is only used on x86_64; aarch64 guests discover virtio-mmio
/// devices through the FDT instead.
pub fn register_mmio_virtio_for_boot(
    &mut self,
    vm: &Vm,
    device_id: String,
    mmio_device: MmioTransport,
    _cmdline: &mut kernel_cmdline::Cmdline,
) -> Result<(), MmioError> {
    // Reserve a fresh MMIO slot plus a single GSI for the device.
    let device = MMIODevice {
        resources: self.allocate_mmio_resources(&mut vm.resource_allocator(), 1)?,
        inner: Arc::new(Mutex::new(mmio_device)),
    };
    // On x86_64 the device is advertised both on the kernel cmdline and in the
    // ACPI DSDT (AML) so the guest can discover it.
    #[cfg(target_arch = "x86_64")]
    {
        Self::add_virtio_device_to_cmdline(_cmdline, &device.resources)?;
        add_virtio_aml(
            &mut self.dsdt_data,
            device.resources.addr,
            device.resources.len,
            // We are sure that `irqs` has at least one element; allocate_mmio_resources makes
            // sure of it.
            device.resources.gsi.unwrap(),
        )?;
    }
    self.register_mmio_virtio(vm, device_id, device)?;
    Ok(())
}
#[cfg(target_arch = "aarch64")]
/// Register an early console at the specified MMIO configuration if given as parameter,
/// otherwise allocate a new MMIO resources for it.
///
/// # Errors
///
/// Returns [`MmioError::InvalidIrqConfig`] if the provided device info lacks a
/// GSI, and [`MmioError::RegisterIrqFd`] if the IRQ cannot be registered.
pub fn register_mmio_serial(
    &mut self,
    vm: &Vm,
    serial: Arc<Mutex<SerialDevice>>,
    device_info_opt: Option<MMIODeviceInfo>,
) -> Result<(), MmioError> {
    // Create a new MMIODeviceInfo object on boot path or unwrap the
    // existing object on restore path.
    let device_info = if let Some(device_info) = device_info_opt {
        device_info
    } else {
        let gsi = vm.resource_allocator().allocate_gsi_legacy(1)?;
        MMIODeviceInfo {
            addr: SERIAL_MEM_START,
            len: MMIO_LEN,
            gsi: Some(gsi[0]),
        }
    };
    // A restored `MMIODeviceInfo` could, in principle, lack a GSI; report that
    // as an error instead of panicking.
    let gsi = device_info.gsi.ok_or(MmioError::InvalidIrqConfig)?;
    vm.register_irq(
        serial.lock().expect("Poisoned lock").serial.interrupt_evt(),
        gsi,
    )
    .map_err(MmioError::RegisterIrqFd)?;
    let device = MMIODevice {
        resources: device_info,
        inner: serial,
    };
    vm.common.mmio_bus.insert(
        device.inner.clone(),
        device.resources.addr,
        device.resources.len,
    )?;
    self.serial = Some(device);
    Ok(())
}
#[cfg(target_arch = "aarch64")]
/// Append the registered early console to the kernel cmdline.
///
/// This assumes that the device has been registered with the device manager.
///
/// # Panics
///
/// Panics if no serial device has been registered (`self.serial` is `None`).
pub fn add_mmio_serial_to_cmdline(
    &self,
    cmdline: &mut kernel_cmdline::Cmdline,
) -> Result<(), MmioError> {
    let device = self.serial.as_ref().unwrap();
    // `earlycon=uart,mmio,<addr>` lets the guest kernel log before the full
    // console driver is up.
    cmdline.insert(
        "earlycon",
        &format!("uart,mmio,0x{:08x}", device.resources.addr),
    )?;
    Ok(())
}
#[cfg(target_arch = "aarch64")]
/// Create and register a MMIO RTC device at the specified MMIO configuration if
/// given as parameter, otherwise allocate a new MMIO resources for it.
pub fn register_mmio_rtc(
    &mut self,
    vm: &Vm,
    rtc: Arc<Mutex<RTCDevice>>,
    device_info_opt: Option<MMIODeviceInfo>,
) -> Result<(), MmioError> {
    // Reuse the configuration captured in a snapshot (restore path), or carve
    // out fresh resources for the device (boot path).
    let resources = match device_info_opt {
        Some(info) => info,
        None => {
            let gsi = vm.resource_allocator().allocate_gsi_legacy(1)?;
            MMIODeviceInfo {
                addr: RTC_MEM_START,
                len: MMIO_LEN,
                gsi: Some(gsi[0]),
            }
        }
    };
    let device = MMIODevice {
        resources,
        inner: rtc,
    };
    // Expose the device on the MMIO bus and remember it for later lookups.
    vm.common.mmio_bus.insert(
        device.inner.clone(),
        device.resources.addr,
        device.resources.len,
    )?;
    self.rtc = Some(device);
    Ok(())
}
/// Register a boot timer device.
pub fn register_mmio_boot_timer(
    &mut self,
    mmio_bus: &Bus,
    boot_timer: Arc<Mutex<BootTimer>>,
) -> Result<(), MmioError> {
    // The boot timer lives at a fixed guest-physical address and raises no
    // interrupts, hence `gsi: None`.
    let device = MMIODevice {
        resources: MMIODeviceInfo {
            addr: BOOT_DEVICE_MEM_START,
            len: MMIO_LEN,
            gsi: None,
        },
        inner: boot_timer,
    };
    mmio_bus.insert(
        device.inner.clone(),
        device.resources.addr,
        device.resources.len,
    )?;
    self.boot_timer = Some(device);
    Ok(())
}
/// Gets the specified device.
///
/// Registered virtio devices are keyed by `(virtio device type, device id)`.
pub fn get_virtio_device(
    &self,
    virtio_type: u32,
    device_id: &str,
) -> Option<&MMIODevice<MmioTransport>> {
    let key = (virtio_type, String::from(device_id));
    self.virtio_devices.get(&key)
}
/// Run fn for each registered virtio device.
///
/// Iteration stops at the first callback error, which is returned to the
/// caller.
pub fn for_each_virtio_device<F, E: Debug>(&self, mut f: F) -> Result<(), E>
where
    F: FnMut(&u32, &String, &MMIODevice<MmioTransport>) -> Result<(), E>,
{
    self.virtio_devices
        .iter()
        .try_for_each(|((virtio_type, device_id), device)| f(virtio_type, device_id, device))
}
#[cfg(target_arch = "aarch64")]
/// Returns the MMIO resources (address, length, GSI) of every registered
/// virtio device, in map iteration order.
pub fn virtio_device_info(&self) -> Vec<&MMIODeviceInfo> {
    // Idiomatic replacement for the manual `for (_, dev) in iter()` push loop.
    self.virtio_devices
        .values()
        .map(|dev| &dev.resources)
        .collect()
}
#[cfg(target_arch = "aarch64")]
/// Returns the MMIO resources of the RTC device, if one has been registered.
pub fn rtc_device_info(&self) -> Option<&MMIODeviceInfo> {
    self.rtc.as_ref().map(|device| &device.resources)
}
#[cfg(target_arch = "aarch64")]
/// Returns the MMIO resources of the serial device, if one has been registered.
pub fn serial_device_info(&self) -> Option<&MMIODeviceInfo> {
    self.serial.as_ref().map(|device| &device.resources)
}
}
#[cfg(test)]
pub(crate) mod tests {
    //! Unit tests for the MMIO device manager: registration, GSI/slot
    //! allocation and lookup of virtio-over-MMIO devices.

    use std::ops::Deref;
    use std::sync::Arc;

    use vmm_sys_util::eventfd::EventFd;

    use super::*;
    use crate::devices::virtio::ActivateError;
    use crate::devices::virtio::device::VirtioDevice;
    use crate::devices::virtio::queue::Queue;
    use crate::devices::virtio::transport::VirtioInterrupt;
    use crate::devices::virtio::transport::mmio::IrqTrigger;
    use crate::test_utils::multi_region_mem_raw;
    use crate::vstate::kvm::Kvm;
    use crate::vstate::memory::{GuestAddress, GuestMemoryMmap};
    use crate::{Vm, arch, impl_device_type};

    // Every `DummyDevice` exposes a single queue of this size.
    const QUEUE_SIZES: &[u16] = &[64];

    impl MMIODeviceManager {
        // Test-only helper: wraps `device` in an `MmioTransport`, registers it
        // for boot, and returns the guest-physical address it was assigned.
        pub(crate) fn register_virtio_test_device(
            &mut self,
            vm: &Vm,
            guest_mem: GuestMemoryMmap,
            device: Arc<Mutex<dyn VirtioDevice>>,
            cmdline: &mut kernel_cmdline::Cmdline,
            dev_id: &str,
        ) -> Result<u64, MmioError> {
            let interrupt = Arc::new(IrqTrigger::new());
            let mmio_device = MmioTransport::new(guest_mem, interrupt, device.clone(), false);
            self.register_mmio_virtio_for_boot(vm, dev_id.to_string(), mmio_device, cmdline)?;
            Ok(self
                .get_virtio_device(device.lock().unwrap().device_type(), dev_id)
                .unwrap()
                .resources
                .addr)
        }

        #[cfg(target_arch = "x86_64")]
        /// Gets the number of interrupts used by the devices registered.
        pub fn used_irqs_count(&self) -> usize {
            self.virtio_devices
                .iter()
                .filter(|(_, mmio_dev)| mmio_dev.resources.gsi.is_some())
                .count()
        }
    }

    // Minimal `VirtioDevice` implementation used to exercise the registration
    // paths without a real backend.
    #[allow(dead_code)]
    #[derive(Debug)]
    pub(crate) struct DummyDevice {
        dummy: u32,
        queues: Vec<Queue>,
        queue_evts: [EventFd; 1],
        interrupt_trigger: Option<Arc<IrqTrigger>>,
    }

    impl DummyDevice {
        pub fn new() -> Self {
            DummyDevice {
                dummy: 0,
                queues: QUEUE_SIZES.iter().map(|&s| Queue::new(s)).collect(),
                queue_evts: [EventFd::new(libc::EFD_NONBLOCK).expect("cannot create eventFD")],
                interrupt_trigger: None,
            }
        }
    }

    impl VirtioDevice for DummyDevice {
        impl_device_type!(0);

        fn avail_features(&self) -> u64 {
            0
        }

        fn acked_features(&self) -> u64 {
            0
        }

        fn set_acked_features(&mut self, _: u64) {}

        fn queues(&self) -> &[Queue] {
            &self.queues
        }

        fn queues_mut(&mut self) -> &mut [Queue] {
            &mut self.queues
        }

        fn queue_events(&self) -> &[EventFd] {
            &self.queue_evts
        }

        fn interrupt_trigger(&self) -> &dyn VirtioInterrupt {
            self.interrupt_trigger.as_ref().unwrap().deref()
        }

        fn ack_features_by_page(&mut self, page: u32, value: u32) {
            let _ = page;
            let _ = value;
        }

        fn read_config(&self, offset: u64, data: &mut [u8]) {
            let _ = offset;
            let _ = data;
        }

        fn write_config(&mut self, offset: u64, data: &[u8]) {
            let _ = offset;
            let _ = data;
        }

        fn activate(
            &mut self,
            _: GuestMemoryMmap,
            _: Arc<dyn VirtioInterrupt>,
        ) -> Result<(), ActivateError> {
            Ok(())
        }

        fn is_activated(&self) -> bool {
            false
        }
    }

    #[test]
    #[cfg_attr(target_arch = "x86_64", allow(unused_mut))]
    fn test_register_virtio_device() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let guest_mem = multi_region_mem_raw(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]);
        let kvm = Kvm::new(vec![]).expect("Cannot create Kvm");
        let mut vm = Vm::new(&kvm).unwrap();
        vm.register_dram_memory_regions(guest_mem).unwrap();
        let mut device_manager = MMIODeviceManager::new();
        let mut cmdline = kernel_cmdline::Cmdline::new(4096).unwrap();
        let dummy = Arc::new(Mutex::new(DummyDevice::new()));
        #[cfg(target_arch = "x86_64")]
        vm.setup_irqchip().unwrap();
        #[cfg(target_arch = "aarch64")]
        vm.setup_irqchip(1).unwrap();
        device_manager
            .register_virtio_test_device(
                &vm,
                vm.guest_memory().clone(),
                dummy,
                &mut cmdline,
                "dummy",
            )
            .unwrap();
        // Lookup with an unknown id must fail; the registered id must succeed.
        assert!(device_manager.get_virtio_device(0, "foo").is_none());
        let dev = device_manager.get_virtio_device(0, "dummy").unwrap();
        // The first device gets the first MMIO slot and the first legacy GSI.
        assert_eq!(dev.resources.addr, arch::MEM_32BIT_DEVICES_START);
        assert_eq!(dev.resources.len, MMIO_LEN);
        assert_eq!(dev.resources.gsi, Some(arch::GSI_LEGACY_START));
        device_manager
            .for_each_virtio_device(|virtio_type, device_id, mmio_device| {
                assert_eq!(*virtio_type, 0);
                assert_eq!(device_id, "dummy");
                assert_eq!(mmio_device.resources.addr, arch::MEM_32BIT_DEVICES_START);
                assert_eq!(mmio_device.resources.len, MMIO_LEN);
                assert_eq!(mmio_device.resources.gsi, Some(arch::GSI_LEGACY_START));
                Ok::<(), ()>(())
            })
            .unwrap();
    }

    #[test]
    #[cfg_attr(target_arch = "x86_64", allow(unused_mut))]
    fn test_register_too_many_devices() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let guest_mem = multi_region_mem_raw(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]);
        let kvm = Kvm::new(vec![]).expect("Cannot create Kvm");
        let mut vm = Vm::new(&kvm).unwrap();
        vm.register_dram_memory_regions(guest_mem).unwrap();
        let mut device_manager = MMIODeviceManager::new();
        let mut cmdline = kernel_cmdline::Cmdline::new(4096).unwrap();
        #[cfg(target_arch = "x86_64")]
        vm.setup_irqchip().unwrap();
        #[cfg(target_arch = "aarch64")]
        vm.setup_irqchip(1).unwrap();
        // Exhaust the legacy GSI range; each registration consumes one GSI.
        for _i in crate::arch::GSI_LEGACY_START..=crate::arch::GSI_LEGACY_END {
            device_manager
                .register_virtio_test_device(
                    &vm,
                    vm.guest_memory().clone(),
                    Arc::new(Mutex::new(DummyDevice::new())),
                    &mut cmdline,
                    "dummy1",
                )
                .unwrap();
        }
        // One more device must fail with a resource-allocation error.
        assert_eq!(
            format!(
                "{}",
                device_manager
                    .register_virtio_test_device(
                        &vm,
                        vm.guest_memory().clone(),
                        Arc::new(Mutex::new(DummyDevice::new())),
                        &mut cmdline,
                        "dummy2"
                    )
                    .unwrap_err()
            ),
            "Failed to allocate requested resource: The requested resource is not available."
                .to_string()
        );
    }

    #[test]
    fn test_dummy_device() {
        let dummy = DummyDevice::new();
        assert_eq!(dummy.device_type(), 0);
        assert_eq!(dummy.queues().len(), QUEUE_SIZES.len());
    }

    #[test]
    #[cfg_attr(target_arch = "x86_64", allow(unused_mut))]
    fn test_device_info() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let guest_mem = multi_region_mem_raw(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]);
        let kvm = Kvm::new(vec![]).expect("Cannot create Kvm");
        let mut vm = Vm::new(&kvm).unwrap();
        vm.register_dram_memory_regions(guest_mem).unwrap();
        #[cfg(target_arch = "x86_64")]
        vm.setup_irqchip().unwrap();
        #[cfg(target_arch = "aarch64")]
        vm.setup_irqchip(1).unwrap();
        let mut device_manager = MMIODeviceManager::new();
        let mut cmdline = kernel_cmdline::Cmdline::new(4096).unwrap();
        let dummy = Arc::new(Mutex::new(DummyDevice::new()));
        let type_id = dummy.lock().unwrap().device_type();
        let id = String::from("foo");
        let addr = device_manager
            .register_virtio_test_device(&vm, vm.guest_memory().clone(), dummy, &mut cmdline, &id)
            .unwrap();
        assert!(device_manager.get_virtio_device(type_id, &id).is_some());
        // The address reported at registration matches the stored resources.
        assert_eq!(
            addr,
            device_manager.virtio_devices[&(type_id, id.clone())]
                .resources
                .addr
        );
        assert_eq!(
            crate::arch::GSI_LEGACY_START,
            device_manager.virtio_devices[&(type_id, id)]
                .resources
                .gsi
                .unwrap()
        );
        let id = "bar";
        assert!(device_manager.get_virtio_device(type_id, id).is_none());
        let dummy2 = Arc::new(Mutex::new(DummyDevice::new()));
        let id2 = String::from("foo2");
        device_manager
            .register_virtio_test_device(&vm, vm.guest_memory().clone(), dummy2, &mut cmdline, &id2)
            .unwrap();
        // Visit both devices; "foo" contributes 1 and "foo2" contributes 2.
        let mut count = 0;
        let _: Result<(), MmioError> =
            device_manager.for_each_virtio_device(|devtype, devid, _| {
                assert_eq!(*devtype, type_id);
                match devid.as_str() {
                    "foo" => count += 1,
                    "foo2" => count += 2,
                    _ => unreachable!(),
                };
                Ok(())
            });
        assert_eq!(count, 3);
        #[cfg(target_arch = "x86_64")]
        assert_eq!(device_manager.used_irqs_count(), 2);
    }

    #[test]
    fn test_no_irq_allocation() {
        let mut device_manager = MMIODeviceManager::new();
        let mut resource_allocator = ResourceAllocator::new();
        // Requesting zero IRQs must leave the GSI unset.
        let device_info = device_manager
            .allocate_mmio_resources(&mut resource_allocator, 0)
            .unwrap();
        assert!(device_info.gsi.is_none());
    }

    #[test]
    fn test_irq_allocation() {
        let mut device_manager = MMIODeviceManager::new();
        let mut resource_allocator = ResourceAllocator::new();
        // The first single-IRQ allocation yields the first legacy GSI.
        let device_info = device_manager
            .allocate_mmio_resources(&mut resource_allocator, 1)
            .unwrap();
        assert_eq!(device_info.gsi.unwrap(), crate::arch::GSI_LEGACY_START);
    }

    #[test]
    fn test_allocation_failure() {
        let mut device_manager = MMIODeviceManager::new();
        let mut resource_allocator = ResourceAllocator::new();
        // More than one IRQ per MMIO device is rejected.
        assert_eq!(
            format!(
                "{}",
                device_manager
                    .allocate_mmio_resources(&mut resource_allocator, 2)
                    .unwrap_err()
            ),
            "Invalid MMIO IRQ configuration.".to_string()
        );
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/tests/integration_tests.rs | src/vmm/tests/integration_tests.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![allow(clippy::cast_possible_truncation, clippy::tests_outside_test_module)]
use std::io::{Seek, SeekFrom};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;
use vmm::builder::build_and_boot_microvm;
use vmm::devices::virtio::block::CacheType;
use vmm::persist::{MicrovmState, MicrovmStateError, VmInfo, snapshot_state_sanity_check};
use vmm::resources::VmResources;
use vmm::rpc_interface::{
LoadSnapshotError, PrebootApiController, RuntimeApiController, VmmAction, VmmActionError,
};
use vmm::seccomp::get_empty_filters;
use vmm::snapshot::Snapshot;
use vmm::test_utils::mock_resources::{MockVmResources, NOISY_KERNEL_IMAGE};
use vmm::test_utils::{create_vmm, default_vmm, default_vmm_no_boot};
use vmm::vmm_config::balloon::BalloonDeviceConfig;
use vmm::vmm_config::boot_source::BootSourceConfig;
use vmm::vmm_config::drive::BlockDeviceConfig;
use vmm::vmm_config::instance_info::{InstanceInfo, VmState};
use vmm::vmm_config::machine_config::{MachineConfig, MachineConfigUpdate};
use vmm::vmm_config::net::NetworkInterfaceConfig;
use vmm::vmm_config::snapshot::{
CreateSnapshotParams, LoadSnapshotParams, MemBackendConfig, MemBackendType, SnapshotType,
};
use vmm::vmm_config::vsock::VsockDeviceConfig;
use vmm::{DumpCpuConfigError, EventManager, FcExitCode, Vmm};
use vmm_sys_util::tempfile::TempFile;
// Drives a freshly booted microVM to completion and asserts it exited cleanly.
#[allow(unused_mut, unused_variables)]
fn check_booted_microvm(vmm: Arc<Mutex<Vmm>>, mut evmgr: EventManager) {
    // On x86_64, the vmm should exit once its workload completes and signals the exit event.
    // On aarch64, the test kernel doesn't exit, so the vmm is force-stopped.
    #[cfg(target_arch = "x86_64")]
    evmgr.run_with_timeout(500).unwrap();
    #[cfg(target_arch = "aarch64")]
    vmm.lock().unwrap().stop(FcExitCode::Ok);
    // Either way, the microVM must have shut down with a success exit code.
    assert_eq!(
        vmm.lock().unwrap().shutdown_exit_code(),
        Some(FcExitCode::Ok)
    );
}
#[test]
fn test_build_and_boot_microvm() {
    // Error case: no boot source configured.
    {
        let resources: VmResources = MockVmResources::new().into();
        let mut event_manager = EventManager::new().unwrap();
        let empty_seccomp_filters = get_empty_filters();
        let vmm_ret = build_and_boot_microvm(
            &InstanceInfo::default(),
            &resources,
            &mut event_manager,
            &empty_seccomp_filters,
        );
        assert_eq!(format!("{:?}", vmm_ret.err()), "Some(MissingKernelConfig)");
    }
    // Success case: boot under every combination of PCI / memory-hotplug support.
    for pci_enabled in [false, true] {
        for memory_hotplug in [false, true] {
            let (vmm, evmgr) = create_vmm(None, false, true, pci_enabled, memory_hotplug);
            check_booted_microvm(vmm, evmgr);
        }
    }
}
// Checks a built-but-not-booted microVM: it starts paused and can resume and
// exit cleanly.
#[allow(unused_mut, unused_variables)]
fn check_build_microvm(vmm: Arc<Mutex<Vmm>>, mut evmgr: EventManager) {
    // The built microVM should be in the `VmState::Paused` state here.
    assert_eq!(vmm.lock().unwrap().instance_info().state, VmState::Paused);
    // The microVM should be able to resume and exit successfully.
    // On x86_64, the vmm should exit once its workload completes and signals the exit event.
    // On aarch64, the test kernel doesn't exit, so the vmm is force-stopped.
    vmm.lock().unwrap().resume_vm().unwrap();
    #[cfg(target_arch = "x86_64")]
    evmgr.run_with_timeout(500).unwrap();
    #[cfg(target_arch = "aarch64")]
    vmm.lock().unwrap().stop(FcExitCode::Ok);
    assert_eq!(
        vmm.lock().unwrap().shutdown_exit_code(),
        Some(FcExitCode::Ok)
    );
}
#[test]
fn test_build_microvm() {
    // Exercise every combination of PCI and memory-hotplug support, in the
    // same order as nested boolean loops would produce.
    let combos = [(false, false), (false, true), (true, false), (true, true)];
    for (pci_enabled, memory_hotplug) in combos {
        let (vmm, evmgr) = create_vmm(None, false, false, pci_enabled, memory_hotplug);
        check_build_microvm(vmm, evmgr);
    }
}
// Pauses the given microVM twice (checking idempotency), resumes it, then
// stops it with a success exit code.
fn pause_resume_microvm(vmm: Arc<Mutex<Vmm>>) {
    let mut api_controller = RuntimeApiController::new(VmResources::default(), vmm.clone());
    // There's a race between this thread and the vcpu thread, but this thread
    // should be able to pause vcpu thread before it finishes running its test-binary.
    api_controller.handle_request(VmmAction::Pause).unwrap();
    // Pausing again the microVM should not fail (microVM remains in the
    // `Paused` state).
    api_controller.handle_request(VmmAction::Pause).unwrap();
    api_controller.handle_request(VmmAction::Resume).unwrap();
    vmm.lock().unwrap().stop(FcExitCode::Ok);
}
#[test]
fn test_pause_resume_microvm() {
    for pci_enabled in [false, true] {
        for memory_hotplug in [false, true] {
            // Tests that pausing and resuming a microVM work as expected.
            let (vmm, _) = create_vmm(None, false, true, pci_enabled, memory_hotplug);
            pause_resume_microvm(vmm);
        }
    }
}
#[test]
#[cfg(target_arch = "x86_64")]
fn test_dirty_bitmap_success() {
    // Check the dirty bitmap for both a dirty-tracking and a regular microVM.
    let vmms = [
        vmm::test_utils::dirty_tracking_vmm(Some(NOISY_KERNEL_IMAGE)),
        default_vmm(Some(NOISY_KERNEL_IMAGE)),
    ];
    for (vmm, _) in vmms {
        // Let it churn for a while and dirty some pages...
        thread::sleep(Duration::from_millis(100));
        let bitmap = vmm.lock().unwrap().vm.get_dirty_bitmap().unwrap();
        // Count the set bits across every memory region's bitmap words.
        let num_dirty_pages: u32 = bitmap
            .values()
            .map(|bitmap_per_region| {
                // Gently coerce to u32
                let num_dirty_pages_per_region: u32 =
                    bitmap_per_region.iter().map(|n| n.count_ones()).sum();
                num_dirty_pages_per_region
            })
            .sum();
        assert!(num_dirty_pages > 0);
        vmm.lock().unwrap().stop(FcExitCode::Ok);
    }
}
#[test]
fn test_disallow_snapshots_without_pausing() {
    let (vmm, _) = default_vmm(Some(NOISY_KERNEL_IMAGE));
    let vm_info = VmInfo {
        mem_size_mib: 1u64,
        ..Default::default()
    };
    // Verify saving state while running is not allowed.
    assert!(matches!(
        vmm.lock().unwrap().save_state(&vm_info),
        Err(MicrovmStateError::NotAllowed(_))
    ));
    // Pause microVM.
    vmm.lock().unwrap().pause_vm().unwrap();
    // It is now allowed.
    vmm.lock().unwrap().save_state(&vm_info).unwrap();
    // Stop.
    vmm.lock().unwrap().stop(FcExitCode::Ok);
}
#[test]
fn test_disallow_dump_cpu_config_without_pausing() {
    let (vmm, _) = default_vmm_no_boot(Some(NOISY_KERNEL_IMAGE));
    // This call should succeed since the microVM is in the paused state before boot.
    vmm.lock().unwrap().dump_cpu_config().unwrap();
    // Boot the microVM.
    vmm.lock().unwrap().resume_vm().unwrap();
    // Verify this call is not allowed while running.
    assert!(matches!(
        vmm.lock().unwrap().dump_cpu_config(),
        Err(DumpCpuConfigError::NotAllowed(_))
    ));
    // Stop the microVM.
    vmm.lock().unwrap().stop(FcExitCode::Ok);
}
// Boots a noisy-kernel microVM, pauses it, takes a (full or diff) snapshot,
// sanity-checks the serialized state, and returns the snapshot and guest
// memory files for later restore tests.
fn verify_create_snapshot(
    is_diff: bool,
    pci_enabled: bool,
    memory_hotplug: bool,
) -> (TempFile, TempFile) {
    let snapshot_file = TempFile::new().unwrap();
    let memory_file = TempFile::new().unwrap();
    let (vmm, _) = create_vmm(
        Some(NOISY_KERNEL_IMAGE),
        is_diff,
        true,
        pci_enabled,
        memory_hotplug,
    );
    // Diff snapshots require dirty-page tracking in the machine config.
    let resources = VmResources {
        machine_config: MachineConfig {
            mem_size_mib: 1,
            track_dirty_pages: is_diff,
            ..Default::default()
        },
        ..Default::default()
    };
    let vm_info = VmInfo::from(&resources);
    let mut controller = RuntimeApiController::new(resources, vmm.clone());
    // Be sure that the microVM is running.
    thread::sleep(Duration::from_millis(200));
    // Pause microVM.
    controller.handle_request(VmmAction::Pause).unwrap();
    // Create snapshot.
    let snapshot_type = match is_diff {
        true => SnapshotType::Diff,
        false => SnapshotType::Full,
    };
    let snapshot_params = CreateSnapshotParams {
        snapshot_type,
        snapshot_path: snapshot_file.as_path().to_path_buf(),
        mem_file_path: memory_file.as_path().to_path_buf(),
    };
    controller
        .handle_request(VmmAction::CreateSnapshot(snapshot_params))
        .unwrap();
    vmm.lock().unwrap().stop(FcExitCode::Ok);
    // Check that we can deserialize the microVM state from `snapshot_file`.
    let restored_microvm_state: MicrovmState =
        Snapshot::load(&mut snapshot_file.as_file()).unwrap().data;
    assert_eq!(restored_microvm_state.vm_info, vm_info);
    // Verify deserialized data.
    // The default vmm has no devices and one vCPU.
    assert_eq!(
        restored_microvm_state
            .device_states
            .mmio_state
            .block_devices
            .len(),
        0
    );
    assert_eq!(
        restored_microvm_state
            .device_states
            .mmio_state
            .net_devices
            .len(),
        0
    );
    assert!(
        restored_microvm_state
            .device_states
            .mmio_state
            .vsock_device
            .is_none()
    );
    assert_eq!(restored_microvm_state.vcpu_states.len(), 1);
    (snapshot_file, memory_file)
}
// Restores a microVM from the given snapshot/memory files via the preboot API
// and verifies it comes up in the `Running` state.
fn verify_load_snapshot(snapshot_file: TempFile, memory_file: TempFile) {
    let mut event_manager = EventManager::new().unwrap();
    let empty_seccomp_filters = get_empty_filters();
    let mut vm_resources = VmResources::default();
    let mut preboot_api_controller = PrebootApiController::new(
        &empty_seccomp_filters,
        InstanceInfo::default(),
        &mut vm_resources,
        &mut event_manager,
    );
    // `resume_vm: true` means the restored microVM starts running immediately.
    preboot_api_controller
        .handle_preboot_request(VmmAction::LoadSnapshot(LoadSnapshotParams {
            snapshot_path: snapshot_file.as_path().to_path_buf(),
            mem_backend: MemBackendConfig {
                backend_path: memory_file.as_path().to_path_buf(),
                backend_type: MemBackendType::File,
            },
            track_dirty_pages: false,
            resume_vm: true,
            network_overrides: vec![],
        }))
        .unwrap();
    let vmm = preboot_api_controller.built_vmm.take().unwrap();
    assert_eq!(vmm.lock().unwrap().instance_info.state, VmState::Running);
    vmm.lock().unwrap().stop(FcExitCode::Ok);
}
#[test]
fn test_create_and_load_snapshot() {
    // Cover full/diff snapshots under every PCI / memory-hotplug combination.
    for diff_snap in [false, true] {
        for pci_enabled in [false, true] {
            for memory_hotplug in [false, true] {
                // Create snapshot.
                let (snapshot_file, memory_file) =
                    verify_create_snapshot(diff_snap, pci_enabled, memory_hotplug);
                // Create a new microVm from snapshot. This only tests code-level logic; it verifies
                // that a microVM can be built with no errors from given snapshot.
                // It does _not_ verify that the guest is actually restored properly. We're using
                // python integration tests for that.
                verify_load_snapshot(snapshot_file, memory_file);
            }
        }
    }
}
#[test]
fn test_snapshot_load_sanity_checks() {
    // Exercise the sanity checks on snapshots taken with and without PCI.
    for pci_enabled in [false, true] {
        check_snapshot(get_microvm_state_from_snapshot(pci_enabled));
    }
}
// Asserts that the sanity check passes on a valid state and fails once the
// memory regions are removed.
fn check_snapshot(mut microvm_state: MicrovmState) {
    use vmm::persist::SnapShotStateSanityCheckError;
    snapshot_state_sanity_check(&microvm_state).unwrap();
    // Remove memory regions.
    microvm_state.vm_state.memory.regions.clear();
    // Validate sanity checks fail because there is no mem region in state.
    assert_eq!(
        snapshot_state_sanity_check(&microvm_state),
        Err(SnapShotStateSanityCheckError::NoMemory)
    );
}
// Takes a diff snapshot and deserializes the microVM state back out of it.
fn get_microvm_state_from_snapshot(pci_enabled: bool) -> MicrovmState {
    // Create a diff snapshot
    let (snapshot_file, _) = verify_create_snapshot(true, pci_enabled, false);
    // Deserialize the microVM state.
    // Rewind first: the file offset is at EOF after the snapshot was written.
    snapshot_file.as_file().seek(SeekFrom::Start(0)).unwrap();
    Snapshot::load(&mut snapshot_file.as_file()).unwrap().data
}
// Applies the given preboot action `res`, then asserts that `LoadSnapshot` is
// rejected afterwards. `res_name` is only used in the failure message.
fn verify_load_snap_disallowed_after_boot_resources(res: VmmAction, res_name: &str) {
    let (snapshot_file, memory_file) = verify_create_snapshot(false, false, false);
    let mut event_manager = EventManager::new().unwrap();
    let empty_seccomp_filters = get_empty_filters();
    let mut vm_resources = VmResources::default();
    let mut preboot_api_controller = PrebootApiController::new(
        &empty_seccomp_filters,
        InstanceInfo::default(),
        &mut vm_resources,
        &mut event_manager,
    );
    preboot_api_controller.handle_preboot_request(res).unwrap();
    // Load snapshot should no longer be allowed.
    let req = VmmAction::LoadSnapshot(LoadSnapshotParams {
        snapshot_path: snapshot_file.as_path().to_path_buf(),
        mem_backend: MemBackendConfig {
            backend_path: memory_file.as_path().to_path_buf(),
            backend_type: MemBackendType::File,
        },
        track_dirty_pages: false,
        resume_vm: false,
        network_overrides: vec![],
    });
    let err = preboot_api_controller.handle_preboot_request(req);
    assert!(
        matches!(
            err.unwrap_err(),
            VmmActionError::LoadSnapshot(LoadSnapshotError::LoadSnapshotNotAllowed)
        ),
        "LoadSnapshot should be disallowed after {}",
        res_name
    );
}
#[test]
fn test_preboot_load_snap_disallowed_after_boot_resources() {
    let tmp_file = TempFile::new().unwrap();
    let tmp_file = tmp_file.as_path().to_str().unwrap().to_string();
    // Verify LoadSnapshot not allowed after configuring various boot-specific resources.
    let req = VmmAction::ConfigureBootSource(BootSourceConfig {
        kernel_image_path: tmp_file.clone(),
        ..Default::default()
    });
    verify_load_snap_disallowed_after_boot_resources(req, "ConfigureBootSource");
    let config = BlockDeviceConfig {
        drive_id: String::new(),
        partuuid: None,
        is_root_device: false,
        cache_type: CacheType::Unsafe,
        is_read_only: Some(false),
        path_on_host: Some(tmp_file),
        rate_limiter: None,
        file_engine_type: None,
        socket: None,
    };
    let req = VmmAction::InsertBlockDevice(config);
    verify_load_snap_disallowed_after_boot_resources(req, "InsertBlockDevice");
    let req = VmmAction::InsertNetworkDevice(NetworkInterfaceConfig {
        iface_id: String::new(),
        host_dev_name: String::new(),
        guest_mac: None,
        rx_rate_limiter: None,
        tx_rate_limiter: None,
    });
    verify_load_snap_disallowed_after_boot_resources(req, "InsertNetworkDevice");
    let req = VmmAction::SetBalloonDevice(BalloonDeviceConfig::default());
    verify_load_snap_disallowed_after_boot_resources(req, "SetBalloonDevice");
    let req = VmmAction::SetVsockDevice(VsockDeviceConfig {
        vsock_id: Some(String::new()),
        guest_cid: 0,
        uds_path: String::new(),
    });
    verify_load_snap_disallowed_after_boot_resources(req, "SetVsockDevice");
    let req =
        VmmAction::UpdateMachineConfiguration(MachineConfigUpdate::from(MachineConfig::default()));
    verify_load_snap_disallowed_after_boot_resources(req, "SetVmConfiguration");
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/tests/io_uring.rs | src/vmm/tests/io_uring.rs | // Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![allow(clippy::cast_possible_truncation, clippy::tests_outside_test_module)]
use std::os::unix::fs::FileExt;
use std::os::unix::io::AsRawFd;
use std::thread;
use std::time::Duration;
use vm_memory::VolatileMemory;
use vmm::vstate::memory::{Bytes, MmapRegion};
use vmm_sys_util::epoll::{ControlOperation, Epoll, EpollEvent, EventSet};
use vmm_sys_util::eventfd::EventFd;
use vmm_sys_util::tempfile::TempFile;
// Helpers shared by the io_uring integration tests below.
mod test_utils {
    use vm_memory::VolatileMemory;
    use vmm::io_uring::operation::{OpCode, Operation};
    use vmm::io_uring::{IoUring, IoUringError, SQueueError};
    use vmm::vstate::memory::MmapRegion;

    // Pops every available completion entry and asserts each op succeeded.
    fn drain_cqueue(ring: &mut IoUring<usize>) {
        while let Some(entry) = ring.pop().unwrap() {
            entry.result().unwrap();
        }
    }

    // Queues `num_bytes` single-byte read or write operations (one per file
    // offset `i`, targeting byte `i` of `mem_region`), draining the ring
    // whenever the submission queue fills up, then waits for all completions.
    pub fn drive_submission_and_completion(
        ring: &mut IoUring<usize>,
        mem_region: &MmapRegion,
        opcode: OpCode,
        num_bytes: usize,
    ) {
        for i in 0..num_bytes {
            loop {
                let operation = match opcode {
                    OpCode::Read => Operation::read(
                        0,
                        mem_region
                            .as_volatile_slice()
                            .subslice(i, 1)
                            .unwrap()
                            .ptr_guard_mut()
                            .as_ptr() as usize,
                        1,
                        i as u64,
                        i,
                    ),
                    OpCode::Write => Operation::write(
                        0,
                        mem_region
                            .as_volatile_slice()
                            .subslice(i, 1)
                            .unwrap()
                            .ptr_guard_mut()
                            .as_ptr() as usize,
                        1,
                        i as u64,
                        i,
                    ),
                    _ => panic!("Only supports read and write."),
                };
                match ring.push(operation) {
                    Ok(()) => break,
                    Err((IoUringError::SQueue(SQueueError::FullQueue), _)) => {
                        // Stop and wait.
                        ring.submit_and_wait_all().unwrap();
                        drain_cqueue(ring);
                        // Retry this OP
                    }
                    Err(_) => panic!("Unexpected error."),
                }
            }
        }
        ring.submit_and_wait_all().unwrap();
        drain_cqueue(ring);
        assert_eq!(ring.pending_sqes().unwrap(), 0);
    }
}
use vmm::io_uring::operation::{OpCode, Operation};
use vmm::io_uring::restriction::Restriction;
use vmm::io_uring::{IoUring, IoUringError, SQueueError};
use crate::test_utils::drive_submission_and_completion;
// Submission-queue depth used for every ring created in these tests.
const NUM_ENTRIES: u32 = 128;
#[test]
fn test_ring_new() {
    // Invalid entries count: 0.
    assert!(matches!(
        IoUring::<u8>::new(0, vec![], vec![], None),
        Err(IoUringError::Setup(err)) if err.kind() == std::io::ErrorKind::InvalidInput
    ));
    // Try to register too many files.
    let dummy_file = TempFile::new().unwrap().into_file();
    assert!(matches!(
        IoUring::<u8>::new(10, vec![&dummy_file; 40000usize], vec![], None), // Max is 32768.
        Err(IoUringError::RegisterFileLimitExceeded)
    ));
}
#[test]
fn test_eventfd() {
    // Test that events get delivered.
    let eventfd = EventFd::new(0).unwrap();
    let file = TempFile::new().unwrap().into_file();
    // Attach the eventfd so completions signal it.
    let mut ring =
        IoUring::new(NUM_ENTRIES, vec![&file], vec![], Some(eventfd.as_raw_fd())).unwrap();
    let user_data: u8 = 71;
    let buf = [0; 4];
    let epoll = Epoll::new().unwrap();
    let mut ready_event = EpollEvent::default();
    epoll
        .ctl(
            ControlOperation::Add,
            eventfd.as_raw_fd(),
            EpollEvent::new(EventSet::IN, 0),
        )
        .unwrap();
    ring.push(Operation::read(0, buf.as_ptr() as usize, 4, 0, user_data))
        .unwrap();
    ring.submit().unwrap();
    // The completion must wake the epoll within the 500 ms timeout.
    assert_eq!(
        epoll
            .wait(500, std::slice::from_mut(&mut ready_event))
            .unwrap(),
        1
    );
    assert_eq!(ready_event.event_set(), EventSet::IN);
}
#[test]
fn test_restrictions() {
    // Check that only the allowlisted opcodes are permitted.
    {
        let file = TempFile::new().unwrap().into_file();
        let mut ring = IoUring::new(
            NUM_ENTRIES,
            vec![&file],
            vec![
                Restriction::RequireFixedFds,
                Restriction::AllowOpCode(OpCode::Read),
            ],
            None,
        )
        .unwrap();
        let buf = [0; 4];
        // Read operations are allowed.
        ring.push(Operation::read(0, buf.as_ptr() as usize, 4, 0, 71))
            .unwrap();
        assert_eq!(ring.submit_and_wait_all().unwrap(), 1);
        ring.pop().unwrap().unwrap().result().unwrap();
        // Other operations are not allowed.
        ring.push(Operation::write(0, buf.as_ptr() as usize, 4, 0, 71))
            .unwrap();
        assert_eq!(ring.submit_and_wait_all().unwrap(), 1);
        // The kernel completes the op with an error instead of executing it.
        ring.pop().unwrap().unwrap().result().unwrap_err();
    }
}
#[test]
fn test_ring_push() {
    // Forgot to register file.
    {
        let buf = [0; 4];
        let mut ring = IoUring::new(NUM_ENTRIES, vec![], vec![], None).unwrap();
        assert!(matches!(
            ring.push(Operation::read(0, buf.as_ptr() as usize, 4, 0, 71)),
            Err((IoUringError::NoRegisteredFds, 71))
        ));
        assert_eq!(ring.pending_sqes().unwrap(), 0);
    }
    // Now register file.
    {
        let file = TempFile::new().unwrap().into_file();
        let mut ring = IoUring::new(NUM_ENTRIES, vec![&file], vec![], None).unwrap();
        let user_data: u8 = 71;
        let buf = [0; 4];
        // Invalid fd.
        assert!(matches!(
            ring.push(Operation::read(1, buf.as_ptr() as usize, 4, 0, user_data)),
            Err((IoUringError::InvalidFixedFd(1), 71))
        ));
        assert_eq!(ring.pending_sqes().unwrap(), 0);
        assert_eq!(ring.num_ops(), 0);
        // Valid fd.
        ring.push(Operation::read(0, buf.as_ptr() as usize, 4, 0, user_data))
            .unwrap();
        assert_eq!(ring.pending_sqes().unwrap(), 1);
        assert_eq!(ring.num_ops(), 1);
        // Full Queue.
        for _ in 1..(NUM_ENTRIES) {
            ring.push(Operation::read(0, buf.as_ptr() as usize, 4, 0, user_data))
                .unwrap();
        }
        assert_eq!(ring.pending_sqes().unwrap(), NUM_ENTRIES);
        assert_eq!(ring.num_ops(), NUM_ENTRIES);
        assert!(matches!(
            ring.push(Operation::read(0, buf.as_ptr() as usize, 4, 0, user_data)),
            Err((IoUringError::SQueue(SQueueError::FullQueue), 71))
        ));
        assert_eq!(ring.pending_sqes().unwrap(), NUM_ENTRIES);
        assert_eq!(ring.num_ops(), NUM_ENTRIES);
        // We didn't get to submit so pop() should return None.
        assert!(ring.pop().unwrap().is_none());
        assert_eq!(ring.num_ops(), NUM_ENTRIES);
        // Full Ring.
        ring.submit().unwrap();
        // Wait for the io_uring ops to reach the CQ
        thread::sleep(Duration::from_millis(150));
        for _ in 0..NUM_ENTRIES {
            ring.push(Operation::read(0, buf.as_ptr() as usize, 4, 0, user_data))
                .unwrap();
        }
        ring.submit().unwrap();
        // Wait for the io_uring ops to reach the CQ
        thread::sleep(Duration::from_millis(150));
        assert_eq!(ring.num_ops(), NUM_ENTRIES * 2);
        // The CQ should be full now
        assert!(matches!(
            ring.push(Operation::read(0, buf.as_ptr() as usize, 4, 0, user_data)),
            Err((IoUringError::FullCQueue, 71))
        ));
        // Check if there are NUM_ENTRIES * 2 cqes
        let mut num_cqes = 0;
        while let Ok(Some(_entry)) = ring.pop() {
            num_cqes += 1;
        }
        assert_eq!(num_cqes, NUM_ENTRIES * 2);
        assert_eq!(ring.num_ops(), 0);
    }
}
#[test]
fn test_ring_submit() {
{
let file = TempFile::new().unwrap().into_file();
let mut ring = IoUring::new(NUM_ENTRIES, vec![&file], vec![], None).unwrap();
let user_data: u8 = 71;
let buf = [0; 4];
// Return 0 if we didn't push any sqes.
assert_eq!(ring.submit().unwrap(), 0);
assert_eq!(ring.num_ops(), 0);
// Now push an sqe.
ring.push(Operation::read(0, buf.as_ptr() as usize, 4, 0, user_data))
.unwrap();
assert_eq!(ring.num_ops(), 1);
assert_eq!(ring.submit().unwrap(), 1);
// Now push & submit some more.
ring.push(Operation::read(0, buf.as_ptr() as usize, 4, 0, user_data))
.unwrap();
ring.push(Operation::read(0, buf.as_ptr() as usize, 4, 0, user_data))
.unwrap();
assert_eq!(ring.num_ops(), 3);
assert_eq!(ring.submit().unwrap(), 2);
}
}
#[test]
fn test_submit_and_wait_all() {
let file = TempFile::new().unwrap().into_file();
let mut ring = IoUring::new(NUM_ENTRIES, vec![&file], vec![], None).unwrap();
let user_data: u8 = 71;
let buf = [0; 4];
// Return 0 if we didn't push any sqes.
assert_eq!(ring.submit_and_wait_all().unwrap(), 0);
// Now push an sqe.
ring.push(Operation::read(0, buf.as_ptr() as usize, 4, 0, user_data))
.unwrap();
assert_eq!(ring.pending_sqes().unwrap(), 1);
assert_eq!(ring.num_ops(), 1);
// A correct waiting period yields the completed entries.
assert_eq!(ring.submit_and_wait_all().unwrap(), 1);
assert_eq!(ring.pop().unwrap().unwrap().user_data(), user_data);
assert_eq!(ring.pending_sqes().unwrap(), 0);
assert_eq!(ring.num_ops(), 0);
// Now push, submit & wait for some more entries.
ring.push(Operation::read(0, buf.as_ptr() as usize, 4, 0, 72))
.unwrap();
ring.push(Operation::read(0, buf.as_ptr() as usize, 4, 0, 73))
.unwrap();
ring.push(Operation::read(0, buf.as_ptr() as usize, 4, 0, 74))
.unwrap();
ring.push(Operation::read(0, buf.as_ptr() as usize, 4, 0, 75))
.unwrap();
assert_eq!(ring.pending_sqes().unwrap(), 4);
assert_eq!(ring.num_ops(), 4);
assert_eq!(ring.submit_and_wait_all().unwrap(), 4);
assert_eq!(ring.pending_sqes().unwrap(), 0);
assert_eq!(ring.num_ops(), 4);
assert!(ring.pop().unwrap().is_some());
assert!(ring.pop().unwrap().is_some());
assert!(ring.pop().unwrap().is_some());
assert!(ring.pop().unwrap().is_some());
assert!(ring.pop().unwrap().is_none());
assert_eq!(ring.num_ops(), 0);
}
#[test]
fn test_write() {
// Test that writing the sorted values 1-100 into a file works correctly.
const NUM_BYTES: usize = 100;
// Setup.
let file = TempFile::new().unwrap().into_file();
let mut ring = IoUring::new(NUM_ENTRIES, vec![&file], vec![], None).unwrap();
// Create & init a memory mapping for storing the write buffers.
let mem_region: MmapRegion = MmapRegion::build(
None,
NUM_BYTES,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
)
.unwrap();
let expected_result: Vec<u8> = (0..(NUM_BYTES as u8)).collect();
for i in 0..NUM_BYTES {
mem_region
.as_volatile_slice()
.write_obj(i as u8, i)
.unwrap();
}
// Init the file with all zeros.
file.write_all_at(&[0; NUM_BYTES], 0).unwrap();
// Perform the IO.
drive_submission_and_completion(&mut ring, &mem_region, OpCode::Write, NUM_BYTES);
// Verify the result.
let mut buf = [0u8; NUM_BYTES];
file.read_exact_at(&mut buf, 0).unwrap();
assert_eq!(buf, &expected_result[..]);
}
#[test]
fn test_read() {
// Test that reading the sorted values 1-100 from a file works correctly.
const NUM_BYTES: usize = 100;
// Setup.
let file = TempFile::new().unwrap().into_file();
let mut ring = IoUring::new(NUM_ENTRIES, vec![&file], vec![], None).unwrap();
// Create & init a memory mapping for storing the read buffers.
let mem_region: MmapRegion = MmapRegion::build(
None,
NUM_BYTES,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
)
.unwrap();
// Init the file with 1-100.
let init_contents: Vec<u8> = (0..(NUM_BYTES as u8)).collect();
file.write_all_at(&init_contents, 0).unwrap();
// Perform the IO.
drive_submission_and_completion(&mut ring, &mem_region, OpCode::Read, NUM_BYTES);
let mut buf = [0; NUM_BYTES];
mem_region
.as_volatile_slice()
.read_slice(&mut buf, 0)
.unwrap();
// Verify the result.
assert_eq!(buf, &init_contents[..]);
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/tests/devices.rs | src/vmm/tests/devices.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![allow(
clippy::cast_possible_truncation,
clippy::tests_outside_test_module,
clippy::undocumented_unsafe_blocks
)]
use std::os::raw::{c_int, c_void};
use std::os::unix::io::{AsRawFd, RawFd};
use std::sync::{Arc, Mutex};
use event_manager::{EventManager, SubscriberOps};
use libc::EFD_NONBLOCK;
use vm_superio::Serial;
use vmm::devices::legacy::serial::SerialOut;
use vmm::devices::legacy::{EventFdTrigger, SerialEventsWrapper, SerialWrapper};
use vmm::vstate::bus::BusDevice;
use vmm_sys_util::eventfd::EventFd;
fn create_serial(
pipe: c_int,
) -> Arc<Mutex<SerialWrapper<EventFdTrigger, SerialEventsWrapper, Box<MockSerialInput>>>> {
// Serial input is the reading end of the pipe.
let serial_in = MockSerialInput(pipe);
let kick_stdin_evt = EventFdTrigger::new(EventFd::new(libc::EFD_NONBLOCK).unwrap());
Arc::new(Mutex::new(SerialWrapper {
serial: Serial::with_events(
EventFdTrigger::new(EventFd::new(EFD_NONBLOCK).unwrap()),
SerialEventsWrapper {
buffer_ready_event_fd: Some(kick_stdin_evt.try_clone().unwrap()),
},
SerialOut::Stdout(std::io::stdout()),
),
input: Some(Box::new(serial_in)),
}))
}
#[derive(Debug)]
pub struct MockSerialInput(pub RawFd);
impl std::io::Read for MockSerialInput {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
let count = unsafe { libc::read(self.0, buf.as_mut_ptr().cast(), buf.len()) };
usize::try_from(count).map_err(|_| std::io::Error::last_os_error())
}
}
impl AsRawFd for MockSerialInput {
fn as_raw_fd(&self) -> RawFd {
self.0
}
}
#[test]
fn test_issue_serial_hangup_anon_pipe_while_registered_stdin() {
let mut fds: [c_int; 2] = [0; 2];
let rc = unsafe { libc::pipe(fds.as_mut_ptr()) };
assert!(rc == 0);
// Serial input is the reading end of the pipe.
let serial = create_serial(fds[0]);
// Make reading fd non blocking to read just what is inflight.
let flags = unsafe { libc::fcntl(fds[0], libc::F_GETFL, 0) };
let mut rc = unsafe { libc::fcntl(fds[0], libc::F_SETFL, flags | libc::O_NONBLOCK) };
assert!(rc == 0);
const BYTES_COUNT: usize = 65; // Serial FIFO_SIZE + 1.
let mut dummy_data = [1u8; BYTES_COUNT];
rc = unsafe {
libc::write(
fds[1],
dummy_data.as_mut_ptr() as *const c_void,
dummy_data.len(),
) as i32
};
assert!(dummy_data.len() == usize::try_from(rc).unwrap());
// Register the reading end of the pipe to the event manager, to be processed later on.
let mut event_manager = EventManager::new().unwrap();
let _id = event_manager.add_subscriber(serial.clone());
// `EventSet::IN` was received on stdin. The event handling will consume
// 64 bytes from stdin. The stdin monitoring is still armed.
let mut ev_count = event_manager.run().unwrap();
assert_eq!(ev_count, 1);
let mut data = [0u8; BYTES_COUNT];
// On the main thread, we will simulate guest "vCPU" thread serial reads.
let data_bus_offset = 0;
for i in 0..BYTES_COUNT - 1 {
serial
.lock()
.unwrap()
.read(0x0, data_bus_offset, &mut data[i..=i]);
}
assert!(data[..31] == dummy_data[..31]);
assert!(data[32..64] == dummy_data[32..64]);
// The avail capacity of the serial FIFO is 64.
// Read the 65th from the stdin through the kick stdin event triggered by 64th of the serial
// FIFO read, or by the armed level-triggered stdin monitoring. Either one of the events might
// be handled first. The handling of the second event will find the stdin without any pending
// bytes and will result in EWOULDBLOCK. Usually, EWOULDBLOCK will reregister the stdin, but
// since it was not unregistered before, it will do a noop.
ev_count = event_manager.run().unwrap();
assert_eq!(ev_count, 2);
// The avail capacity of the serial FIFO is 63.
rc = unsafe {
libc::write(
fds[1],
dummy_data.as_mut_ptr() as *const c_void,
dummy_data.len(),
) as i32
};
assert!(dummy_data.len() == usize::try_from(rc).unwrap());
// Writing to the other end of the pipe triggers handling a stdin event.
// Now, 63 bytes will be read from stdin, filling up the buffer.
ev_count = event_manager.run().unwrap();
assert_eq!(ev_count, 1);
// Close the writing end (this sends an HANG_UP to the reading end).
// While the stdin is registered, this event is caught by the event manager.
rc = unsafe { libc::close(fds[1]) };
assert!(rc == 0);
// This cycle of epoll has two important events. First, the received HANGUP and second
// the fact that the FIFO is full, so even if the stdin reached EOF, there are still
// pending bytes to be read. We still unregister the stdin and keep reading from it until
// we get all pending bytes.
ev_count = event_manager.run().unwrap();
assert_eq!(ev_count, 1);
// Free up 64 bytes from the serial FIFO.
for i in 0..BYTES_COUNT - 1 {
serial
.lock()
.unwrap()
.read(0x0, data_bus_offset, &mut data[i..=i]);
}
// Process the kick stdin event generated by the reading of the 64th byte of the serial FIFO.
// This will consume some more bytes from the stdin while the stdin is unregistered.
ev_count = event_manager.run().unwrap();
assert_eq!(ev_count, 1);
// Two more bytes left. At the 2nd byte, another kick read stdin event is generated,
// trying to fill again the serial FIFO with more bytes.
for i in 0..2 {
serial
.lock()
.unwrap()
.read(0x0, data_bus_offset, &mut data[i..=i]);
}
// We try to read again, but we detect that stdin received previously EOF.
// This can be deduced by reading from a non-blocking fd and getting 0 bytes as a result,
// instead of EWOUDBLOCK. We unregister the stdin and the kick stdin read evt.
ev_count = event_manager.run().unwrap();
assert_eq!(ev_count, 1);
// Nothing can interrupt us.
ev_count = event_manager.run_with_timeout(1).unwrap();
assert_eq!(ev_count, 0);
}
#[test]
fn test_issue_hangup() {
let mut fds: [c_int; 2] = [0; 2];
let rc = unsafe { libc::pipe(fds.as_mut_ptr()) };
assert!(rc == 0);
// Serial input is the reading end of the pipe.
let serial = create_serial(fds[0]);
// Make reading fd non blocking to read just what is inflight.
let flags = unsafe { libc::fcntl(fds[0], libc::F_GETFL, 0) };
let mut rc = unsafe { libc::fcntl(fds[0], libc::F_SETFL, flags | libc::O_NONBLOCK) };
assert!(rc == 0);
// Close the writing end (this sends an HANG_UP to the reading end).
// While the stdin is registered, this event is caught by the event manager.
rc = unsafe { libc::close(fds[1]) };
assert!(rc == 0);
// Register the reading end of the pipe to the event manager, to be processed later on.
let mut event_manager = EventManager::new().unwrap();
let _id = event_manager.add_subscriber(serial);
let mut ev_count = event_manager.run().unwrap();
assert_eq!(ev_count, 1);
// Nothing can interrupt us.
ev_count = event_manager.run_with_timeout(1).unwrap();
assert_eq!(ev_count, 0);
}
#[test]
fn test_issue_serial_hangup_anon_pipe_while_unregistered_stdin() {
let mut fds: [c_int; 2] = [0; 2];
let rc = unsafe { libc::pipe(fds.as_mut_ptr()) };
assert!(rc == 0);
// Serial input is the reading end of the pipe.
let serial = create_serial(fds[0]);
// Make reading fd non blocking to read just what is inflight.
let flags = unsafe { libc::fcntl(fds[0], libc::F_GETFL, 0) };
let mut rc = unsafe { libc::fcntl(fds[0], libc::F_SETFL, flags | libc::O_NONBLOCK) };
assert!(rc == 0);
const BYTES_COUNT: usize = 65; // Serial FIFO_SIZE + 1.
let mut dummy_data = [1u8; BYTES_COUNT];
rc = unsafe {
libc::write(
fds[1],
dummy_data.as_mut_ptr() as *const c_void,
dummy_data.len(),
) as i32
};
assert!(dummy_data.len() == usize::try_from(rc).unwrap());
// Register the reading end of the pipe to the event manager, to be processed later on.
let mut event_manager = EventManager::new().unwrap();
let _id = event_manager.add_subscriber(serial.clone());
// `EventSet::IN` was received on stdin. The event handling will consume
// 64 bytes from stdin. The stdin monitoring is still armed.
let mut ev_count = event_manager.run_with_timeout(0).unwrap();
assert_eq!(ev_count, 1);
let mut data = [0u8; BYTES_COUNT];
// On the main thread, we will simulate guest "vCPU" thread serial reads.
let data_bus_offset = 0;
for i in 0..BYTES_COUNT - 1 {
serial
.lock()
.unwrap()
.read(0x0, data_bus_offset, &mut data[i..=i]);
}
assert!(data[..31] == dummy_data[..31]);
assert!(data[32..64] == dummy_data[32..64]);
// The avail capacity of the serial FIFO is 64.
// Read the 65th from the stdin through the kick stdin event triggered by 64th of the serial
// FIFO read, or by the armed level-triggered stdin monitoring. Either one of the events might
// be handled first. The handling of the second event will find the stdin without any pending
// bytes and will result in EWOULDBLOCK. Usually, EWOULDBLOCK will reregister the stdin, but
// since it was not unregistered before, it will do a noop.
ev_count = event_manager.run().unwrap();
assert_eq!(ev_count, 2);
// The avail capacity of the serial FIFO is 63.
rc = unsafe {
libc::write(
fds[1],
dummy_data.as_mut_ptr() as *const c_void,
dummy_data.len(),
) as i32
};
assert!(dummy_data.len() == usize::try_from(rc).unwrap());
// Writing to the other end of the pipe triggers handling an stdin event.
// Now, 63 bytes will be read from stdin, filling up the buffer.
ev_count = event_manager.run().unwrap();
assert_eq!(ev_count, 1);
// Serial FIFO is full, so silence the stdin. We do not need any other interruptions
// until the serial FIFO is freed.
ev_count = event_manager.run().unwrap();
assert_eq!(ev_count, 1);
// Close the writing end (this sends an HANG_UP to the reading end).
// While the stdin is unregistered, this event is not caught by the event manager.
rc = unsafe { libc::close(fds[1]) };
assert!(rc == 0);
// This would be a blocking epoll_wait, since the buffer is full and stdin is unregistered.
// There is no event that can break the epoll wait loop.
ev_count = event_manager.run_with_timeout(0).unwrap();
assert_eq!(ev_count, 0);
// Free up 64 bytes from the serial FIFO.
for i in 0..BYTES_COUNT - 1 {
serial
.lock()
.unwrap()
.read(0x0, data_bus_offset, &mut data[i..=i]);
}
// Process the kick stdin event generated by the reading of the 64th byte of the serial FIFO.
// This will consume some more bytes from the stdin. Keep in mind that the HANGUP event was
// lost and we do not know that the stdin reached EOF.
ev_count = event_manager.run().unwrap();
assert_eq!(ev_count, 1);
// Two more bytes left. At the 2nd byte, another kick read stdin event is generated,
// trying to fill again the serial FIFO with more bytes. Keep in mind that the HANGUP event was
// lost and we do not know that the stdin reached EOF.
for i in 0..2 {
serial
.lock()
.unwrap()
.read(0x0, data_bus_offset, &mut data[i..=i]);
}
// We try to read again, but we detect that stdin received previously EOF.
// This can be deduced by reading from a non-blocking fd and getting 0 bytes as a result,
// instead of EWOUDBLOCK. We unregister the stdin and the kick stdin read evt.
ev_count = event_manager.run().unwrap();
assert_eq!(ev_count, 1);
// Nothing can interrupt us.
ev_count = event_manager.run_with_timeout(0).unwrap();
assert_eq!(ev_count, 0);
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/benches/cpu_templates.rs | src/vmm/benches/cpu_templates.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Benchmarking cases:
// * `CustomCpuTemplate` JSON deserialization
// * `CustomCpuTemplate` JSON serialization
use std::mem::size_of_val;
use criterion::{Criterion, criterion_group, criterion_main};
use vmm::cpu_config::templates::CustomCpuTemplate;
use vmm::cpu_config::templates::test_utils::{TEST_TEMPLATE_JSON, build_test_template};
#[inline]
pub fn bench_serialize_cpu_template(cpu_template: &CustomCpuTemplate) {
let _ = serde_json::to_string(cpu_template);
}
#[inline]
pub fn bench_deserialize_cpu_template(cpu_template_str: &str) {
let _ = serde_json::from_str::<CustomCpuTemplate>(cpu_template_str);
}
pub fn cpu_template_benchmark(c: &mut Criterion) {
println!(
"Deserialization test - Template size (JSON string): [{}] bytes.",
TEST_TEMPLATE_JSON.len()
);
let test_cpu_template = build_test_template();
println!(
"Serialization test - Template size: [{}] bytes.",
size_of_val(&test_cpu_template)
);
c.bench_function("deserialize_cpu_template", |b| {
b.iter(|| bench_deserialize_cpu_template(TEST_TEMPLATE_JSON))
});
c.bench_function("serialize_cpu_template", |b| {
b.iter(|| bench_serialize_cpu_template(&test_cpu_template))
});
}
criterion_group! {
name = cpu_template_benches;
config = Criterion::default().sample_size(200).noise_threshold(0.05);
targets = cpu_template_benchmark
}
criterion_main! {
cpu_template_benches
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/benches/memory_access.rs | src/vmm/benches/memory_access.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![allow(clippy::undocumented_unsafe_blocks)]
use criterion::{BatchSize, Criterion, criterion_group, criterion_main};
use vmm::resources::VmResources;
use vmm::vmm_config::machine_config::{HugePageConfig, MachineConfig};
fn bench_single_page_fault(c: &mut Criterion, configuration: VmResources) {
c.bench_function("page_fault", |b| {
b.iter_batched(
|| {
let memory = configuration.allocate_guest_memory().unwrap();
// Get a pointer to the first memory region (cannot do `.get_slice(GuestAddress(0),
// 1)`, because on ARM64 guest memory does not start at physical
// address 0).
let ptr = memory.first().unwrap().as_ptr();
// fine to return both here, because ptr is not a reference into `memory` (e.g. no
// self-referential structs are happening here)
(memory, ptr)
},
|(_, ptr)| unsafe {
// Cause a single page fault
ptr.write_volatile(1);
},
BatchSize::SmallInput,
)
});
}
pub fn bench_4k_page_fault(c: &mut Criterion) {
bench_single_page_fault(
c,
VmResources {
machine_config: MachineConfig {
vcpu_count: 1,
mem_size_mib: 2,
..Default::default()
},
..Default::default()
},
)
}
pub fn bench_2m_page_fault(c: &mut Criterion) {
bench_single_page_fault(
c,
VmResources {
machine_config: MachineConfig {
vcpu_count: 1,
mem_size_mib: 2,
huge_pages: HugePageConfig::Hugetlbfs2M,
..Default::default()
},
..Default::default()
},
)
}
criterion_group! {
name = memory_access_benches;
config = Criterion::default().noise_threshold(0.05);
targets = bench_4k_page_fault, bench_2m_page_fault
}
criterion_main! {
memory_access_benches
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/benches/queue.rs | src/vmm/benches/queue.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Benchmarking cases:
// * `Queue.pop`
// * `Queue.add_used`
// * `DescriptorChain.next_descriptor`
#![allow(clippy::cast_possible_truncation)]
use std::num::Wrapping;
use criterion::{Criterion, criterion_group, criterion_main};
use vm_memory::GuestAddress;
use vmm::devices::virtio::queue::{VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE};
use vmm::devices::virtio::test_utils::VirtQueue;
use vmm::test_utils::single_region_mem;
/// Create one chain with n descriptors
/// Descriptor buffers will leave at the offset of 2048 bytes
/// to leave some room for queue objects.
/// We don't really care about sizes of descriptors,
/// so pick 1024.
fn set_dtable_one_chain(rxq: &VirtQueue, n: usize) {
let desc_size = 1024;
for i in 0..n {
rxq.dtable[i].set(
(2048 + desc_size * i) as u64,
desc_size as u32,
VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_NEXT,
(i + 1) as u16,
);
}
rxq.dtable[n - 1].flags.set(VIRTQ_DESC_F_WRITE);
rxq.dtable[n - 1].next.set(0);
rxq.avail.ring[0].set(0);
rxq.avail.idx.set(n as u16);
}
/// Create n chains with 1 descriptors each
/// Descriptor buffers will leave at the offset of 2048 bytes
/// to leave some room for queue objects.
/// We don't really care about sizes of descriptors,
/// so pick 1024.
fn set_dtable_many_chains(rxq: &VirtQueue, n: usize) {
let desc_size = 1024;
for i in 0..n {
rxq.dtable[i].set(
(2048 + desc_size * i) as u64,
desc_size as u32,
VIRTQ_DESC_F_WRITE,
0,
);
rxq.avail.ring[i].set(i as u16);
}
rxq.avail.idx.set(n as u16);
}
pub fn queue_benchmark(c: &mut Criterion) {
let mem = single_region_mem(65562);
let rxq = VirtQueue::new(GuestAddress(0), &mem, 256);
let mut queue = rxq.create_queue();
set_dtable_one_chain(&rxq, 16);
queue.next_avail = Wrapping(0);
let desc = queue.pop().unwrap().unwrap();
c.bench_function("next_descriptor_16", |b| {
b.iter(|| {
let mut head = Some(desc);
while let Some(d) = head {
head = std::hint::black_box(d.next_descriptor());
}
})
});
set_dtable_many_chains(&rxq, 16);
c.bench_function("queue_pop_16", |b| {
b.iter(|| {
queue.next_avail = Wrapping(0);
while let Some(desc) = queue.pop().unwrap() {
std::hint::black_box(desc);
}
})
});
c.bench_function("queue_add_used_16", |b| {
b.iter(|| {
queue.num_added = Wrapping(0);
queue.next_used = Wrapping(0);
for i in 0_u16..16_u16 {
let index = std::hint::black_box(i);
let len = std::hint::black_box(i + 1);
_ = queue.add_used(index, len as u32);
}
})
});
c.bench_function("queue_add_used_256", |b| {
b.iter(|| {
queue.num_added = Wrapping(0);
queue.next_used = Wrapping(0);
for i in 0_u16..256_u16 {
let index = std::hint::black_box(i);
let len = std::hint::black_box(i + 1);
_ = queue.add_used(index, len as u32);
}
})
});
}
criterion_group! {
name = queue_benches;
config = Criterion::default().sample_size(1000).noise_threshold(0.15);
targets = queue_benchmark
}
criterion_main! {
queue_benches
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/vmm/benches/block_request.rs | src/vmm/benches/block_request.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Benchmarking cases:
// * `Queue.pop`
// * `Queue.add_used`
// * `DescriptorChain.next_descriptor`
use criterion::{Criterion, criterion_group, criterion_main};
use vm_memory::GuestAddress;
use vmm::devices::virtio::block::virtio::test_utils::RequestDescriptorChain;
use vmm::devices::virtio::block::virtio::{Request, RequestHeader, VIRTIO_BLK_T_IN};
use vmm::devices::virtio::test_utils::VirtQueue;
use vmm::test_utils::single_region_mem;
pub fn block_request_benchmark(c: &mut Criterion) {
let mem = single_region_mem(65562);
let virt_queue = VirtQueue::new(GuestAddress(0), &mem, 16);
// We don't really care about what request is. We just
// need it to be valid.
let chain = RequestDescriptorChain::new(&virt_queue);
let request_header = RequestHeader::new(VIRTIO_BLK_T_IN, 99);
chain.set_header(request_header);
let mut queue = virt_queue.create_queue();
let desc = queue.pop().unwrap().unwrap();
c.bench_function("request_parse", |b| {
b.iter(|| {
let desc = std::hint::black_box(&desc);
_ = Request::parse(desc, &mem, 1024);
})
});
}
criterion_group! {
name = block_request_benches;
config = Criterion::default().sample_size(1000).noise_threshold(0.05);
targets = block_request_benchmark
}
criterion_main! {
block_request_benches
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/cpu-template-helper/build.rs | src/cpu-template-helper/build.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::io::Write;
const MOCK_KERNEL_PATH: &str = "src/utils/mock_kernel/kernel.bin";
// Kernel header for aarch64 that comes from the kernel doc Documentation/arm64/booting.txt.
#[derive(Default)]
#[repr(C, packed)]
struct KernelHeader {
code0: u32, // Executable code
code1: u32, // Executable code
text_offset: u64, // Image load offset,
image_size: u64, // Effective Image size, little endian
flags: u64, // kernel flags, little endian
res2: u64, // reserved
res3: u64, // reserved
res4: u64, // reserved
magic: u32, // Magic number, little endian, "ARM\x64"
res5: u32, // reserved (used for PE COFF offset)
}
fn main() {
if cfg!(target_arch = "x86_64") {
println!("cargo:rerun-if-changed=src/utils/mock_kernel/main.c");
let status = std::process::Command::new("gcc")
.args([
// Do not use the standard system startup files or libraries when linking.
"-nostdlib",
// Prevents linking with the shared libraries.
"-static",
// Do not generate unwind tables.
"-fno-asynchronous-unwind-tables",
// Remove all symbol table and relocation information.
"-s",
"-o",
MOCK_KERNEL_PATH,
"src/utils/mock_kernel/main.c",
])
.status()
.expect("Failed to execute gcc command");
if !status.success() {
panic!("Failed to compile mock kernel");
}
} else if cfg!(target_arch = "aarch64") {
let header = KernelHeader {
magic: 0x644D5241,
..std::default::Default::default()
};
// SAFETY: This is safe as long as `header` is valid as `KernelHeader`.
let header_bytes = unsafe {
std::slice::from_raw_parts(
(&header as *const KernelHeader).cast::<u8>(),
std::mem::size_of::<KernelHeader>(),
)
};
let mut file = std::fs::File::create(MOCK_KERNEL_PATH).expect("Failed to create a file");
file.write_all(header_bytes)
.expect("Failed to write kernel header to a file");
} else {
panic!("Unsupported arch");
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/cpu-template-helper/src/main.rs | src/cpu-template-helper/src/main.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fs::{read_to_string, write};
use std::path::PathBuf;
use clap::{Parser, Subcommand, ValueEnum};
use vmm::cpu_config::templates::{GetCpuTemplate, GetCpuTemplateError};
mod fingerprint;
mod template;
mod utils;
#[derive(Debug, thiserror::Error, displaydoc::Display)]
enum HelperError {
/// Failed to operate file: {0}
FileIo(#[from] std::io::Error),
/// {0}
FingerprintCompare(#[from] fingerprint::compare::FingerprintCompareError),
/// {0}
FingerprintDump(#[from] fingerprint::dump::FingerprintDumpError),
/// CPU template is not specified: {0}
NoCpuTemplate(#[from] GetCpuTemplateError),
/// Failed to serialize/deserialize JSON file: {0}
Serde(#[from] serde_json::Error),
/// {0}
Utils(#[from] utils::UtilsError),
/// {0}
TemplateDump(#[from] template::dump::DumpError),
/// {0}
TemplateStrip(#[from] template::strip::StripError),
/// {0}
TemplateVerify(#[from] template::verify::VerifyError),
}
#[derive(Debug, Parser)]
#[command(version = format!("v{}", crate::utils::CPU_TEMPLATE_HELPER_VERSION))]
struct Cli {
#[command(subcommand)]
command: Command,
}
#[derive(Debug, Subcommand)]
enum Command {
/// Template-related operations
#[command(subcommand)]
Template(TemplateOperation),
/// Fingerprint-related operations
#[command(subcommand)]
Fingerprint(FingerprintOperation),
}
#[derive(Debug, Subcommand)]
enum TemplateOperation {
/// Dump guest CPU configuration in the custom CPU template format.
Dump {
/// Path of firecracker config file.
#[arg(short, long, value_name = "PATH")]
config: Option<PathBuf>,
/// Path of CPU template to apply.
#[arg(short, long, value_name = "PATH")]
template: Option<PathBuf>,
/// Path of output file.
#[arg(short, long, value_name = "PATH", default_value = "cpu_config.json")]
output: PathBuf,
},
/// Strip entries shared between multiple CPU template files.
Strip {
/// List of paths of input CPU configuration files.
#[arg(short, long, value_name = "PATH", num_args = 2..)]
paths: Vec<PathBuf>,
/// Suffix of output files. To overwrite input files, specify an empty string ''.
#[arg(short, long, default_value = "_stripped")]
suffix: String,
},
/// Verify that the given CPU template file is applied as intended.
Verify {
/// Path of firecracker config file.
#[arg(short, long, value_name = "PATH")]
config: Option<PathBuf>,
/// Path of the target CPU template.
#[arg(short, long, value_name = "PATH")]
template: Option<PathBuf>,
},
}
#[derive(Debug, Subcommand)]
enum FingerprintOperation {
/// Dump fingerprint consisting of host-related information and guest CPU config.
Dump {
/// Path of firecracker config file.
#[arg(short, long, value_name = "PATH")]
config: Option<PathBuf>,
/// Path of CPU template to apply.
#[arg(short, long, value_name = "PATH")]
template: Option<PathBuf>,
/// Path of output file.
#[arg(short, long, value_name = "PATH", default_value = "fingerprint.json")]
output: PathBuf,
},
/// Compare two fingerprint files with queries.
Compare {
/// Path of fingerprint file that stores the previous state at CPU template creation.
#[arg(short, long, value_name = "PATH")]
prev: PathBuf,
/// Path of fingerprint file that stores the current state.
#[arg(short, long, value_name = "PATH")]
curr: PathBuf,
/// List of fields to be compared.
#[arg(
short,
long,
value_enum,
num_args = 1..,
default_values_t = fingerprint::FingerprintField::value_variants()
)]
filters: Vec<fingerprint::FingerprintField>,
},
}
/// Dispatch the parsed CLI command to the matching subcommand handler.
///
/// * `template dump` / `fingerprint dump`: build a microVM from the optional config (a mock
///   config is used when none is given) with the optional CPU template applied, dump its
///   state, and write it to the output path as pretty-printed JSON.
/// * `template strip`: remove the entries shared by all given templates and write each
///   result next to its input file with the given suffix.
/// * `template verify`: re-dump the guest CPU config and check it against the template.
/// * `fingerprint compare`: compare two fingerprint files on the selected fields.
fn run(cli: Cli) -> Result<(), HelperError> {
    match cli.command {
        Command::Template(op) => match op {
            TemplateOperation::Dump {
                config,
                template,
                output,
            } => {
                // `transpose` turns Option<Result<..>> into Result<Option<..>> so `?` applies.
                let config = config.map(read_to_string).transpose()?;
                let template = template
                    .as_ref()
                    .map(utils::load_cpu_template)
                    .transpose()?;
                let (vmm, _) = utils::build_microvm_from_config(config, template)?;
                let cpu_config = template::dump::dump(vmm)?;
                let cpu_config_json = serde_json::to_string_pretty(&cpu_config)?;
                write(output, cpu_config_json)?;
            }
            TemplateOperation::Strip { paths, suffix } => {
                // Load every template first so any parse error aborts before writing output.
                let templates = paths
                    .iter()
                    .map(utils::load_cpu_template)
                    .collect::<Result<Vec<_>, utils::UtilsError>>()?;
                let stripped_templates = template::strip::strip(templates)?;
                // `strip` preserves input order, so zipping with `paths` pairs each result
                // with the file it came from.
                for (path, template) in paths.into_iter().zip(stripped_templates.into_iter()) {
                    let path = utils::add_suffix(&path, &suffix);
                    let template_json = serde_json::to_string_pretty(&template)?;
                    write(path, template_json)?;
                }
            }
            TemplateOperation::Verify { config, template } => {
                let config = config.map(read_to_string).transpose()?;
                let template = template
                    .as_ref()
                    .map(utils::load_cpu_template)
                    .transpose()?;
                let (vmm, vm_resources) = utils::build_microvm_from_config(config, template)?;
                // The effective template is taken back out of the built VM resources so the
                // check covers exactly what was applied.
                let cpu_template = vm_resources
                    .machine_config
                    .cpu_template
                    .get_cpu_template()?
                    .into_owned();
                let cpu_config = template::dump::dump(vmm)?;
                template::verify::verify(cpu_template, cpu_config)?;
            }
        },
        Command::Fingerprint(op) => match op {
            FingerprintOperation::Dump {
                config,
                template,
                output,
            } => {
                // Same flow as `template dump`; the fingerprint additionally records
                // environment info (kernel/microcode/BIOS versions — see
                // `fingerprint::Fingerprint`).
                let config = config.map(read_to_string).transpose()?;
                let template = template
                    .as_ref()
                    .map(utils::load_cpu_template)
                    .transpose()?;
                let (vmm, _) = utils::build_microvm_from_config(config, template)?;
                let fingerprint = fingerprint::dump::dump(vmm)?;
                let fingerprint_json = serde_json::to_string_pretty(&fingerprint)?;
                write(output, fingerprint_json)?;
            }
            FingerprintOperation::Compare {
                prev,
                curr,
                filters,
            } => {
                // Compare two previously dumped fingerprint files, restricted to the
                // fields selected via `--filters`.
                let prev_json = read_to_string(prev)?;
                let prev = serde_json::from_str(&prev_json)?;
                let curr_json = read_to_string(curr)?;
                let curr = serde_json::from_str(&curr_json)?;
                fingerprint::compare::compare(prev, curr, filters)?;
            }
        },
    }
    Ok(())
}
/// Entry point: parse the CLI arguments, execute the selected subcommand, and map the
/// outcome to a process exit code, printing any error to stderr.
fn main() -> std::process::ExitCode {
    match run(Cli::parse()) {
        Ok(()) => std::process::ExitCode::SUCCESS,
        Err(e) => {
            eprintln!("{}", e);
            std::process::ExitCode::FAILURE
        }
    }
}
#[cfg(test)]
mod tests {
    use std::io::Write;
    use vmm_sys_util::tempfile::TempFile;
    use super::*;
    // Sample modifiers for x86_64 that should work correctly as a CPU template and a guest CPU
    // config.
    // * CPUID leaf 0x0 / subleaf 0x0 / register eax indicates the maximum input EAX value for basic
    //   CPUID information.
    // * MSR index 0x4b564d00 indicates MSR_KVM_WALL_CLOCK_NEW.
    #[cfg(target_arch = "x86_64")]
    const SAMPLE_MODIFIERS: &str = r#"
{
"cpuid_modifiers": [
{
"leaf": "0x0",
"subleaf": "0x0",
"flags": 0,
"modifiers": [
{
"register": "eax",
"bitmap": "0b00000000000000000000000000000001"
}
]
}
],
"msr_modifiers": [
{
"addr": "0x4b564d00",
"bitmap": "0b0000000000000000000000000000000000000000000000000000000000000001"
}
]
}"#;
    // Sample modifiers for aarch64 that should work correctly as a CPU template and a guest CPU
    // config.
    // * Register ID 0x6030000000100002 indicates X1 register.
    #[cfg(target_arch = "aarch64")]
    const SAMPLE_MODIFIERS: &str = r#"
{
"reg_modifiers": [
{
"addr": "0x6030000000100002",
"bitmap": "0b00000001"
}
]
}"#;
    // Build a sample custom CPU template file on disk (deleted when the TempFile drops).
    fn generate_sample_template() -> TempFile {
        let file = TempFile::new().unwrap();
        file.as_file()
            .write_all(SAMPLE_MODIFIERS.as_bytes())
            .unwrap();
        file
    }
    // Build a sample fingerprint file whose guest CPU config is the sample modifiers above.
    fn generate_sample_fingerprint() -> TempFile {
        let fingerprint = fingerprint::Fingerprint {
            firecracker_version: crate::utils::CPU_TEMPLATE_HELPER_VERSION.to_string(),
            kernel_version: "sample_kernel_version".to_string(),
            microcode_version: "sample_microcode_version".to_string(),
            bios_version: "sample_bios_version".to_string(),
            bios_revision: "sample_bios_revision".to_string(),
            guest_cpu_config: serde_json::from_str(SAMPLE_MODIFIERS).unwrap(),
        };
        let file = TempFile::new().unwrap();
        file.as_file()
            .write_all(
                serde_json::to_string_pretty(&fingerprint)
                    .unwrap()
                    .as_bytes(),
            )
            .unwrap();
        file
    }
    // End-to-end smoke tests: each test drives `run()` through the real clap parser with
    // the same argv a user would pass.
    #[test]
    fn test_template_dump_command() {
        let output_file = TempFile::new().unwrap();
        let args = vec![
            "cpu-template-helper",
            "template",
            "dump",
            "--output",
            output_file.as_path().to_str().unwrap(),
        ];
        let cli = Cli::parse_from(args);
        run(cli).unwrap();
    }
    #[test]
    fn test_template_strip_command() {
        let files = [generate_sample_template(), generate_sample_template()];
        let mut args = vec!["cpu-template-helper", "template", "strip", "-p"];
        let paths = files
            .iter()
            .map(|file| file.as_path().to_str().unwrap())
            .collect::<Vec<_>>();
        args.extend(paths);
        let cli = Cli::parse_from(args);
        run(cli).unwrap();
    }
    #[test]
    fn test_template_verify_command() {
        let template_file = generate_sample_template();
        let args = vec![
            "cpu-template-helper",
            "template",
            "verify",
            "--template",
            template_file.as_path().to_str().unwrap(),
        ];
        let cli = Cli::parse_from(args);
        run(cli).unwrap();
    }
    #[test]
    fn test_fingerprint_dump_command() {
        let output_file = TempFile::new().unwrap();
        let args = vec![
            "cpu-template-helper",
            "fingerprint",
            "dump",
            "--output",
            output_file.as_path().to_str().unwrap(),
        ];
        let cli = Cli::parse_from(args);
        run(cli).unwrap();
    }
    #[test]
    fn test_fingerprint_compare_command() {
        let fingerprint_file1 = generate_sample_fingerprint();
        let fingerprint_file2 = generate_sample_fingerprint();
        // Pass every known fingerprint field explicitly through `--filters`.
        let filters = fingerprint::FingerprintField::value_variants()
            .iter()
            .map(|variant| variant.to_possible_value().unwrap().get_name().to_string())
            .collect::<Vec<_>>();
        let mut args = vec![
            "cpu-template-helper",
            "fingerprint",
            "compare",
            "--prev",
            fingerprint_file1.as_path().to_str().unwrap(),
            "--curr",
            fingerprint_file2.as_path().to_str().unwrap(),
            "--filters",
        ];
        for filter in &filters {
            args.push(filter);
        }
        let cli = Cli::parse_from(args);
        run(cli).unwrap();
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/cpu-template-helper/src/utils/x86_64.rs | src/cpu-template-helper/src/utils/x86_64.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::collections::HashMap;
use std::fmt::Display;
use vmm::cpu_config::templates::RegisterValueFilter;
use vmm::cpu_config::x86_64::cpuid::KvmCpuidFlags;
use vmm::cpu_config::x86_64::custom_cpu_template::{
CpuidLeafModifier, CpuidRegister, CpuidRegisterModifier, RegisterModifier,
};
use super::ModifierMapKey;
/// Key identifying a single CPUID register modifier: the (leaf, subleaf, flags) triple
/// that selects a CPUID entry, plus the register within that entry.
#[derive(Debug, Eq, PartialEq, Hash, Clone)]
pub struct CpuidModifierMapKey {
    pub leaf: u32,
    pub subleaf: u32,
    pub flags: KvmCpuidFlags,
    pub register: CpuidRegister,
}
// Marker impl: makes the key usable with the generic modifier-map helpers.
impl ModifierMapKey for CpuidModifierMapKey {}
impl Display for CpuidModifierMapKey {
    /// Render the key as `leaf=0x.., subleaf=0x.., flags=0b.., register=..` for use in
    /// human-readable verification/diff messages.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // `CpuidRegister` only derives `Debug`, so lowercase its debug form to get the
        // conventional register spelling (e.g. "eax").
        let register = format!("{:?}", self.register).to_lowercase();
        write!(
            f,
            "leaf={:#x}, subleaf={:#x}, flags={:#b}, register={register}",
            self.leaf, self.subleaf, self.flags.0,
        )
    }
}
/// Flat, order-independent representation of CPUID modifiers, keyed by
/// (leaf, subleaf, flags, register) for lookup and comparison.
#[derive(Debug, Eq, PartialEq)]
pub struct CpuidModifierMap(pub HashMap<CpuidModifierMapKey, RegisterValueFilter<u32>>);
impl From<Vec<CpuidLeafModifier>> for CpuidModifierMap {
fn from(leaf_modifiers: Vec<CpuidLeafModifier>) -> Self {
let mut map = HashMap::new();
for leaf_modifier in leaf_modifiers {
for reg_modifier in leaf_modifier.modifiers {
map.insert(
CpuidModifierMapKey {
leaf: leaf_modifier.leaf,
subleaf: leaf_modifier.subleaf,
flags: leaf_modifier.flags,
register: reg_modifier.register,
},
reg_modifier.bitmap,
);
}
}
CpuidModifierMap(map)
}
}
impl From<CpuidModifierMap> for Vec<CpuidLeafModifier> {
    /// Rebuild the nested template representation from the flat map, grouping register
    /// modifiers under their (leaf, subleaf, flags) entry and sorting the result.
    fn from(modifier_map: CpuidModifierMap) -> Self {
        let mut leaf_modifiers = Vec::<CpuidLeafModifier>::new();
        for (modifier_key, modifier_value) in modifier_map.0 {
            // Look for an already-created leaf entry matching this key's leaf/subleaf/flags.
            let leaf_modifier = leaf_modifiers.iter_mut().find(|leaf_modifier| {
                leaf_modifier.leaf == modifier_key.leaf
                    && leaf_modifier.subleaf == modifier_key.subleaf
                    && leaf_modifier.flags == modifier_key.flags
            });
            if let Some(leaf_modifier) = leaf_modifier {
                // Append to the existing leaf entry.
                leaf_modifier.modifiers.push(CpuidRegisterModifier {
                    register: modifier_key.register,
                    bitmap: modifier_value,
                });
            } else {
                // First modifier seen for this leaf entry: create it.
                leaf_modifiers.push(CpuidLeafModifier {
                    leaf: modifier_key.leaf,
                    subleaf: modifier_key.subleaf,
                    flags: modifier_key.flags,
                    modifiers: vec![CpuidRegisterModifier {
                        register: modifier_key.register,
                        bitmap: modifier_value,
                    }],
                });
            }
        }
        // HashMap iteration order is unspecified, so sort leaves (by leaf then subleaf)
        // and registers to make the output deterministic.
        leaf_modifiers.sort_by_key(|leaf_modifier| (leaf_modifier.leaf, leaf_modifier.subleaf));
        leaf_modifiers.iter_mut().for_each(|leaf_modifier| {
            leaf_modifier
                .modifiers
                .sort_by_key(|reg_modifier| reg_modifier.register.clone())
        });
        leaf_modifiers
    }
}
/// Key identifying a single MSR modifier by its MSR index.
#[derive(Debug, Eq, PartialEq, Hash, Clone)]
pub struct MsrModifierMapKey(pub u32);
// Marker impl: makes the key usable with the generic modifier-map helpers.
impl ModifierMapKey for MsrModifierMapKey {}
impl Display for MsrModifierMapKey {
    // Rendered as `index=0x..` in verification/diff messages.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "index={:#x}", self.0)
    }
}
/// Flat, order-independent representation of MSR modifiers, keyed by MSR index.
#[derive(Debug, Eq, PartialEq)]
pub struct MsrModifierMap(pub HashMap<MsrModifierMapKey, RegisterValueFilter<u64>>);
impl From<Vec<RegisterModifier>> for MsrModifierMap {
fn from(modifiers: Vec<RegisterModifier>) -> Self {
let mut map = HashMap::new();
for modifier in modifiers {
map.insert(MsrModifierMapKey(modifier.addr), modifier.bitmap);
}
MsrModifierMap(map)
}
}
impl From<MsrModifierMap> for Vec<RegisterModifier> {
    /// Convert the map back into a list sorted by MSR index so the output is
    /// deterministic despite unspecified `HashMap` iteration order.
    fn from(modifier_map: MsrModifierMap) -> Self {
        let mut modifiers: Vec<RegisterModifier> = modifier_map
            .0
            .into_iter()
            .map(|(key, bitmap)| RegisterModifier { addr: key.0, bitmap })
            .collect();
        modifiers.sort_by_key(|modifier| modifier.addr);
        modifiers
    }
}
// Constructs a `CpuidRegisterModifier`. The two-argument form defaults the filter to all
// bits set, i.e. the whole register value is significant.
macro_rules! cpuid_reg_modifier {
    ($register:expr, $value:expr) => {
        CpuidRegisterModifier {
            register: $register,
            bitmap: RegisterValueFilter {
                filter: u32::MAX.into(),
                value: $value,
            },
        }
    };
    ($register:expr, $value:expr, $filter:expr) => {
        CpuidRegisterModifier {
            register: $register,
            bitmap: RegisterValueFilter {
                filter: $filter,
                value: $value,
            },
        }
    };
}
// Constructs a `CpuidLeafModifier` from leaf/subleaf/flags and a list of register
// modifiers (typically built with `cpuid_reg_modifier!`).
macro_rules! cpuid_leaf_modifier {
    ($leaf:expr, $subleaf:expr, $flags:expr, $reg_modifiers:expr) => {
        CpuidLeafModifier {
            leaf: $leaf,
            subleaf: $subleaf,
            flags: $flags,
            modifiers: $reg_modifiers,
        }
    };
}
// Constructs an MSR `RegisterModifier`. The two-argument form defaults the filter to all
// bits set.
macro_rules! msr_modifier {
    ($addr:expr, $value:expr) => {
        RegisterModifier {
            addr: $addr,
            bitmap: RegisterValueFilter {
                filter: u64::MAX,
                value: $value,
            },
        }
    };
    ($addr:expr, $value:expr, $filter:expr) => {
        RegisterModifier {
            addr: $addr,
            bitmap: RegisterValueFilter {
                filter: $filter,
                value: $value,
            },
        }
    };
}
// Re-export the macros for use by sibling modules in this crate.
pub(crate) use {cpuid_leaf_modifier, cpuid_reg_modifier, msr_modifier};
#[cfg(test)]
mod tests {
    use vmm::cpu_config::x86_64::custom_cpu_template::CpuidRegister::*;
    use vmm::cpu_config::x86_64::custom_cpu_template::CpuidRegisterModifier;
    use super::*;
    use crate::utils::x86_64::{cpuid_leaf_modifier, cpuid_reg_modifier, msr_modifier};
    // Builds a (key, value) pair for a CPUID modifier map entry with an all-ones filter.
    macro_rules! cpuid_modifier_map {
        ($leaf:expr, $subleaf:expr, $flags:expr, $register:expr, $value:expr) => {
            (
                CpuidModifierMapKey {
                    leaf: $leaf,
                    subleaf: $subleaf,
                    flags: $flags,
                    register: $register,
                },
                RegisterValueFilter {
                    filter: u32::MAX.into(),
                    value: $value,
                },
            )
        };
    }
    // Builds a (key, value) pair for an MSR modifier map entry with an all-ones filter.
    macro_rules! msr_modifier_map {
        ($addr:expr, $value:expr) => {
            (
                MsrModifierMapKey($addr),
                RegisterValueFilter {
                    filter: u64::MAX.into(),
                    value: $value,
                },
            )
        };
    }
    #[test]
    fn test_format_cpuid_modifier_map_key() {
        let key = CpuidModifierMapKey {
            leaf: 0x0,
            subleaf: 0x1,
            flags: KvmCpuidFlags::STATEFUL_FUNC,
            register: Edx,
        };
        assert_eq!(
            key.to_string(),
            "leaf=0x0, subleaf=0x1, flags=0b10, register=edx",
        )
    }
    // Vec and map fixtures below describe the same set of modifiers; the round-trip tests
    // check the two `From` conversions against each other.
    #[rustfmt::skip]
    fn build_sample_cpuid_modifier_vec() -> Vec<CpuidLeafModifier> {
        vec![
            cpuid_leaf_modifier!(0x0, 0x0, KvmCpuidFlags::EMPTY, vec![
                cpuid_reg_modifier!(Eax, 0x0),
            ]),
            cpuid_leaf_modifier!(0x1, 0x2, KvmCpuidFlags::SIGNIFICANT_INDEX, vec![
                cpuid_reg_modifier!(Ebx, 0x3),
                cpuid_reg_modifier!(Ecx, 0x4),
            ]),
        ]
    }
    #[rustfmt::skip]
    fn build_sample_cpuid_modifier_map() -> CpuidModifierMap {
        CpuidModifierMap(HashMap::from([
            cpuid_modifier_map!(0x0, 0x0, KvmCpuidFlags::EMPTY, Eax, 0x0),
            cpuid_modifier_map!(0x1, 0x2, KvmCpuidFlags::SIGNIFICANT_INDEX, Ebx, 0x3),
            cpuid_modifier_map!(0x1, 0x2, KvmCpuidFlags::SIGNIFICANT_INDEX, Ecx, 0x4),
        ]))
    }
    #[test]
    fn test_cpuid_modifier_from_vec_to_map() {
        let modifier_vec = build_sample_cpuid_modifier_vec();
        let modifier_map = build_sample_cpuid_modifier_map();
        assert_eq!(CpuidModifierMap::from(modifier_vec), modifier_map);
    }
    #[test]
    fn test_cpuid_modifier_from_map_to_vec() {
        let modifier_map = build_sample_cpuid_modifier_map();
        let modifier_vec = build_sample_cpuid_modifier_vec();
        assert_eq!(Vec::<CpuidLeafModifier>::from(modifier_map), modifier_vec);
    }
    #[test]
    fn test_format_msr_modifier_map_key() {
        let key = MsrModifierMapKey(0x1234);
        assert_eq!(key.to_string(), "index=0x1234");
    }
    fn build_sample_msr_modifier_vec() -> Vec<RegisterModifier> {
        vec![
            msr_modifier!(0x0, 0x0),
            msr_modifier!(0x1, 0x2),
            msr_modifier!(0x3, 0x2),
        ]
    }
    fn build_sample_msr_modifier_map() -> MsrModifierMap {
        MsrModifierMap(HashMap::from([
            msr_modifier_map!(0x0, 0x0),
            msr_modifier_map!(0x1, 0x2),
            msr_modifier_map!(0x3, 0x2),
        ]))
    }
    #[test]
    fn test_msr_modifier_from_vec_to_map() {
        let modifier_vec = build_sample_msr_modifier_vec();
        let modifier_map = build_sample_msr_modifier_map();
        assert_eq!(MsrModifierMap::from(modifier_vec), modifier_map);
    }
    #[test]
    fn test_msr_modifier_from_map_to_vec() {
        let modifier_map = build_sample_msr_modifier_map();
        let modifier_vec = build_sample_msr_modifier_vec();
        assert_eq!(Vec::<RegisterModifier>::from(modifier_map), modifier_vec);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/cpu-template-helper/src/utils/mod.rs | src/cpu-template-helper/src/utils/mod.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::ffi::OsString;
use std::fmt::Display;
use std::fs::read_to_string;
use std::hash::Hash;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use vmm::builder::{StartMicrovmError, build_microvm_for_boot};
use vmm::cpu_config::templates::{CustomCpuTemplate, Numeric};
use vmm::resources::VmResources;
use vmm::seccomp::get_empty_filters;
use vmm::vmm_config::instance_info::{InstanceInfo, VmState};
use vmm::{EventManager, HTTP_MAX_PAYLOAD_SIZE, Vmm};
use vmm_sys_util::tempfile::TempFile;
#[cfg(target_arch = "aarch64")]
pub mod aarch64;
#[cfg(target_arch = "x86_64")]
pub mod x86_64;
// Version of this helper binary, taken from Cargo at compile time; also recorded in
// dumped fingerprints.
pub const CPU_TEMPLATE_HELPER_VERSION: &str = env!("CARGO_PKG_VERSION");
/// Trait for key of `HashMap`-based modifier.
///
/// This is a wrapper trait of some traits required for a key of `HashMap` modifier.
pub trait ModifierMapKey: Eq + PartialEq + Hash + Display + Clone {}
/// Renders a bit-level diff between a CPU template value and a guest CPU config value.
pub trait DiffString<V> {
    // Generate a string to display difference of filtered values between CPU template and guest
    // CPU config.
    #[rustfmt::skip]
    fn to_diff_string(template: V, config: V) -> String;
}
impl<V: Numeric> DiffString<V> for V {
    // Generate a string to display difference of filtered values between CPU template and guest
    // CPU config.
    #[rustfmt::skip]
    fn to_diff_string(template: V, config: V) -> String {
        // Walk the bits from MSB to LSB, emitting '^' where the two values differ and a
        // space where they agree, so the marker line aligns under the binary dumps below.
        let mut diff = String::new();
        for i in (0..V::BITS).rev() {
            let mask = V::one() << i;
            let template_bit = template & mask;
            let config_bit = config & mask;
            diff.push(match template_bit == config_bit {
                true => ' ',
                false => '^',
            });
        }
        format!(
            "* CPU template : 0b{template:0width$b}\n\
            * CPU configuration: 0b{config:0width$b}\n\
            * Diff : {diff}",
            // Zero-pad both values to the full bit width of V.
            width = V::BITS as usize,
        )
    }
}
// NOTE: the variant doc comments below are `displaydoc` format strings — they are the
// runtime error messages, not just documentation.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum UtilsError {
    /// Failed to create VmResources: {0}
    CreateVmResources(vmm::resources::ResourcesError),
    /// Failed to build microVM: {0}
    BuildMicroVm(#[from] StartMicrovmError),
    /// Failed to create temporary file: {0}
    CreateTempFile(#[from] vmm_sys_util::errno::Error),
    /// Failed to operate file: {0}
    FileIo(#[from] std::io::Error),
    /// Failed to serialize/deserialize JSON file: {0}
    Serde(#[from] serde_json::Error),
}
/// Read and deserialize a custom CPU template from the JSON file at `path`.
///
/// # Errors
/// Returns `UtilsError::FileIo` if the file cannot be read and `UtilsError::Serde` if it
/// is not a valid CPU template JSON document.
pub fn load_cpu_template(path: &PathBuf) -> Result<CustomCpuTemplate, UtilsError> {
    let raw = read_to_string(path)?;
    Ok(serde_json::from_str(&raw)?)
}
// Utility function to prepare scratch kernel image and rootfs and build mock Firecracker config.
// The returned TempFiles must be kept alive as long as the config referencing them is used,
// since dropping them deletes the files.
fn build_mock_config() -> Result<(TempFile, TempFile, String), UtilsError> {
    // Minimal bundled kernel image that is enough to build (not run) a microVM.
    let kernel = TempFile::new()?;
    kernel
        .as_file()
        .write_all(include_bytes!("mock_kernel/kernel.bin"))?;
    // Empty scratch rootfs; only its path is needed.
    let rootfs = TempFile::new()?;
    let config = format!(
        r#"{{
"boot-source": {{
"kernel_image_path": "{}"
}},
"drives": [
{{
"drive_id": "rootfs",
"is_root_device": true,
"path_on_host": "{}"
}}
]
}}"#,
        // Temporary file path consists of alphanumerics.
        kernel.as_path().to_str().unwrap(),
        rootfs.as_path().to_str().unwrap(),
    );
    Ok((kernel, rootfs, config))
}
/// Build a (not started) microVM from an optional Firecracker config JSON and an optional
/// custom CPU template, returning the VMM handle and the resources it was built from.
///
/// When `config` is `None`, a mock config with a scratch kernel/rootfs is used; the
/// backing temp files live only for the duration of this function, which is sufficient
/// because the microVM is fully built before they are dropped.
pub fn build_microvm_from_config(
    config: Option<String>,
    template: Option<CustomCpuTemplate>,
) -> Result<(Arc<Mutex<Vmm>>, VmResources), UtilsError> {
    // Prepare resources from the given config file.
    let (_kernel, _rootfs, config) = match config {
        Some(config) => (None, None, config),
        None => {
            let (kernel, rootfs, config) = build_mock_config()?;
            (Some(kernel), Some(rootfs), config)
        }
    };
    let instance_info = InstanceInfo {
        id: "anonymous-instance".to_string(),
        state: VmState::NotStarted,
        vmm_version: CPU_TEMPLATE_HELPER_VERSION.to_string(),
        app_name: "cpu-template-helper".to_string(),
    };
    let mut vm_resources =
        VmResources::from_json(&config, &instance_info, HTTP_MAX_PAYLOAD_SIZE, None)
            .map_err(UtilsError::CreateVmResources)?;
    // The CPU template (if any) overrides whatever the config specified.
    if let Some(template) = template {
        vm_resources.set_custom_cpu_template(template);
    }
    let mut event_manager = EventManager::new().unwrap();
    // No seccomp restrictions are needed for this offline helper.
    let seccomp_filters = get_empty_filters();
    // Build a microVM.
    let vmm = build_microvm_for_boot(
        &instance_info,
        &vm_resources,
        &mut event_manager,
        &seccomp_filters,
    )?;
    Ok((vmm, vm_resources))
}
/// Return `path` with `suffix` inserted between the file stem and the extension
/// (e.g. `dir/file.ext` + `_x` -> `dir/file_x.ext`).
///
/// # Panics
/// Panics if `path` has no file stem (e.g. an empty path or `..`).
pub fn add_suffix(path: &Path, suffix: &str) -> PathBuf {
    let stem = path.file_stem().unwrap();
    // Rebuild the file name as "<stem><suffix>[.<ext>]".
    let new_file_name = match path.extension() {
        Some(ext) => {
            let mut name = stem.to_os_string();
            name.push(suffix);
            name.push(".");
            name.push(ext);
            name
        }
        None => {
            let mut name = stem.to_os_string();
            name.push(suffix);
            name
        }
    };
    path.with_file_name(new_file_name)
}
#[cfg(test)]
pub mod tests {
    use std::fmt::Display;
    use vmm::resources::VmmConfig;
    use super::*;
    const SUFFIX: &str = "_suffix";
    // Minimal ModifierMapKey implementation shared by sibling modules' unit tests.
    #[derive(Debug, PartialEq, Eq, Hash, Clone)]
    pub struct MockModifierMapKey(pub u8);
    impl ModifierMapKey for MockModifierMapKey {}
    impl Display for MockModifierMapKey {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(f, "ID={:#x}", self.0)
        }
    }
    // Builds a (key, value) map entry; the two-argument form defaults the filter to all
    // bits set.
    macro_rules! mock_modifier {
        ($key:expr, $value:expr) => {
            (
                MockModifierMapKey($key),
                RegisterValueFilter::<u8> {
                    filter: u8::MAX,
                    value: $value,
                },
            )
        };
        ($key:expr, $value:expr, $filter:expr) => {
            (
                MockModifierMapKey($key),
                RegisterValueFilter::<u8> {
                    filter: $filter,
                    value: $value,
                },
            )
        };
    }
    pub(crate) use mock_modifier;
    #[test]
    fn test_build_mock_config() {
        let kernel_path;
        let rootfs_path;
        {
            let (kernel, rootfs, config) = build_mock_config().unwrap();
            kernel_path = kernel.as_path().to_path_buf();
            rootfs_path = rootfs.as_path().to_path_buf();
            // Ensure the kernel exists and its content is written.
            assert!(kernel.as_file().metadata().unwrap().len() > 0);
            // Ensure the rootfs exists and it is empty.
            assert_eq!(rootfs.as_file().metadata().unwrap().len(), 0);
            // Ensure the generated config is valid as `VmmConfig`.
            serde_json::from_str::<VmmConfig>(&config).unwrap();
        }
        // Ensure the temporary mock resources are deleted.
        assert!(!kernel_path.exists());
        assert!(!rootfs_path.exists());
    }
    #[test]
    fn test_build_microvm() {
        build_microvm_from_config(None, None).unwrap();
    }
    #[test]
    fn test_add_suffix_filename_only() {
        let path = PathBuf::from("file.ext");
        let expected = PathBuf::from(format!("file{SUFFIX}.ext"));
        assert_eq!(add_suffix(&path, SUFFIX), expected);
    }
    #[test]
    fn test_add_suffix_filename_without_ext() {
        let path = PathBuf::from("file_no_ext");
        let expected = PathBuf::from(format!("file_no_ext{SUFFIX}"));
        assert_eq!(add_suffix(&path, SUFFIX), expected);
    }
    #[test]
    fn test_add_suffix_rel_path() {
        let path = PathBuf::from("relative/path/to/file.ext");
        let expected = PathBuf::from(format!("relative/path/to/file{SUFFIX}.ext"));
        assert_eq!(add_suffix(&path, SUFFIX), expected);
    }
    #[test]
    fn test_add_suffix_abs_path() {
        let path = PathBuf::from("/absolute/path/to/file.ext");
        let expected = PathBuf::from(format!("/absolute/path/to/file{SUFFIX}.ext"));
        assert_eq!(add_suffix(&path, SUFFIX), expected);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/cpu-template-helper/src/utils/aarch64.rs | src/cpu-template-helper/src/utils/aarch64.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::collections::HashMap;
use std::fmt::Display;
use vmm::cpu_config::aarch64::custom_cpu_template::RegisterModifier;
use vmm::cpu_config::templates::RegisterValueFilter;
use super::ModifierMapKey;
/// Key identifying a single aarch64 register modifier by its KVM register ID.
#[derive(Debug, Eq, PartialEq, Hash, Clone)]
pub struct RegModifierMapKey(pub u64);
// Marker impl: makes the key usable with the generic modifier-map helpers.
impl ModifierMapKey for RegModifierMapKey {}
impl Display for RegModifierMapKey {
    // Rendered as `ID=0x..` in verification/diff messages.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "ID={:#x}", self.0)
    }
}
/// Flat, order-independent representation of register modifiers, keyed by register ID.
#[derive(Debug, Eq, PartialEq)]
pub struct RegModifierMap(pub HashMap<RegModifierMapKey, RegisterValueFilter<u128>>);
impl From<Vec<RegisterModifier>> for RegModifierMap {
fn from(modifiers: Vec<RegisterModifier>) -> Self {
let mut map = HashMap::new();
for modifier in modifiers {
map.insert(RegModifierMapKey(modifier.addr), modifier.bitmap);
}
RegModifierMap(map)
}
}
impl From<RegModifierMap> for Vec<RegisterModifier> {
    /// Convert the map back into a list sorted by register ID so the output is
    /// deterministic despite unspecified `HashMap` iteration order.
    fn from(modifier_map: RegModifierMap) -> Self {
        let mut modifiers: Vec<RegisterModifier> = modifier_map
            .0
            .into_iter()
            .map(|(key, bitmap)| RegisterModifier { addr: key.0, bitmap })
            .collect();
        modifiers.sort_by_key(|modifier| modifier.addr);
        modifiers
    }
}
// Constructs an aarch64 `RegisterModifier`. The two-argument form defaults the filter to
// all bits set, i.e. the whole register value is significant.
macro_rules! reg_modifier {
    ($addr:expr, $value:expr) => {
        RegisterModifier {
            addr: $addr,
            bitmap: RegisterValueFilter {
                filter: u128::MAX,
                value: $value,
            },
        }
    };
    ($addr:expr, $value:expr, $filter:expr) => {
        RegisterModifier {
            addr: $addr,
            bitmap: RegisterValueFilter {
                filter: $filter,
                value: $value,
            },
        }
    };
}
// Re-export the macro for use by sibling modules in this crate.
pub(crate) use reg_modifier;
#[cfg(test)]
mod tests {
    use super::*;
    // Builds a (key, value) map entry with an all-ones filter.
    macro_rules! reg_modifier_map {
        ($id:expr, $value:expr) => {
            (
                RegModifierMapKey($id),
                RegisterValueFilter {
                    filter: u128::MAX,
                    value: $value,
                },
            )
        };
    }
    #[test]
    fn test_format_reg_modifier_map_key() {
        let key = RegModifierMapKey(0x1234);
        assert_eq!(key.to_string(), "ID=0x1234");
    }
    // Vec and map fixtures describe the same modifiers; the round-trip tests check the
    // two `From` conversions against each other.
    fn build_sample_reg_modifier_vec() -> Vec<RegisterModifier> {
        vec![
            reg_modifier!(0x0, 0x0),
            reg_modifier!(0x1, 0x2),
            reg_modifier!(0x3, 0x2),
        ]
    }
    fn build_sample_reg_modifier_map() -> RegModifierMap {
        RegModifierMap(HashMap::from([
            reg_modifier_map!(0x0, 0x0),
            reg_modifier_map!(0x1, 0x2),
            reg_modifier_map!(0x3, 0x2),
        ]))
    }
    #[test]
    fn test_reg_modifier_from_vec_to_map() {
        let modifier_vec = build_sample_reg_modifier_vec();
        let modifier_map = build_sample_reg_modifier_map();
        assert_eq!(RegModifierMap::from(modifier_vec), modifier_map);
    }
    #[test]
    fn test_reg_modifier_from_map_to_vec() {
        let modifier_map = build_sample_reg_modifier_map();
        let modifier_vec = build_sample_reg_modifier_vec();
        assert_eq!(Vec::<RegisterModifier>::from(modifier_map), modifier_vec);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/cpu-template-helper/src/template/mod.rs | src/cpu-template-helper/src/template/mod.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/// Dumping a guest CPU configuration in the CPU template format.
pub mod dump;
/// Stripping entries shared by multiple CPU templates.
pub mod strip;
/// Verifying that a CPU template is applied to a guest as intended.
pub mod verify;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/cpu-template-helper/src/template/verify/x86_64.rs | src/cpu-template-helper/src/template/verify/x86_64.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vmm::cpu_config::templates::CustomCpuTemplate;
use super::{VerifyError, verify_common};
use crate::utils::x86_64::{CpuidModifierMap, MsrModifierMap};
/// Check that every CPUID and MSR entry of the CPU template is reflected in the guest's
/// dumped CPU configuration, comparing only the bits selected by each entry's filter.
pub fn verify(
    cpu_template: CustomCpuTemplate,
    cpu_config: CustomCpuTemplate,
) -> Result<(), VerifyError> {
    // CPUID entries first, then MSRs; the first mismatch aborts with an error.
    verify_common(
        CpuidModifierMap::from(cpu_template.cpuid_modifiers).0,
        CpuidModifierMap::from(cpu_config.cpuid_modifiers).0,
    )?;
    verify_common(
        MsrModifierMap::from(cpu_template.msr_modifiers).0,
        MsrModifierMap::from(cpu_config.msr_modifiers).0,
    )
}
#[cfg(test)]
mod tests {
    use std::collections::HashMap;
    use vmm::cpu_config::templates::RegisterValueFilter;
    use vmm::cpu_config::x86_64::cpuid::KvmCpuidFlags;
    use vmm::cpu_config::x86_64::custom_cpu_template::CpuidRegister::*;
    use vmm::cpu_config::x86_64::custom_cpu_template::{
        CpuidLeafModifier, CpuidRegisterModifier, RegisterModifier,
    };
    use super::*;
    use crate::utils::x86_64::{
        CpuidModifierMapKey, MsrModifierMapKey, cpuid_leaf_modifier, cpuid_reg_modifier,
        msr_modifier,
    };
    // NOTE(review): the key-formatting and vec->map tests below duplicate the unit tests
    // in `crate::utils::x86_64` — consider deduplicating.
    macro_rules! cpuid_modifier_map {
        ($leaf:expr, $subleaf:expr, $flags:expr, $register:expr, $value:expr) => {
            (
                CpuidModifierMapKey {
                    leaf: $leaf,
                    subleaf: $subleaf,
                    flags: $flags,
                    register: $register,
                },
                RegisterValueFilter {
                    filter: u32::MAX.into(),
                    value: $value,
                },
            )
        };
    }
    macro_rules! msr_modifier_map {
        ($addr:expr, $value:expr) => {
            (
                MsrModifierMapKey($addr),
                RegisterValueFilter {
                    filter: u64::MAX.into(),
                    value: $value,
                },
            )
        };
    }
    #[test]
    fn test_format_cpuid_modifier_map_key() {
        let key = CpuidModifierMapKey {
            leaf: 0x0,
            subleaf: 0x1,
            flags: KvmCpuidFlags::STATEFUL_FUNC,
            register: Edx,
        };
        assert_eq!(
            key.to_string(),
            "leaf=0x0, subleaf=0x1, flags=0b10, register=edx",
        )
    }
    #[test]
    #[rustfmt::skip]
    fn test_cpuid_modifier_from_vec_to_map() {
        let modifier_vec = vec![
            cpuid_leaf_modifier!(0x0, 0x0, KvmCpuidFlags::EMPTY, vec![
                cpuid_reg_modifier!(Eax, 0x0),
            ]),
            cpuid_leaf_modifier!(0x1, 0x2, KvmCpuidFlags::SIGNIFICANT_INDEX, vec![
                cpuid_reg_modifier!(Ebx, 0x3),
                cpuid_reg_modifier!(Ecx, 0x4),
            ]),
        ];
        let modifier_map = HashMap::from([
            cpuid_modifier_map!(0x0, 0x0, KvmCpuidFlags::EMPTY, Eax, 0x0),
            cpuid_modifier_map!(0x1, 0x2, KvmCpuidFlags::SIGNIFICANT_INDEX, Ebx, 0x3),
            cpuid_modifier_map!(0x1, 0x2, KvmCpuidFlags::SIGNIFICANT_INDEX, Ecx, 0x4),
        ]);
        assert_eq!(
            CpuidModifierMap::from(modifier_vec),
            CpuidModifierMap(modifier_map),
        );
    }
    #[test]
    fn test_format_msr_modifier_map_key() {
        let key = MsrModifierMapKey(0x1234);
        assert_eq!(key.to_string(), "index=0x1234");
    }
    #[test]
    fn test_msr_modifier_from_vec_to_map() {
        let modifier_vec = vec![
            msr_modifier!(0x1, 0x2),
            msr_modifier!(0x0, 0x0),
            msr_modifier!(0x3, 0x2),
        ];
        let modifier_map = HashMap::from([
            msr_modifier_map!(0x0, 0x0),
            msr_modifier_map!(0x1, 0x2),
            msr_modifier_map!(0x3, 0x2),
        ]);
        assert_eq!(
            MsrModifierMap::from(modifier_vec),
            MsrModifierMap(modifier_map),
        );
    }
    #[test]
    #[rustfmt::skip]
    fn test_verify_non_existing_cpuid() {
        // Test with a sample whose CPUID exists in template, but not in config.
        let template = CustomCpuTemplate {
            cpuid_modifiers: vec![cpuid_leaf_modifier!(0x0, 0x0, KvmCpuidFlags::EMPTY, vec![
                cpuid_reg_modifier!(Eax, 0b10101010, 0b11110000),
                cpuid_reg_modifier!(Ebx, 0b01010101, 0b00001111),
            ])],
            msr_modifiers: vec![],
            ..Default::default()
        };
        let config = CustomCpuTemplate {
            cpuid_modifiers: vec![cpuid_leaf_modifier!(0x0, 0x0, KvmCpuidFlags::EMPTY, vec![
                cpuid_reg_modifier!(Eax, 0b10101010, 0b11111111),
            ])],
            msr_modifiers: vec![],
            ..Default::default()
        };
        assert_eq!(
            verify(template, config).unwrap_err().to_string(),
            "leaf=0x0, subleaf=0x0, flags=0b0, register=ebx not found in CPU configuration."
        );
    }
    #[test]
    #[rustfmt::skip]
    fn test_verify_mismatched_cpuid() {
        // Test with a sample whose CPUID value mismatches.
        let template = CustomCpuTemplate {
            cpuid_modifiers: vec![cpuid_leaf_modifier!(0x0, 0x0, KvmCpuidFlags::EMPTY,
                vec![cpuid_reg_modifier!(Eax, 0b10101010, 0b11110000)]
            )],
            msr_modifiers: vec![],
            ..Default::default()
        };
        let config = CustomCpuTemplate {
            cpuid_modifiers: vec![cpuid_leaf_modifier!(0x0, 0x0, KvmCpuidFlags::EMPTY,
                vec![cpuid_reg_modifier!(Eax, 0b11111111)]
            )],
            msr_modifiers: vec![],
            ..Default::default()
        };
        assert_eq!(
            verify(template, config).unwrap_err().to_string(),
            "Value for leaf=0x0, subleaf=0x0, flags=0b0, register=eax mismatched.\n\
            * CPU template : 0b00000000000000000000000010100000\n\
            * CPU configuration: 0b00000000000000000000000011110000\n\
            * Diff : ^ ^ ",
        );
    }
    #[test]
    #[rustfmt::skip]
    fn test_verify_non_existing_msr() {
        // Test with a sample whose MSR exists in template, but not in config.
        let template = CustomCpuTemplate {
            cpuid_modifiers: vec![],
            msr_modifiers: vec![
                msr_modifier!(0x0, 0b00000000),
                msr_modifier!(0x1, 0b11111111),
            ],
            ..Default::default()
        };
        let config = CustomCpuTemplate {
            cpuid_modifiers: vec![],
            msr_modifiers: vec![
                msr_modifier!(0x0, 0b00000000),
            ],
            ..Default::default()
        };
        assert_eq!(
            verify(template, config).unwrap_err().to_string(),
            "index=0x1 not found in CPU configuration."
        );
    }
    #[test]
    #[rustfmt::skip]
    fn test_verify_mismatched_msr() {
        // Test with a sample whose CPUID value mismatches.
        let template = CustomCpuTemplate {
            cpuid_modifiers: vec![],
            msr_modifiers: vec![
                msr_modifier!(0x0, 0b10101010, 0b11110000),
            ],
            ..Default::default()
        };
        let config = CustomCpuTemplate {
            cpuid_modifiers: vec![],
            msr_modifiers: vec![
                msr_modifier!(0x0, 0b01010101, 0b11111111)
            ],
            ..Default::default()
        };
        assert_eq!(
            verify(template, config).unwrap_err().to_string(),
            "Value for index=0x0 mismatched.\n\
            * CPU template : 0b0000000000000000000000000000000000000000000000000000000010100000\n\
            * CPU configuration: 0b0000000000000000000000000000000000000000000000000000000001010000\n\
            * Diff : ^^^^ ",
        );
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/cpu-template-helper/src/template/verify/mod.rs | src/cpu-template-helper/src/template/verify/mod.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::collections::HashMap;
use std::fmt::Debug;
use vmm::cpu_config::templates::{Numeric, RegisterValueFilter};
use crate::utils::{DiffString, ModifierMapKey};
#[cfg(target_arch = "aarch64")]
mod aarch64;
#[cfg(target_arch = "aarch64")]
pub use aarch64::verify;
#[cfg(target_arch = "x86_64")]
mod x86_64;
#[cfg(target_arch = "x86_64")]
pub use x86_64::verify;
// NOTE: the variant doc comments below are `displaydoc` format strings — they are the
// runtime error messages, not just documentation.
#[rustfmt::skip]
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum VerifyError {
    /// {0} not found in CPU configuration.
    KeyNotFound(String),
    /** Value for {0} mismatched.
    {1} */
    ValueMismatched(String, String),
}
/// Verify that the given CPU template is applied as intended.
///
/// This function is an arch-agnostic part of CPU template verification. As template formats differ
/// between x86_64 and aarch64, the arch-specific part converts the structure to an arch-agnostic
/// `HashMap` implementing `ModifierMapKey` before calling this arch-agnostic function.
///
/// # Errors
/// `KeyNotFound` if a template key is absent from the config; `ValueMismatched` (with a
/// bit-level diff) if the filtered values differ.
pub fn verify_common<K, V>(
    template: HashMap<K, RegisterValueFilter<V>>,
    config: HashMap<K, RegisterValueFilter<V>>,
) -> Result<(), VerifyError>
where
    K: ModifierMapKey + Debug,
    V: Numeric + Debug,
{
    for (key, template_value_filter) in template {
        match config.get(&key) {
            None => return Err(VerifyError::KeyNotFound(key.to_string())),
            Some(config_value_filter) => {
                // Compare only the bits selected by the template's filter.
                let template_value = template_value_filter.value & template_value_filter.filter;
                let config_value = config_value_filter.value & template_value_filter.filter;
                if template_value != config_value {
                    return Err(VerifyError::ValueMismatched(
                        key.to_string(),
                        V::to_diff_string(template_value, config_value),
                    ));
                }
            }
        }
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::utils::tests::{MockModifierMapKey, mock_modifier};
    #[test]
    fn test_verify_modifier_map_with_non_existing_key() {
        // Test with a sample where a key in CPU template is not found in CPU config.
        let cpu_template_map = HashMap::from([mock_modifier!(0x0, 0b0000_0000)]);
        let cpu_config_map = HashMap::new();
        assert_eq!(
            verify_common(cpu_template_map, cpu_config_map)
                .unwrap_err()
                .to_string(),
            "ID=0x0 not found in CPU configuration.".to_string()
        );
    }
    #[test]
    #[rustfmt::skip]
    fn test_verify_modifier_map_with_mismatched_value() {
        // Test with a sample whose filtered value mismatches between CPU config and CPU template.
        let cpu_template_map =
            HashMap::from([mock_modifier!(0x0, 0b0000_0101, 0b0000_1111)]);
        let cpu_config_map =
            HashMap::from([mock_modifier!(0x0, 0b0000_0000, 0b1111_1111)]);
        assert_eq!(
            verify_common(cpu_template_map, cpu_config_map)
                .unwrap_err()
                .to_string(),
            "Value for ID=0x0 mismatched.\n\
            * CPU template : 0b00000101\n\
            * CPU configuration: 0b00000000\n\
            * Diff : ^ ^"
        )
    }
    #[test]
    fn test_verify_modifier_map_with_valid_value() {
        // Test with valid CPU template and CPU config.
        // Only the bits selected by the template's filter (0b0000_1111) must match.
        let cpu_template_map = HashMap::from([mock_modifier!(0x0, 0b0000_1010, 0b0000_1111)]);
        let cpu_config_map = HashMap::from([mock_modifier!(0x0, 0b1010_1010, 0b1111_1111)]);
        verify_common(cpu_template_map, cpu_config_map).unwrap();
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/cpu-template-helper/src/template/verify/aarch64.rs | src/cpu-template-helper/src/template/verify/aarch64.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vmm::cpu_config::templates::CustomCpuTemplate;
use super::{VerifyError, verify_common};
use crate::utils::aarch64::RegModifierMap;
pub fn verify(
cpu_template: CustomCpuTemplate,
cpu_config: CustomCpuTemplate,
) -> Result<(), VerifyError> {
let reg_template = RegModifierMap::from(cpu_template.reg_modifiers);
let reg_config = RegModifierMap::from(cpu_config.reg_modifiers);
verify_common(reg_template.0, reg_config.0)
}
#[cfg(test)]
mod tests {
use vmm::cpu_config::aarch64::custom_cpu_template::RegisterModifier;
use vmm::cpu_config::templates::RegisterValueFilter;
use super::*;
use crate::utils::aarch64::reg_modifier;
#[test]
#[rustfmt::skip]
fn test_verify_non_existing_reg() {
// Test with a sample whose register exists in template, but not in config.
let template = CustomCpuTemplate {
reg_modifiers: vec![
reg_modifier!(0x0, 0b00000000),
reg_modifier!(0x1, 0b11111111),
],
..Default::default()
};
let config = CustomCpuTemplate {
reg_modifiers: vec![
reg_modifier!(0x0, 0b00000000),
],
..Default::default()
};
assert_eq!(
verify(template, config).unwrap_err().to_string(),
"ID=0x1 not found in CPU configuration."
);
}
#[test]
fn test_verify_mismatched_reg() {
// Test with a sample whose register value mismatches.
let template = CustomCpuTemplate {
reg_modifiers: vec![reg_modifier!(0x0, 0b10101010, 0b11110000)],
..Default::default()
};
let config = CustomCpuTemplate {
reg_modifiers: vec![reg_modifier!(0x0, 0b01010101, 0b11111111)],
..Default::default()
};
assert_eq!(
verify(template, config).unwrap_err().to_string(),
"Value for ID=0x0 mismatched.\n\
* CPU template : 0b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010100000\n\
* CPU configuration: 0b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001010000\n\
* Diff : ^^^^ "
)
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/cpu-template-helper/src/template/dump/x86_64.rs | src/cpu-template-helper/src/template/dump/x86_64.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::collections::BTreeMap;
use vmm::MSR_RANGE;
use vmm::arch::x86_64::generated::msr_index::*;
use vmm::arch::x86_64::msr::MsrRange;
use vmm::cpu_config::templates::{CpuConfiguration, CustomCpuTemplate, RegisterValueFilter};
use vmm::cpu_config::x86_64::cpuid::common::get_vendor_id_from_host;
use vmm::cpu_config::x86_64::cpuid::{Cpuid, VENDOR_ID_AMD};
use vmm::cpu_config::x86_64::custom_cpu_template::{
CpuidLeafModifier, CpuidRegister, CpuidRegisterModifier, RegisterModifier,
};
use crate::utils::x86_64::{cpuid_leaf_modifier, cpuid_reg_modifier, msr_modifier};
/// Convert `&CpuConfiguration` to `CustomCputemplate`.
pub fn config_to_template(cpu_config: &CpuConfiguration) -> CustomCpuTemplate {
CustomCpuTemplate {
cpuid_modifiers: cpuid_to_modifiers(&cpu_config.cpuid),
msr_modifiers: msrs_to_modifier(&cpu_config.msrs),
..Default::default()
}
}
fn cpuid_to_modifiers(cpuid: &Cpuid) -> Vec<CpuidLeafModifier> {
cpuid
.inner()
.iter()
.map(|(key, entry)| {
cpuid_leaf_modifier!(
key.leaf,
key.subleaf,
entry.flags,
vec![
cpuid_reg_modifier!(CpuidRegister::Eax, entry.result.eax),
cpuid_reg_modifier!(CpuidRegister::Ebx, entry.result.ebx),
cpuid_reg_modifier!(CpuidRegister::Ecx, entry.result.ecx),
cpuid_reg_modifier!(CpuidRegister::Edx, entry.result.edx),
]
)
})
.collect()
}
fn msrs_to_modifier(msrs: &BTreeMap<u32, u64>) -> Vec<RegisterModifier> {
let mut msrs: Vec<RegisterModifier> = msrs
.iter()
.map(|(index, value)| msr_modifier!(*index, *value))
.collect();
msrs.retain(|modifier| !should_exclude_msr(modifier.addr));
if &get_vendor_id_from_host().unwrap() == VENDOR_ID_AMD {
msrs.retain(|modifier| !should_exclude_msr_amd(modifier.addr));
}
msrs.sort_by_key(|modifier| modifier.addr);
msrs
}
// List of MSR indices excluded from the CPU configuration dump.
//
// MSRs that vary depending on the elapsed time (e.g., time stamp counter) are not useful, because
// CPU configuration dump is used to check diff between CPU models and detect changes caused by
// Firecracker/KVM/BIOS changes.
//
// Fireracker diables some features (e.g., PMU) and doesn't support some features (e.g., Hyper-V),
// MSRs related to such features are not useful as CPU configuration dump. Excluding such MSRs
// reduces maintenance cost when KVM makes change their default values.
const MSR_EXCLUSION_LIST: [MsrRange; 10] = [
// - MSR_IA32_TSC (0x10): vary depending on the elapsed time.
MSR_RANGE!(MSR_IA32_TSC),
// - MSR_IA32_TSC_DEADLINE (0x6e0): varies depending on the elapsed time.
MSR_RANGE!(MSR_IA32_TSC_DEADLINE),
// Firecracker doesn't support MCE.
// - MSR_IA32_MCG_STATUS (0x17a)
// - MSR_IA32_MCG_EXT_CTL (0x4d0)
MSR_RANGE!(MSR_IA32_MCG_STATUS),
MSR_RANGE!(MSR_IA32_MCG_EXT_CTL),
// - MSR_IA32_PERF_CAPABILITIES (0x345) available if CPUID.01h:ECX[15] = 1 but disabled in the
// CPUID normalization process.
MSR_RANGE!(MSR_IA32_PERF_CAPABILITIES),
// Firecracker doesn't support PEBS (Precise Event-Based Sampling) that is part of Intel's PMU.
// - MSR_IA32_PEBS_ENABLE (0x3F1)
// - MSR_PEBS_DATA_CFG (0x3F2)
// - MSR_IA32_DS_AREA (0x600)
MSR_RANGE!(MSR_IA32_PEBS_ENABLE, 2),
MSR_RANGE!(MSR_IA32_DS_AREA),
// Firecracker doesn't support AMD PMU.
// - MSR_K7_EVNTSELn (0xC0010000..=0xC0010003)
// - MSR_K7_PERFCTRn (0xC0010004..=0xC0010007)
// - MSR_F15H_PERF_CTLn & MSR_F15H_PERF_CTRn (0xC0010200..=0xC001020B)
MSR_RANGE!(MSR_K7_EVNTSEL0, 4),
MSR_RANGE!(MSR_K7_PERFCTR0, 4),
MSR_RANGE!(MSR_F15H_PERF_CTL0, 12),
];
fn should_exclude_msr(index: u32) -> bool {
MSR_EXCLUSION_LIST.iter().any(|range| range.contains(index))
}
// List of MSR indices excluded from the CPU configuration dump on AMD
const MSR_EXCLUSION_LIST_AMD: [MsrRange; 1] = [
// MSR_IA32_ARCH_CAPABILITIES has been emulated by KVM since kernel 5.7.
// https://github.com/torvalds/linux/commit/93c380e7b528882396ca463971012222bad7d82e
// https://lore.kernel.org/all/20200302235709.27467-1-sean.j.christopherson@intel.com/
// As this MSR is not available on AMD processors, Firecracker disables it explicitly by
// setting 0 to CPUID.(EAX=07H,ECX=0):EDX[bit 29], and this MSR should be removed from the
// dump on AMD.
MSR_RANGE!(MSR_IA32_ARCH_CAPABILITIES),
];
fn should_exclude_msr_amd(index: u32) -> bool {
MSR_EXCLUSION_LIST_AMD
.iter()
.any(|range| range.contains(index))
}
#[cfg(test)]
mod tests {
use std::collections::BTreeMap;
use vmm::cpu_config::x86_64::cpuid::{
CpuidEntry, CpuidKey, CpuidRegisters, IntelCpuid, KvmCpuidFlags,
};
use super::*;
fn build_sample_cpuid() -> Cpuid {
Cpuid::Intel(IntelCpuid(BTreeMap::from([
(
CpuidKey {
leaf: 0x0,
subleaf: 0x0,
},
CpuidEntry {
flags: KvmCpuidFlags::EMPTY,
result: CpuidRegisters {
eax: 0xffff_ffff,
ebx: 0x0000_ffff,
ecx: 0xffff_0000,
edx: 0x0000_0000,
},
},
),
(
CpuidKey {
leaf: 0x1,
subleaf: 0x1,
},
CpuidEntry {
flags: KvmCpuidFlags::SIGNIFICANT_INDEX,
result: CpuidRegisters {
eax: 0xaaaa_aaaa,
ebx: 0xaaaa_5555,
ecx: 0x5555_aaaa,
edx: 0x5555_5555,
},
},
),
])))
}
fn build_expected_cpuid_modifiers() -> Vec<CpuidLeafModifier> {
vec![
cpuid_leaf_modifier!(
0x0,
0x0,
KvmCpuidFlags::EMPTY,
vec![
cpuid_reg_modifier!(CpuidRegister::Eax, 0xffff_ffff),
cpuid_reg_modifier!(CpuidRegister::Ebx, 0x0000_ffff),
cpuid_reg_modifier!(CpuidRegister::Ecx, 0xffff_0000),
cpuid_reg_modifier!(CpuidRegister::Edx, 0x0000_0000),
]
),
cpuid_leaf_modifier!(
0x1,
0x1,
KvmCpuidFlags::SIGNIFICANT_INDEX,
vec![
cpuid_reg_modifier!(CpuidRegister::Eax, 0xaaaa_aaaa),
cpuid_reg_modifier!(CpuidRegister::Ebx, 0xaaaa_5555),
cpuid_reg_modifier!(CpuidRegister::Ecx, 0x5555_aaaa),
cpuid_reg_modifier!(CpuidRegister::Edx, 0x5555_5555),
]
),
]
}
fn build_sample_msrs() -> BTreeMap<u32, u64> {
let mut map = BTreeMap::from([
// should be sorted in the result.
(0x1, 0xffff_ffff_ffff_ffff),
(0x5, 0xffff_ffff_0000_0000),
(0x3, 0x0000_0000_ffff_ffff),
(0x2, 0x0000_0000_0000_0000),
]);
// should be excluded from the result.
MSR_EXCLUSION_LIST
.iter()
.chain(MSR_EXCLUSION_LIST_AMD.iter())
.for_each(|range| {
(range.base..(range.base + range.nmsrs)).for_each(|id| {
map.insert(id, 0);
})
});
map
}
fn build_expected_msr_modifiers() -> Vec<RegisterModifier> {
let mut v = vec![
msr_modifier!(0x1, 0xffff_ffff_ffff_ffff),
msr_modifier!(0x2, 0x0000_0000_0000_0000),
msr_modifier!(0x3, 0x0000_0000_ffff_ffff),
msr_modifier!(0x5, 0xffff_ffff_0000_0000),
];
if &get_vendor_id_from_host().unwrap() != VENDOR_ID_AMD {
MSR_EXCLUSION_LIST_AMD.iter().for_each(|range| {
(range.base..(range.base + range.nmsrs)).for_each(|id| {
v.push(msr_modifier!(id, 0));
})
});
}
v
}
#[test]
fn test_config_to_template() {
let cpu_config = CpuConfiguration {
cpuid: build_sample_cpuid(),
msrs: build_sample_msrs(),
};
let cpu_template = CustomCpuTemplate {
cpuid_modifiers: build_expected_cpuid_modifiers(),
msr_modifiers: build_expected_msr_modifiers(),
..Default::default()
};
assert_eq!(config_to_template(&cpu_config), cpu_template);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/cpu-template-helper/src/template/dump/mod.rs | src/cpu-template-helper/src/template/dump/mod.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#[cfg(target_arch = "aarch64")]
mod aarch64;
#[cfg(target_arch = "x86_64")]
mod x86_64;
use std::sync::{Arc, Mutex};
use vmm::cpu_config::templates::CustomCpuTemplate;
use vmm::{DumpCpuConfigError, Vmm};
#[cfg(target_arch = "aarch64")]
use crate::template::dump::aarch64::config_to_template;
#[cfg(target_arch = "x86_64")]
use crate::template::dump::x86_64::config_to_template;
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum DumpError {
/// Failed to dump CPU config: {0}
DumpCpuConfig(#[from] DumpCpuConfigError),
}
pub fn dump(vmm: Arc<Mutex<Vmm>>) -> Result<CustomCpuTemplate, DumpError> {
// Get CPU configuration.
let cpu_configs = vmm.lock().unwrap().dump_cpu_config()?;
// Convert CPU config to CPU template.
Ok(config_to_template(&cpu_configs[0]))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::build_microvm_from_config;
#[test]
fn test_dump() {
let (vmm, _) = build_microvm_from_config(None, None).unwrap();
dump(vmm).unwrap();
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/cpu-template-helper/src/template/dump/aarch64.rs | src/cpu-template-helper/src/template/dump/aarch64.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vmm::arch::aarch64::regs::{PC, RegSize, SYS_CNTPCT_EL0, SYS_CNTV_CVAL_EL0};
use vmm::cpu_config::aarch64::custom_cpu_template::RegisterModifier;
use vmm::cpu_config::templates::{CpuConfiguration, CustomCpuTemplate, RegisterValueFilter};
use vmm::logger::warn;
use crate::utils::aarch64::reg_modifier;
pub fn config_to_template(cpu_config: &CpuConfiguration) -> CustomCpuTemplate {
let mut reg_modifiers: Vec<RegisterModifier> = cpu_config
.regs
.iter()
.filter_map(|reg| match reg.size() {
RegSize::U32 => Some(reg_modifier!(reg.id, u128::from(reg.value::<u32, 4>()))),
RegSize::U64 => Some(reg_modifier!(reg.id, u128::from(reg.value::<u64, 8>()))),
RegSize::U128 => Some(reg_modifier!(reg.id, reg.value::<u128, 16>())),
_ => {
warn!(
"Only 32, 64 and 128 bit wide registers are supported in cpu templates. \
Skipping: {:#x}",
reg.id
);
None
}
})
.collect();
reg_modifiers.retain(|modifier| !REG_EXCLUSION_LIST.contains(&modifier.addr));
reg_modifiers.sort_by_key(|modifier| modifier.addr);
CustomCpuTemplate {
reg_modifiers,
..Default::default()
}
}
// List of register IDs excluded from the CPU configuration dump.
const REG_EXCLUSION_LIST: [u64; 3] = [
// SYS_CNTV_CVAL_EL0 and SYS_CNTPCT_EL0 are timer registers and depend on the elapsed time.
// This type of registers are not useful as guest CPU config dump.
SYS_CNTV_CVAL_EL0,
SYS_CNTPCT_EL0,
// Program counter (PC) value is determined by the given kernel image. It should not be
// overwritten by a custom CPU template and does not need to be tracked in a fingerprint file.
PC,
];
#[cfg(test)]
mod tests {
use vmm::arch::aarch64::regs::{Aarch64RegisterRef, Aarch64RegisterVec, reg_size};
use super::*;
// These are used as IDs to satisfy requirenments
// of `Aarch64RegisterRef::new`
const KVM_REG_SIZE_U32: u64 = 0x0020000000000000;
const KVM_REG_SIZE_U64: u64 = 0x0030000000000000;
const KVM_REG_SIZE_U128: u64 = 0x0040000000000000;
const KVM_REG_SIZE_U256: u64 = 0x0050000000000000;
const KVM_REG_SIZE_U512: u64 = 0x0060000000000000;
const KVM_REG_SIZE_U1024: u64 = 0x0070000000000000;
const KVM_REG_SIZE_U2048: u64 = 0x0080000000000000;
fn build_sample_regs() -> Aarch64RegisterVec {
let mut v = Aarch64RegisterVec::default();
v.push(Aarch64RegisterRef::new(
KVM_REG_SIZE_U128,
&0xffff_ffff_ffff_ffff_ffff_ffff_ffff_ffff_u128.to_le_bytes(),
));
v.push(Aarch64RegisterRef::new(
KVM_REG_SIZE_U32,
&0x0000_ffff_u32.to_le_bytes(),
));
v.push(Aarch64RegisterRef::new(
KVM_REG_SIZE_U64,
&0x0000_ffff_0000_ffff_u64.to_le_bytes(),
));
// CPU templates only supports 32, 64 and 128 bit wide registers, so the following registers
// should be excluded from the result.
v.push(Aarch64RegisterRef::new(KVM_REG_SIZE_U256, &[0x69; 32]));
v.push(Aarch64RegisterRef::new(KVM_REG_SIZE_U512, &[0x69; 64]));
v.push(Aarch64RegisterRef::new(KVM_REG_SIZE_U1024, &[0x69; 128]));
v.push(Aarch64RegisterRef::new(KVM_REG_SIZE_U2048, &[0x69; 256]));
// The following registers should be excluded from the result.
for id in REG_EXCLUSION_LIST {
v.push(Aarch64RegisterRef::new(id, &vec![0; reg_size(id)]));
}
v
}
fn build_expected_reg_modifiers() -> Vec<RegisterModifier> {
vec![
reg_modifier!(KVM_REG_SIZE_U32, 0x0000_ffff),
reg_modifier!(KVM_REG_SIZE_U64, 0x0000_ffff_0000_ffff),
reg_modifier!(KVM_REG_SIZE_U128, 0xffff_ffff_ffff_ffff_ffff_ffff_ffff_ffff),
]
}
#[test]
fn test_config_to_template() {
let cpu_config = CpuConfiguration {
regs: build_sample_regs(),
};
let cpu_template = CustomCpuTemplate {
reg_modifiers: build_expected_reg_modifiers(),
..Default::default()
};
assert_eq!(config_to_template(&cpu_config), cpu_template);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/cpu-template-helper/src/template/strip/x86_64.rs | src/cpu-template-helper/src/template/strip/x86_64.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vmm::cpu_config::templates::CustomCpuTemplate;
use vmm::cpu_config::x86_64::custom_cpu_template::{CpuidLeafModifier, RegisterModifier};
use crate::template::strip::{StripError, strip_common};
use crate::utils::x86_64::{CpuidModifierMap, MsrModifierMap};
#[allow(dead_code)]
pub fn strip(templates: Vec<CustomCpuTemplate>) -> Result<Vec<CustomCpuTemplate>, StripError> {
// Convert `Vec<CustomCpuTemplate>` to two `Vec<HashMap<_>>` of modifiers.
let (mut cpuid_modifiers_maps, mut msr_modifiers_maps): (Vec<_>, Vec<_>) = templates
.into_iter()
.map(|template| {
(
CpuidModifierMap::from(template.cpuid_modifiers).0,
MsrModifierMap::from(template.msr_modifiers).0,
)
})
.unzip();
// Remove common items.
strip_common(&mut cpuid_modifiers_maps)?;
strip_common(&mut msr_modifiers_maps)?;
// Convert back to `Vec<CustomCpuTemplate>`.
let templates = cpuid_modifiers_maps
.into_iter()
.zip(msr_modifiers_maps)
.map(|(cpuid_modifiers_map, msr_modifiers_map)| {
let cpuid_modifiers =
Vec::<CpuidLeafModifier>::from(CpuidModifierMap(cpuid_modifiers_map));
let msr_modifiers = Vec::<RegisterModifier>::from(MsrModifierMap(msr_modifiers_map));
CustomCpuTemplate {
cpuid_modifiers,
msr_modifiers,
..Default::default()
}
})
.collect::<Vec<_>>();
Ok(templates)
}
#[cfg(test)]
mod tests {
use vmm::cpu_config::templates::RegisterValueFilter;
use vmm::cpu_config::x86_64::cpuid::KvmCpuidFlags;
use vmm::cpu_config::x86_64::custom_cpu_template::CpuidRegister::*;
use vmm::cpu_config::x86_64::custom_cpu_template::{
CpuidLeafModifier, CpuidRegisterModifier, RegisterModifier,
};
use super::*;
use crate::utils::x86_64::{cpuid_leaf_modifier, cpuid_reg_modifier, msr_modifier};
// Summary of CPUID modifiers:
// * A CPUID leaf 0x0 / subleaf 0x0 modifier exists in all the templates and its value is same.
// * A CPUID leaf 0x1 / subleaf 0x0 modifier only exists in the second template.
// * A CPUID leaf 0x2 / subleaf 0x1 modifier exists in all the templates, but EAX value is same
// and EBX value is different across them.
#[rustfmt::skip]
fn build_input_cpuid_templates() -> Vec<CustomCpuTemplate> {
vec![
CustomCpuTemplate {
cpuid_modifiers: vec![
cpuid_leaf_modifier!(0x0, 0x0, KvmCpuidFlags::EMPTY, vec![
cpuid_reg_modifier!(Eax, 0x0),
]),
cpuid_leaf_modifier!(0x2, 0x1, KvmCpuidFlags::SIGNIFICANT_INDEX, vec![
cpuid_reg_modifier!(Eax, 0x0),
cpuid_reg_modifier!(Ebx, 0x0),
]),
],
msr_modifiers: vec![],
..Default::default()
},
CustomCpuTemplate {
cpuid_modifiers: vec![
cpuid_leaf_modifier!(0x0, 0x0, KvmCpuidFlags::EMPTY, vec![
cpuid_reg_modifier!(Eax, 0x0),
]),
cpuid_leaf_modifier!(0x1, 0x0, KvmCpuidFlags::EMPTY, vec![
cpuid_reg_modifier!(Eax, 0x0),
]),
cpuid_leaf_modifier!(0x2, 0x1, KvmCpuidFlags::SIGNIFICANT_INDEX, vec![
cpuid_reg_modifier!(Eax, 0x0),
cpuid_reg_modifier!(Ebx, 0x1),
]),
],
msr_modifiers: vec![],
..Default::default()
},
CustomCpuTemplate {
cpuid_modifiers: vec![
cpuid_leaf_modifier!(0x0, 0x0, KvmCpuidFlags::EMPTY, vec![
cpuid_reg_modifier!(Eax, 0x0),
]),
cpuid_leaf_modifier!(0x2, 0x1, KvmCpuidFlags::SIGNIFICANT_INDEX, vec![
cpuid_reg_modifier!(Eax, 0x0),
cpuid_reg_modifier!(Ebx, 0x2),
]),
],
msr_modifiers: vec![],
..Default::default()
},
]
}
#[rustfmt::skip]
fn build_expected_cpuid_templates() -> Vec<CustomCpuTemplate> {
vec![
CustomCpuTemplate {
cpuid_modifiers: vec![
cpuid_leaf_modifier!(0x2, 0x1, KvmCpuidFlags::SIGNIFICANT_INDEX, vec![
cpuid_reg_modifier!(Ebx, 0x0, 0b11),
]),
],
msr_modifiers: vec![],
..Default::default()
},
CustomCpuTemplate {
cpuid_modifiers: vec![
cpuid_leaf_modifier!(0x1, 0x0, KvmCpuidFlags::EMPTY, vec![
cpuid_reg_modifier!(Eax, 0x0),
]),
cpuid_leaf_modifier!(0x2, 0x1, KvmCpuidFlags::SIGNIFICANT_INDEX, vec![
cpuid_reg_modifier!(Ebx, 0x1, 0b11),
]),
],
msr_modifiers: vec![],
..Default::default()
},
CustomCpuTemplate {
cpuid_modifiers: vec![
cpuid_leaf_modifier!(0x2, 0x1, KvmCpuidFlags::SIGNIFICANT_INDEX, vec![
cpuid_reg_modifier!(Ebx, 0x2, 0b11),
]),
],
msr_modifiers: vec![],
..Default::default()
},
]
}
// Summary of MSR modifiers:
// * An addr 0x0 modifier exists in all the templates but its value is different.
// * An addr 0x1 modifier exists in all the templates and its value is same.
// * An addr 0x2 modifier only exists in the third template.
#[rustfmt::skip]
fn build_input_msr_templates() -> Vec<CustomCpuTemplate> {
vec![
CustomCpuTemplate {
cpuid_modifiers: vec![],
msr_modifiers: vec![
msr_modifier!(0x0, 0x0),
msr_modifier!(0x1, 0x1),
],
..Default::default()
},
CustomCpuTemplate {
cpuid_modifiers: vec![],
msr_modifiers: vec![
msr_modifier!(0x0, 0x1),
msr_modifier!(0x1, 0x1),
],
..Default::default()
},
CustomCpuTemplate {
cpuid_modifiers: vec![],
msr_modifiers: vec![
msr_modifier!(0x0, 0x2),
msr_modifier!(0x1, 0x1),
msr_modifier!(0x2, 0x1),
],
..Default::default()
},
]
}
#[rustfmt::skip]
fn build_expected_msr_templates() -> Vec<CustomCpuTemplate> {
vec![
CustomCpuTemplate {
cpuid_modifiers: vec![],
msr_modifiers: vec![
msr_modifier!(0x0, 0x0, 0b11),
],
..Default::default()
},
CustomCpuTemplate {
cpuid_modifiers: vec![],
msr_modifiers: vec![
msr_modifier!(0x0, 0x1, 0b11),
],
..Default::default()
},
CustomCpuTemplate {
cpuid_modifiers: vec![],
msr_modifiers: vec![
msr_modifier!(0x0, 0x2, 0b11),
msr_modifier!(0x2, 0x1),
],
..Default::default()
},
]
}
#[test]
fn test_strip_cpuid_modifiers() {
let input = build_input_cpuid_templates();
let result = strip(input).unwrap();
let expected = build_expected_cpuid_templates();
assert_eq!(result, expected);
}
#[test]
fn test_strip_msr_modifiers() {
let input = build_input_msr_templates();
let result = strip(input).unwrap();
let expected = build_expected_msr_templates();
assert_eq!(result, expected);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/cpu-template-helper/src/template/strip/mod.rs | src/cpu-template-helper/src/template/strip/mod.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::collections::HashMap;
use std::fmt::Debug;
use vmm::cpu_config::templates::{Numeric, RegisterValueFilter};
use crate::utils::ModifierMapKey;
#[cfg(target_arch = "aarch64")]
mod aarch64;
#[cfg(target_arch = "aarch64")]
pub use aarch64::strip;
#[cfg(target_arch = "x86_64")]
mod x86_64;
#[cfg(target_arch = "x86_64")]
pub use x86_64::strip;
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum StripError {
/// The number of inputs should be two or more.
NumberOfInputs,
}
fn strip_common<K, V>(maps: &mut [HashMap<K, RegisterValueFilter<V>>]) -> Result<(), StripError>
where
K: ModifierMapKey + Debug,
V: Numeric + Debug,
{
if maps.len() < 2 {
return Err(StripError::NumberOfInputs);
}
// Initialize `common` with the cloned `maps[0]`.
let mut common = maps[0].clone();
// Iterate all items included in the `common`.
// Use `maps[0]` instead of `common` since the `common` is mutated in the loop.
for (key, common_vf) in &maps[0] {
// Hold which bits are different from the `common`'s value/filter.
// `diff` remains 0 if all the filtered values in all the `maps` are same.
let mut diff = V::zero();
for map in maps[1..].iter() {
match map.get(key) {
// Record which bits of filtered value are different from the `common` if the `key`
// is found in the `map`.
Some(map_vf) => {
let map_filtered_value = map_vf.value & map_vf.filter;
let common_filtered_value = common_vf.value & common_vf.filter;
diff |= map_filtered_value ^ common_filtered_value;
}
// Remove the `key` from the `common` if at least one of the `maps` does not have
// the `key`.
None => {
common.remove(key);
}
}
}
// Store the `diff` in the `common`'s `filter` if the `key` exist in all the `maps`.
if let Some(common_vf) = common.get_mut(key) {
common_vf.filter = diff;
}
}
// Remove the `common` items from all the `maps`.
for (key, common_vf) in common {
for map in maps.iter_mut() {
if common_vf.filter == V::zero() {
// Remove the `key` if the filtered value is identical in all the `maps`.
map.remove(&key).unwrap();
} else {
// Update the `filter` with `diff`.
let map_vf = map.get_mut(&key).unwrap();
map_vf.filter = map_vf.filter & common_vf.filter;
}
}
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::tests::{MockModifierMapKey, mock_modifier};
#[test]
fn test_strip_common_with_single_input() {
let mut input = vec![HashMap::from([mock_modifier!(0x0, 0b0000_0000)])];
match strip_common(&mut input) {
Err(StripError::NumberOfInputs) => (),
_ => panic!("Should fail with `Error::NumberOfInputs`."),
}
}
#[test]
fn test_strip_common() {
let mut input = vec![
HashMap::from([
mock_modifier!(0x0, 0b1111_1111, 0b1111_1111), // 0x0 => 0b1111_1111
mock_modifier!(0x1, 0b1111_1111, 0b1111_1111), // 0x1 => 0b1111_1111
mock_modifier!(0x3, 0b1111_1111, 0b1111_1111), // 0x3 => 0b1111_1111
mock_modifier!(0x4, 0b1111_1111, 0b1111_1111), // 0x4 => 0b1111_1111
mock_modifier!(0x5, 0b1111_1111, 0b1111_1111), // 0x5 => 0b1111_1111
]),
HashMap::from([
mock_modifier!(0x0, 0b1111_1111, 0b1111_1111), // 0x0 => 0b1111_1111
mock_modifier!(0x2, 0b1111_1111, 0b1111_1111), // 0x2 => 0b1111_1111
mock_modifier!(0x3, 0b0000_1111, 0b1111_1111), // 0x3 => 0b0000_1111
mock_modifier!(0x4, 0b1111_0000, 0b1111_1111), // 0x4 => 0b1111_0000
mock_modifier!(0x5, 0b1100_0000, 0b1100_1100), // 0x5 => 0b11xx_00xx
]),
HashMap::from([
mock_modifier!(0x0, 0b1111_1111, 0b1111_1111), // 0x0 => 0b1111_1111
mock_modifier!(0x1, 0b1111_1111, 0b1111_1111), // 0x1 => 0b1111_1111
mock_modifier!(0x3, 0b1111_0000, 0b1111_1111), // 0x3 => 0b1111_0000
mock_modifier!(0x4, 0b1100_1100, 0b1111_1111), // 0x4 => 0b1100_1100
mock_modifier!(0x5, 0b1010_0000, 0b1111_0000), // 0x5 => 0b1010_xxxx
]),
];
let expected = vec![
HashMap::from([
mock_modifier!(0x1, 0b1111_1111, 0b1111_1111), // 0x1 => 0b1111_1111
mock_modifier!(0x3, 0b1111_1111, 0b1111_1111), // 0x3 => 0b1111_1111
mock_modifier!(0x4, 0b1111_1111, 0b0011_1111), // 0x4 => 0bxx11_1111
mock_modifier!(0x5, 0b1111_1111, 0b0111_1111), // 0x5 => 0bx111_1111
]),
HashMap::from([
mock_modifier!(0x2, 0b1111_1111, 0b1111_1111), // 0x2 => 0b1111_1111
mock_modifier!(0x3, 0b0000_1111, 0b1111_1111), // 0x3 => 0b0000_1111
mock_modifier!(0x4, 0b1111_0000, 0b0011_1111), // 0x4 => 0bxx11_0000
mock_modifier!(0x5, 0b1100_0000, 0b0100_1100), // 0x5 => 0bx1xx_00xx
]),
HashMap::from([
mock_modifier!(0x1, 0b1111_1111, 0b1111_1111), // 0x1 => 0b1111_1111
mock_modifier!(0x3, 0b1111_0000, 0b1111_1111), // 0x3 => 0b1111_0000
mock_modifier!(0x4, 0b1100_1100, 0b0011_1111), // 0x4 => 0bxx00_1100
mock_modifier!(0x5, 0b1010_0000, 0b0111_0000), // 0x5 => 0bx010_xxxx
]),
];
strip_common(&mut input).unwrap();
assert_eq!(input, expected);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/cpu-template-helper/src/template/strip/aarch64.rs | src/cpu-template-helper/src/template/strip/aarch64.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use vmm::cpu_config::aarch64::custom_cpu_template::RegisterModifier;
use vmm::cpu_config::templates::CustomCpuTemplate;
use crate::template::strip::{StripError, strip_common};
use crate::utils::aarch64::RegModifierMap;
#[allow(dead_code)]
pub fn strip(templates: Vec<CustomCpuTemplate>) -> Result<Vec<CustomCpuTemplate>, StripError> {
// Convert `Vec<CustomCpuTemplate>` to `Vec<HashMap<_>>`.
let mut reg_modifiers_maps = templates
.into_iter()
.map(|template| RegModifierMap::from(template.reg_modifiers).0)
.collect::<Vec<_>>();
// Remove common items.
strip_common(&mut reg_modifiers_maps)?;
// Convert back to `Vec<CustomCpuTemplate>`.
let templates = reg_modifiers_maps
.into_iter()
.map(|reg_modifiers_map| {
let reg_modifiers = Vec::<RegisterModifier>::from(RegModifierMap(reg_modifiers_map));
CustomCpuTemplate {
reg_modifiers,
..Default::default()
}
})
.collect();
Ok(templates)
}
#[cfg(test)]
mod tests {
use vmm::cpu_config::aarch64::custom_cpu_template::RegisterModifier;
use vmm::cpu_config::templates::RegisterValueFilter;
use super::*;
use crate::utils::aarch64::reg_modifier;
// Summary of reg modifiers:
// * An addr 0x0 modifier exists in all the templates but its value is different.
// * An addr 0x1 modifier exists in all the templates and its value is same.
// * An addr 0x2 modifier only exist in the third template.
#[rustfmt::skip]
fn build_input_templates() -> Vec<CustomCpuTemplate> {
vec![
CustomCpuTemplate {
reg_modifiers: vec![
reg_modifier!(0x0, 0x0),
reg_modifier!(0x1, 0x1),
],
..Default::default()
},
CustomCpuTemplate {
reg_modifiers: vec![
reg_modifier!(0x0, 0x1),
reg_modifier!(0x1, 0x1),
],
..Default::default()
},
CustomCpuTemplate {
reg_modifiers: vec![
reg_modifier!(0x0, 0x2),
reg_modifier!(0x1, 0x1),
reg_modifier!(0x2, 0x1),
],
..Default::default()
},
]
}
#[rustfmt::skip]
fn build_expected_templates() -> Vec<CustomCpuTemplate> {
vec![
CustomCpuTemplate {
reg_modifiers: vec![
reg_modifier!(0x0, 0x0, 0b11),
],
..Default::default()
},
CustomCpuTemplate {
reg_modifiers: vec![
reg_modifier!(0x0, 0x1, 0b11),
],
..Default::default()
},
CustomCpuTemplate {
reg_modifiers: vec![
reg_modifier!(0x0, 0x2, 0b11),
reg_modifier!(0x2, 0x1),
],
..Default::default()
},
]
}
#[test]
fn test_strip_reg_modifiers() {
let input = build_input_templates();
let result = strip(input).unwrap();
let expected = build_expected_templates();
assert_eq!(result, expected);
}
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/cpu-template-helper/src/fingerprint/dump.rs | src/cpu-template-helper/src/fingerprint/dump.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fs::read_to_string;
use std::sync::{Arc, Mutex};
use vmm::Vmm;
use crate::fingerprint::Fingerprint;
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum FingerprintDumpError {
/// Failed to dump CPU config: {0}
DumpCpuConfig(#[from] crate::template::dump::DumpError),
/// Failed to read {0}: {1}
ReadSysfsFile(String, std::io::Error),
/// Failed to get kernel version: {0}
GetKernelVersion(std::io::Error),
}
pub fn dump(vmm: Arc<Mutex<Vmm>>) -> Result<Fingerprint, FingerprintDumpError> {
Ok(Fingerprint {
firecracker_version: crate::utils::CPU_TEMPLATE_HELPER_VERSION.to_string(),
kernel_version: get_kernel_version()?,
#[cfg(target_arch = "x86_64")]
microcode_version: read_sysfs_file("/sys/devices/system/cpu/cpu0/microcode/version")?,
#[cfg(target_arch = "aarch64")]
microcode_version: read_sysfs_file(
"/sys/devices/system/cpu/cpu0/regs/identification/revidr_el1",
)?,
bios_version: read_sysfs_file("/sys/devices/virtual/dmi/id/bios_version")?,
bios_revision: read_sysfs_file("/sys/devices/virtual/dmi/id/bios_release")?,
guest_cpu_config: crate::template::dump::dump(vmm)?,
})
}
/// Return the running kernel's release string (the `release` field of
/// `uname(2)`, i.e. what `uname -r` prints).
fn get_kernel_version() -> Result<String, FingerprintDumpError> {
    // SAFETY: An all-zeroed value for `libc::utsname` is valid.
    let mut name: libc::utsname = unsafe { std::mem::zeroed() };
    // SAFETY: The passed arg is a valid mutable reference of `libc::utsname`.
    let ret = unsafe { libc::uname(&mut name) };
    if ret < 0 {
        return Err(FingerprintDumpError::GetKernelVersion(
            std::io::Error::last_os_error(),
        ));
    }
    // SAFETY: The fields of `libc::utsname` are terminated by a null byte ('\0').
    // https://man7.org/linux/man-pages/man2/uname.2.html
    let c_str = unsafe { std::ffi::CStr::from_ptr(name.release.as_ptr()) };
    // SAFETY: The `release` field is an array of `char` in C, in other words, ASCII.
    let version = c_str.to_str().unwrap();
    Ok(version.to_string())
}
/// Read the sysfs file at `path`, stripping any trailing newline characters.
fn read_sysfs_file(path: &str) -> Result<String, FingerprintDumpError> {
    match read_to_string(path) {
        Ok(contents) => Ok(contents.trim_end_matches('\n').to_string()),
        Err(err) => Err(FingerprintDumpError::ReadSysfsFile(path.to_string(), err)),
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_get_kernel_version() {
        // `get_kernel_version()` should always succeed.
        get_kernel_version().unwrap();
    }

    #[test]
    fn test_read_valid_sysfs_file() {
        // The sysfs file for the BIOS version should exist and be readable.
        let valid_sysfs_path = "/sys/devices/virtual/dmi/id/bios_version";
        read_sysfs_file(valid_sysfs_path).unwrap();
    }

    #[test]
    fn test_read_invalid_sysfs_file() {
        // Reading a non-existent path must fail (`No such file or directory`).
        let invalid_sysfs_path = "/sys/invalid/path";
        read_sysfs_file(invalid_sysfs_path).unwrap_err();
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/cpu-template-helper/src/fingerprint/mod.rs | src/cpu-template-helper/src/fingerprint/mod.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use serde::{Deserialize, Serialize};
use vmm::cpu_config::templates::CustomCpuTemplate;
pub mod compare;
pub mod dump;
/// Declares the `Fingerprint` struct and a parallel `FingerprintField` enum
/// from a single `name: Type` list, guaranteeing the two stay in sync.
///
/// Each field becomes a public struct member and a same-named (snake_case,
/// hence `non_camel_case_types`) variant usable as a CLI value via
/// `clap::ValueEnum`.
macro_rules! declare_fingerprint_struct_and_enum {
    ($($field_name:ident : $field_type:ty),+) => {
        #[derive(Debug, Serialize, Deserialize)]
        pub struct Fingerprint {
            $(pub $field_name: $field_type),+
        }
        #[allow(non_camel_case_types)]
        #[derive(clap::ValueEnum, Clone, Debug)]
        #[value(rename_all = "snake_case")]
        pub enum FingerprintField {
            $($field_name),+
        }
    };
}
// This macro is expanded as follows:
// ```rs
// #[derive(Serialize, Deserialize)]
// pub struct Fingerprint {
// pub firecracker_version: String,
// pub kernel_version: String,
// pub microcode_version: String,
// pub bios_version: String,
// pub bios_revision: String,
// pub guest_cpu_config: CustomCpuTemplate,
// }
//
// #[allow(non_camel_case_types)]
// #[derive(clap::ValueEnum, Clone, Debug)]
// #[value(rename_all = "snake_case")]
// pub enum FingerprintField {
// firecracker_version,
// kernel_version,
// microcode_version,
// bios_version,
// bios_revision,
// guest_cpu_config,
// }
// ```
// This field list is the single source of truth for both the `Fingerprint`
// struct and the `FingerprintField` CLI filter enum (see macro above).
declare_fingerprint_struct_and_enum!(
    firecracker_version: String,
    kernel_version: String,
    microcode_version: String,
    bios_version: String,
    bios_revision: String,
    guest_cpu_config: CustomCpuTemplate
);
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/cpu-template-helper/src/fingerprint/compare.rs | src/cpu-template-helper/src/fingerprint/compare.rs | // Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use serde::Serialize;
use crate::fingerprint::{Fingerprint, FingerprintField};
/// Errors returned when comparing two fingerprints.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum FingerprintCompareError {
    /// Difference detected between source and target: {0}
    DiffDetected(String),
    /// Failed to serialize/deserialize JSON: {0}
    Serde(#[from] serde_json::Error),
}
/// JSON-serializable record of a single differing fingerprint field.
#[derive(Serialize)]
struct Diff<'a, T: Serialize> {
    // Name of the fingerprint field that differs.
    name: String,
    // Value from the previously stored fingerprint.
    prev: &'a T,
    // Value from the freshly dumped fingerprint.
    curr: &'a T,
}
/// Compare two fingerprints on the fields selected by `filters`.
///
/// Returns `Ok(())` when every selected field matches, otherwise
/// [`FingerprintCompareError::DiffDetected`] whose message contains one
/// pretty-printed JSON `Diff` entry per mismatching field.
pub fn compare(
    prev: Fingerprint,
    curr: Fingerprint,
    filters: Vec<FingerprintField>,
) -> Result<(), FingerprintCompareError> {
    // Serialize a `Diff` record when the two values differ; `None` when equal.
    let compare =
        |field: &FingerprintField, val1, val2| -> Option<Result<String, serde_json::Error>> {
            if val1 != val2 {
                let diff = Diff {
                    name: format!("{field:#?}"),
                    prev: val1,
                    curr: val2,
                };
                Some(serde_json::to_string_pretty(&diff))
            } else {
                None
            }
        };
    let results = filters
        .into_iter()
        .filter_map(|filter| {
            match filter {
                FingerprintField::firecracker_version => compare(
                    &filter,
                    &prev.firecracker_version,
                    &curr.firecracker_version,
                ),
                FingerprintField::kernel_version => {
                    compare(&filter, &prev.kernel_version, &curr.kernel_version)
                }
                FingerprintField::microcode_version => {
                    compare(&filter, &prev.microcode_version, &curr.microcode_version)
                }
                FingerprintField::bios_version => {
                    compare(&filter, &prev.bios_version, &curr.bios_version)
                }
                FingerprintField::bios_revision => {
                    compare(&filter, &prev.bios_revision, &curr.bios_revision)
                }
                FingerprintField::guest_cpu_config => {
                    if prev.guest_cpu_config != curr.guest_cpu_config {
                        let cpu_configs =
                            vec![prev.guest_cpu_config.clone(), curr.guest_cpu_config.clone()];
                        // This `strip()` call always succeeds since the number of inputs is two.
                        // NOTE(review): presumably `strip` removes entries common to both configs
                        // so the diff only shows what changed — see `crate::template::strip`.
                        let cpu_configs = crate::template::strip::strip(cpu_configs).unwrap();
                        let diff = Diff {
                            name: format!("{filter:#?}"),
                            prev: &cpu_configs[0],
                            curr: &cpu_configs[1],
                        };
                        Some(serde_json::to_string_pretty(&diff))
                    } else {
                        None
                    }
                }
            }
        })
        // Short-circuits on the first serialization error.
        .collect::<Result<Vec<_>, serde_json::Error>>()?;
    if results.is_empty() {
        Ok(())
    } else {
        Err(FingerprintCompareError::DiffDetected(format!(
            "\n{}",
            results.join("\n")
        )))
    }
}
#[cfg(test)]
mod tests {
    use clap::ValueEnum;
    use vmm::cpu_config::templates::CustomCpuTemplate;
    use super::*;
    /// Build a fingerprint populated with fixed placeholder values.
    fn build_sample_fingerprint() -> Fingerprint {
        Fingerprint {
            firecracker_version: crate::utils::CPU_TEMPLATE_HELPER_VERSION.to_string(),
            kernel_version: "sample_kernel_version".to_string(),
            microcode_version: "sample_microcode_version".to_string(),
            bios_version: "sample_bios_version".to_string(),
            bios_revision: "sample_bios_revision".to_string(),
            guest_cpu_config: CustomCpuTemplate::default(),
        }
    }
    #[test]
    fn test_compare_same_fingerprints() {
        // Compare two identical fingerprints and verify `Ok` is returned.
        let f1 = build_sample_fingerprint();
        let f2 = build_sample_fingerprint();
        let filters = FingerprintField::value_variants().to_vec();
        compare(f1, f2, filters).unwrap();
    }
    #[test]
    #[rustfmt::skip]
    fn test_compare_different_fingerprints() {
        // Compare two fingerprints that differ on `kernel_version` and `microcode_version` with
        // a filter of `kernel_version` only, and verify that `Err` is returned and only the
        // `kernel_version` change is detected.
        let f1 = build_sample_fingerprint();
        let mut f2 = build_sample_fingerprint();
        f2.kernel_version = "different_kernel_version".to_string();
        f2.microcode_version = "different_microcode_version".to_string();
        let filters = vec![FingerprintField::kernel_version];
        let result = compare(f1, f2, filters);
        match result {
            Err(FingerprintCompareError::DiffDetected(err)) => {
                assert_eq!(
                    err,
                    "\n{\
                     \n  \"name\": \"kernel_version\",\
                     \n  \"prev\": \"sample_kernel_version\",\
                     \n  \"curr\": \"different_kernel_version\"\
                     \n}"
                    .to_string()
                );
            }
            _ => panic!("Should detect difference of `kernel_version`"),
        }
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/pci/src/lib.rs | src/pci/src/lib.rs | // Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
//! Implements pci devices and busses.
extern crate log;
use std::fmt::{self, Debug, Display};
use std::num::ParseIntError;
use std::str::FromStr;
use serde::de::Visitor;
use serde::{Deserialize, Serialize};
/// PCI has four interrupt pins A->D.
#[derive(Copy, Clone)]
pub enum PciInterruptPin {
    IntA,
    IntB,
    IntC,
    IntD,
}
impl PciInterruptPin {
    /// Numeric value of the pin (discriminant; `IntA` = 0).
    pub fn to_mask(self) -> u32 {
        self as u32
    }
}
/// A PCI Segment/Bus/Device/Function address packed into a `u32` as
/// `[segment:16][bus:8][device:5][function:3]`.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd)]
pub struct PciBdf(u32);
/// Serde visitor that parses a `PciBdf` from its textual `ssss:bb:dd.f` form.
struct PciBdfVisitor;
impl Visitor<'_> for PciBdfVisitor {
    type Value = PciBdf;
    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        formatter.write_str("struct PciBdf")
    }
    // Delegates parsing to `PciBdf::from_str`, mapping failures to serde errors.
    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        PciBdf::from_str(v).map_err(serde::de::Error::custom)
    }
}
// Deserialize from the human-readable string form (e.g. "0000:00:01.0").
impl<'de> serde::Deserialize<'de> for PciBdf {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        deserializer.deserialize_str(PciBdfVisitor)
    }
}
// Serialize as the same human-readable string form produced by `Display`.
impl serde::Serialize for PciBdf {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serializer.collect_str(&self.to_string())
    }
}
impl PciBdf {
    /// 16-bit PCI segment (domain) number.
    pub fn segment(&self) -> u16 {
        (self.0 >> 16) as u16
    }

    /// 8-bit bus number.
    pub fn bus(&self) -> u8 {
        (self.0 >> 8) as u8
    }

    /// 5-bit device number.
    pub fn device(&self) -> u8 {
        ((self.0 >> 3) as u8) & 0x1f
    }

    /// 3-bit function number.
    pub fn function(&self) -> u8 {
        (self.0 as u8) & 0x7
    }

    /// Pack the components into `[segment:16][bus:8][device:5][function:3]`.
    /// Out-of-range `device`/`function` bits are masked off.
    pub fn new(segment: u16, bus: u8, device: u8, function: u8) -> Self {
        let mut raw = u32::from(segment) << 16;
        raw |= u32::from(bus) << 8;
        raw |= u32::from(device & 0x1f) << 3;
        raw |= u32::from(function & 0x7);
        Self(raw)
    }
}
// Conversions between the packed `u32` representation and `PciBdf`.
impl From<u32> for PciBdf {
    fn from(bdf: u32) -> Self {
        Self(bdf)
    }
}
impl From<PciBdf> for u32 {
    fn from(bdf: PciBdf) -> Self {
        bdf.0
    }
}
impl From<&PciBdf> for u32 {
    fn from(bdf: &PciBdf) -> Self {
        bdf.0
    }
}
// The low 16 bits hold bus/device/function; converting to `u16` drops the
// segment.
impl From<PciBdf> for u16 {
    fn from(bdf: PciBdf) -> Self {
        (bdf.0 & 0xffff) as u16
    }
}
impl From<&PciBdf> for u16 {
    fn from(bdf: &PciBdf) -> Self {
        (bdf.0 & 0xffff) as u16
    }
}
// `Debug` intentionally produces the same canonical `ssss:bb:dd.f` text as
// `Display`; delegating keeps the two implementations from drifting apart.
impl Debug for PciBdf {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        Display::fmt(self, f)
    }
}
// Formats as zero-padded lower-case hex `ssss:bb:dd.f`, e.g. "0000:00:1f.3".
impl Display for PciBdf {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{:04x}:{:02x}:{:02x}.{:01x}",
            self.segment(),
            self.bus(),
            self.device(),
            self.function()
        )
    }
}
/// Errors associated with parsing a BDF string.
// The variant doc comments double as `Display` messages via `displaydoc`.
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum PciBdfParseError {
    /// Unable to parse bus/device/function number hex: {0}
    InvalidHex(#[from] ParseIntError),
    /// Invalid format: {0} (expected format: 0000:00:00.0)
    InvalidFormat(String),
}
impl FromStr for PciBdf {
    type Err = PciBdfParseError;

    /// Parse the canonical `ssss:bb:dd.f` form (all components lower/upper
    /// case hex). The function number is parsed before the colon-separated
    /// part is validated, matching the original error precedence.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Exactly one '.' separates the function number from the rest.
        let dot_parts: Vec<&str> = s.split('.').collect();
        let &[rest, function] = dot_parts.as_slice() else {
            return Err(PciBdfParseError::InvalidFormat(s.to_string()));
        };
        let function = u8::from_str_radix(function, 16)?;
        // The remainder must be exactly segment:bus:device.
        let colon_parts: Vec<&str> = rest.split(':').collect();
        let &[segment, bus, device] = colon_parts.as_slice() else {
            return Err(PciBdfParseError::InvalidFormat(s.to_string()));
        };
        Ok(PciBdf::new(
            u16::from_str_radix(segment, 16)?,
            u8::from_str_radix(bus, 16)?,
            u8::from_str_radix(device, 16)?,
            function,
        ))
    }
}
/// Represents the types of PCI headers allowed in the configuration registers.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum PciHeaderType {
    /// Regular endpoint device header (type 0).
    Device,
    /// PCI-to-PCI bridge header (type 1).
    Bridge,
}
/// Classes of PCI nodes.
///
/// Discriminants follow the PCI base-class numbering (implicit sequence from
/// 0x00, with `Other` pinned at 0xff).
#[allow(dead_code)]
#[derive(Copy, Clone)]
pub enum PciClassCode {
    TooOld,
    MassStorage,
    NetworkController,
    DisplayController,
    MultimediaController,
    MemoryController,
    BridgeDevice,
    SimpleCommunicationController,
    BaseSystemPeripheral,
    InputDevice,
    DockingStation,
    Processor,
    SerialBusController,
    WirelessController,
    IntelligentIoController,
    EncryptionController,
    DataAcquisitionSignalProcessing,
    Other = 0xff,
}
impl PciClassCode {
    /// Value to program into the configuration-space class register.
    pub fn get_register_value(self) -> u8 {
        self as u8
    }
}
/// A PCI subclass. Each class in `PciClassCode` can specify a unique set of subclasses. This trait
/// is implemented by each subclass. It allows use of a trait object to generate configurations.
pub trait PciSubclass {
    /// Convert this subclass to the value used in the PCI specification.
    fn get_register_value(&self) -> u8;
}
/// Subclasses of the MultimediaController class.
#[allow(dead_code)]
#[derive(Copy, Clone)]
pub enum PciMultimediaSubclass {
    VideoController = 0x00,
    AudioController = 0x01,
    TelephonyDevice = 0x02,
    AudioDevice = 0x03,
    Other = 0x80,
}
impl PciSubclass for PciMultimediaSubclass {
    fn get_register_value(&self) -> u8 {
        *self as u8
    }
}
/// Subclasses of the BridgeDevice
#[allow(dead_code)]
#[derive(Copy, Clone)]
pub enum PciBridgeSubclass {
    HostBridge = 0x00,
    IsaBridge = 0x01,
    EisaBridge = 0x02,
    McaBridge = 0x03,
    PciToPciBridge = 0x04,
    PcmciaBridge = 0x05,
    NuBusBridge = 0x06,
    CardBusBridge = 0x07,
    RacEwayBridge = 0x08,
    PciToPciSemiTransparentBridge = 0x09,
    // NOTE(review): typo for "InfiniBand…"; kept as-is because renaming a
    // public variant would break downstream matches.
    InfiniBrandToPciHostBridge = 0x0a,
    OtherBridgeDevice = 0x80,
}
impl PciSubclass for PciBridgeSubclass {
    fn get_register_value(&self) -> u8 {
        *self as u8
    }
}
/// Subclass of the SerialBus
#[allow(dead_code)]
#[derive(Copy, Clone)]
pub enum PciSerialBusSubClass {
    Firewire = 0x00,
    Accessbus = 0x01,
    Ssa = 0x02,
    Usb = 0x03,
}
impl PciSubclass for PciSerialBusSubClass {
    fn get_register_value(&self) -> u8 {
        *self as u8
    }
}
/// Mass Storage Sub Classes
#[allow(dead_code)]
#[derive(Copy, Clone)]
pub enum PciMassStorageSubclass {
    ScsiStorage = 0x00,
    IdeInterface = 0x01,
    FloppyController = 0x02,
    IpiController = 0x03,
    RaidController = 0x04,
    AtaController = 0x05,
    SataController = 0x06,
    SerialScsiController = 0x07,
    NvmController = 0x08,
    MassStorage = 0x80,
}
impl PciSubclass for PciMassStorageSubclass {
    fn get_register_value(&self) -> u8 {
        *self as u8
    }
}
/// Network Controller Sub Classes
#[allow(dead_code)]
#[derive(Copy, Clone)]
pub enum PciNetworkControllerSubclass {
    EthernetController = 0x00,
    TokenRingController = 0x01,
    FddiController = 0x02,
    AtmController = 0x03,
    IsdnController = 0x04,
    WorldFipController = 0x05,
    PicmgController = 0x06,
    InfinibandController = 0x07,
    FabricController = 0x08,
    NetworkController = 0x80,
}
impl PciSubclass for PciNetworkControllerSubclass {
    fn get_register_value(&self) -> u8 {
        *self as u8
    }
}
/// Types of PCI capabilities.
///
/// Discriminants are the capability IDs used in the configuration-space
/// capability list.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[repr(u8)]
pub enum PciCapabilityId {
    ListId = 0,
    PowerManagement = 0x01,
    AcceleratedGraphicsPort = 0x02,
    VitalProductData = 0x03,
    SlotIdentification = 0x04,
    MessageSignalledInterrupts = 0x05,
    CompactPciHotSwap = 0x06,
    PciX = 0x07,
    HyperTransport = 0x08,
    VendorSpecific = 0x09,
    Debugport = 0x0A,
    CompactPciCentralResourceControl = 0x0B,
    PciStandardHotPlugController = 0x0C,
    BridgeSubsystemVendorDeviceId = 0x0D,
    AgpTargetPciPcibridge = 0x0E,
    SecureDevice = 0x0F,
    PciExpress = 0x10,
    MsiX = 0x11,
    SataDataIndexConf = 0x12,
    PciAdvancedFeatures = 0x13,
    PciEnhancedAllocation = 0x14,
}
impl From<u8> for PciCapabilityId {
    fn from(c: u8) -> Self {
        match c {
            0 => PciCapabilityId::ListId,
            0x01 => PciCapabilityId::PowerManagement,
            0x02 => PciCapabilityId::AcceleratedGraphicsPort,
            0x03 => PciCapabilityId::VitalProductData,
            0x04 => PciCapabilityId::SlotIdentification,
            0x05 => PciCapabilityId::MessageSignalledInterrupts,
            0x06 => PciCapabilityId::CompactPciHotSwap,
            0x07 => PciCapabilityId::PciX,
            0x08 => PciCapabilityId::HyperTransport,
            0x09 => PciCapabilityId::VendorSpecific,
            0x0A => PciCapabilityId::Debugport,
            0x0B => PciCapabilityId::CompactPciCentralResourceControl,
            0x0C => PciCapabilityId::PciStandardHotPlugController,
            0x0D => PciCapabilityId::BridgeSubsystemVendorDeviceId,
            0x0E => PciCapabilityId::AgpTargetPciPcibridge,
            0x0F => PciCapabilityId::SecureDevice,
            0x10 => PciCapabilityId::PciExpress,
            0x11 => PciCapabilityId::MsiX,
            0x12 => PciCapabilityId::SataDataIndexConf,
            0x13 => PciCapabilityId::PciAdvancedFeatures,
            0x14 => PciCapabilityId::PciEnhancedAllocation,
            // NOTE(review): unknown IDs silently collapse to `ListId` (0), so
            // they become indistinguishable from a genuine 0 when round-tripping.
            _ => PciCapabilityId::ListId,
        }
    }
}
/// Types of PCI Express capabilities.
///
/// Discriminants are the 16-bit extended capability IDs; `0xffff` marks the
/// absence of extended capabilities.
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
#[allow(dead_code)]
#[repr(u16)]
pub enum PciExpressCapabilityId {
    NullCapability = 0x0000,
    AdvancedErrorReporting = 0x0001,
    VirtualChannelMultiFunctionVirtualChannelNotPresent = 0x0002,
    DeviceSerialNumber = 0x0003,
    PowerBudgeting = 0x0004,
    RootComplexLinkDeclaration = 0x0005,
    RootComplexInternalLinkControl = 0x0006,
    RootComplexEventCollectorEndpointAssociation = 0x0007,
    MultiFunctionVirtualChannel = 0x0008,
    VirtualChannelMultiFunctionVirtualChannelPresent = 0x0009,
    RootComplexRegisterBlock = 0x000a,
    VendorSpecificExtendedCapability = 0x000b,
    ConfigurationAccessCorrelation = 0x000c,
    AccessControlServices = 0x000d,
    AlternativeRoutingIdentificationInterpretation = 0x000e,
    AddressTranslationServices = 0x000f,
    SingleRootIoVirtualization = 0x0010,
    DeprecatedMultiRootIoVirtualization = 0x0011,
    Multicast = 0x0012,
    PageRequestInterface = 0x0013,
    ReservedForAmd = 0x0014,
    ResizeableBar = 0x0015,
    DynamicPowerAllocation = 0x0016,
    ThpRequester = 0x0017,
    LatencyToleranceReporting = 0x0018,
    SecondaryPciExpress = 0x0019,
    ProtocolMultiplexing = 0x001a,
    ProcessAddressSpaceId = 0x001b,
    LnRequester = 0x001c,
    DownstreamPortContainment = 0x001d,
    L1PmSubstates = 0x001e,
    PrecisionTimeMeasurement = 0x001f,
    PciExpressOverMphy = 0x0020,
    FRSQueueing = 0x0021,
    ReadinessTimeReporting = 0x0022,
    DesignatedVendorSpecificExtendedCapability = 0x0023,
    VfResizeableBar = 0x0024,
    DataLinkFeature = 0x0025,
    PhysicalLayerSixteenGts = 0x0026,
    LaneMarginingAtTheReceiver = 0x0027,
    HierarchyId = 0x0028,
    NativePcieEnclosureManagement = 0x0029,
    PhysicalLayerThirtyTwoGts = 0x002a,
    AlternateProtocol = 0x002b,
    SystemFirmwareIntermediary = 0x002c,
    ShadowFunctions = 0x002d,
    DataObjectExchange = 0x002e,
    Reserved = 0x002f,
    ExtendedCapabilitiesAbsence = 0xffff,
}
impl From<u16> for PciExpressCapabilityId {
    fn from(c: u16) -> Self {
        match c {
            0x0000 => PciExpressCapabilityId::NullCapability,
            0x0001 => PciExpressCapabilityId::AdvancedErrorReporting,
            0x0002 => PciExpressCapabilityId::VirtualChannelMultiFunctionVirtualChannelNotPresent,
            0x0003 => PciExpressCapabilityId::DeviceSerialNumber,
            0x0004 => PciExpressCapabilityId::PowerBudgeting,
            0x0005 => PciExpressCapabilityId::RootComplexLinkDeclaration,
            0x0006 => PciExpressCapabilityId::RootComplexInternalLinkControl,
            0x0007 => PciExpressCapabilityId::RootComplexEventCollectorEndpointAssociation,
            0x0008 => PciExpressCapabilityId::MultiFunctionVirtualChannel,
            0x0009 => PciExpressCapabilityId::VirtualChannelMultiFunctionVirtualChannelPresent,
            0x000a => PciExpressCapabilityId::RootComplexRegisterBlock,
            0x000b => PciExpressCapabilityId::VendorSpecificExtendedCapability,
            0x000c => PciExpressCapabilityId::ConfigurationAccessCorrelation,
            0x000d => PciExpressCapabilityId::AccessControlServices,
            0x000e => PciExpressCapabilityId::AlternativeRoutingIdentificationInterpretation,
            0x000f => PciExpressCapabilityId::AddressTranslationServices,
            0x0010 => PciExpressCapabilityId::SingleRootIoVirtualization,
            0x0011 => PciExpressCapabilityId::DeprecatedMultiRootIoVirtualization,
            0x0012 => PciExpressCapabilityId::Multicast,
            0x0013 => PciExpressCapabilityId::PageRequestInterface,
            0x0014 => PciExpressCapabilityId::ReservedForAmd,
            0x0015 => PciExpressCapabilityId::ResizeableBar,
            0x0016 => PciExpressCapabilityId::DynamicPowerAllocation,
            0x0017 => PciExpressCapabilityId::ThpRequester,
            0x0018 => PciExpressCapabilityId::LatencyToleranceReporting,
            0x0019 => PciExpressCapabilityId::SecondaryPciExpress,
            0x001a => PciExpressCapabilityId::ProtocolMultiplexing,
            0x001b => PciExpressCapabilityId::ProcessAddressSpaceId,
            0x001c => PciExpressCapabilityId::LnRequester,
            0x001d => PciExpressCapabilityId::DownstreamPortContainment,
            0x001e => PciExpressCapabilityId::L1PmSubstates,
            0x001f => PciExpressCapabilityId::PrecisionTimeMeasurement,
            0x0020 => PciExpressCapabilityId::PciExpressOverMphy,
            0x0021 => PciExpressCapabilityId::FRSQueueing,
            0x0022 => PciExpressCapabilityId::ReadinessTimeReporting,
            0x0023 => PciExpressCapabilityId::DesignatedVendorSpecificExtendedCapability,
            0x0024 => PciExpressCapabilityId::VfResizeableBar,
            0x0025 => PciExpressCapabilityId::DataLinkFeature,
            0x0026 => PciExpressCapabilityId::PhysicalLayerSixteenGts,
            0x0027 => PciExpressCapabilityId::LaneMarginingAtTheReceiver,
            0x0028 => PciExpressCapabilityId::HierarchyId,
            0x0029 => PciExpressCapabilityId::NativePcieEnclosureManagement,
            0x002a => PciExpressCapabilityId::PhysicalLayerThirtyTwoGts,
            0x002b => PciExpressCapabilityId::AlternateProtocol,
            0x002c => PciExpressCapabilityId::SystemFirmwareIntermediary,
            0x002d => PciExpressCapabilityId::ShadowFunctions,
            0x002e => PciExpressCapabilityId::DataObjectExchange,
            0xffff => PciExpressCapabilityId::ExtendedCapabilitiesAbsence,
            // Any unlisted ID maps to `Reserved`.
            _ => PciExpressCapabilityId::Reserved,
        }
    }
}
/// See pci_regs.h in kernel
// Discriminants match the BAR type bits in the low nibble of a BAR register.
#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]
pub enum PciBarRegionType {
    Memory32BitRegion = 0,
    IoRegion = 0x01,
    Memory64BitRegion = 0x04,
}
/// BAR prefetchable flag; the discriminant is the prefetchable bit value.
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub enum PciBarPrefetchable {
    NotPrefetchable = 0,
    Prefetchable = 0x08,
}
// A BAR is prefetchable exactly when it carries the `Prefetchable` flag.
impl From<PciBarPrefetchable> for bool {
    fn from(val: PciBarPrefetchable) -> Self {
        matches!(val, PciBarPrefetchable::Prefetchable)
    }
}
#[cfg(test)]
mod tests {
    // Round-trip, parsing, formatting and serde tests for the packed
    // `PciBdf` representation.
    use super::*;
    #[test]
    fn test_pci_bdf_new() {
        let bdf = PciBdf::new(0x1234, 0x56, 0x1f, 0x7);
        assert_eq!(bdf.segment(), 0x1234);
        assert_eq!(bdf.bus(), 0x56);
        assert_eq!(bdf.device(), 0x1f);
        assert_eq!(bdf.function(), 0x7);
    }
    #[test]
    fn test_pci_bdf_from_u32() {
        let bdf = PciBdf::from(0x12345678);
        assert_eq!(bdf.segment(), 0x1234);
        assert_eq!(bdf.bus(), 0x56);
        assert_eq!(bdf.device(), 0x0f);
        assert_eq!(bdf.function(), 0x0);
    }
    #[test]
    fn test_pci_bdf_to_u32() {
        let bdf = PciBdf::new(0x1234, 0x56, 0x1f, 0x7);
        let val: u32 = bdf.into();
        assert_eq!(val, 0x123456ff);
    }
    #[test]
    fn test_pci_bdf_to_u16() {
        let bdf = PciBdf::new(0x1234, 0x56, 0x1f, 0x7);
        let val: u16 = bdf.into();
        assert_eq!(val, 0x56ff);
    }
    #[test]
    fn test_pci_bdf_from_str_valid() {
        let bdf = PciBdf::from_str("1234:56:1f.7").unwrap();
        assert_eq!(bdf.segment(), 0x1234);
        assert_eq!(bdf.bus(), 0x56);
        assert_eq!(bdf.device(), 0x1f);
        assert_eq!(bdf.function(), 0x7);
    }
    #[test]
    fn test_pci_bdf_from_str_zero() {
        let bdf = PciBdf::from_str("0000:00:00.0").unwrap();
        assert_eq!(bdf.segment(), 0);
        assert_eq!(bdf.bus(), 0);
        assert_eq!(bdf.device(), 0);
        assert_eq!(bdf.function(), 0);
    }
    #[test]
    fn test_pci_bdf_from_str_invalid_format() {
        assert!(matches!(
            PciBdf::from_str("invalid"),
            Err(PciBdfParseError::InvalidFormat(_))
        ));
        assert!(matches!(
            PciBdf::from_str("1234:56"),
            Err(PciBdfParseError::InvalidFormat(_))
        ));
        assert!(matches!(
            PciBdf::from_str("1234:56:78:9a.b"),
            Err(PciBdfParseError::InvalidFormat(_))
        ));
    }
    #[test]
    fn test_pci_bdf_from_str_invalid_hex() {
        assert!(matches!(
            PciBdf::from_str("xxxx:00:00.0"),
            Err(PciBdfParseError::InvalidHex(_))
        ));
        assert!(matches!(
            PciBdf::from_str("0000:xx:00.0"),
            Err(PciBdfParseError::InvalidHex(_))
        ));
        assert!(matches!(
            PciBdf::from_str("0000:00:xx.0"),
            Err(PciBdfParseError::InvalidHex(_))
        ));
        assert!(matches!(
            PciBdf::from_str("0000:00:00.x"),
            Err(PciBdfParseError::InvalidHex(_))
        ));
    }
    #[test]
    fn test_pci_bdf_display() {
        let bdf = PciBdf::new(0x1234, 0x56, 0x1f, 0x7);
        assert_eq!(format!("{}", bdf), "1234:56:1f.7");
    }
    #[test]
    fn test_pci_bdf_debug() {
        let bdf = PciBdf::new(0x1234, 0x56, 0x1f, 0x7);
        assert_eq!(format!("{:?}", bdf), "1234:56:1f.7");
    }
    #[test]
    fn test_pci_bdf_partial_eq() {
        let bdf1 = PciBdf::new(0x1234, 0x56, 0x1f, 0x7);
        let bdf2 = PciBdf::new(0x1234, 0x56, 0x1f, 0x7);
        let bdf3 = PciBdf::new(0x1234, 0x56, 0x1f, 0x6);
        assert_eq!(bdf1, bdf2);
        assert_ne!(bdf1, bdf3);
    }
    #[test]
    fn test_pci_bdf_partial_ord() {
        let bdf1 = PciBdf::new(0x1234, 0x56, 0x1f, 0x6);
        let bdf2 = PciBdf::new(0x1234, 0x56, 0x1f, 0x7);
        assert!(bdf1 < bdf2);
    }
    #[test]
    fn test_pci_bdf_deserialize_ok() {
        // Test deserializer
        let visitor = PciBdfVisitor;
        let result = visitor
            .visit_str::<serde::de::value::Error>("1234:56:1f.7")
            .unwrap();
        assert_eq!(result, PciBdf::new(0x1234, 0x56, 0x1f, 0x7));
    }
    #[test]
    fn test_pci_bdf_deserialize_invalid() {
        // Test deserializer with invalid input returns error
        let visitor = PciBdfVisitor;
        assert!(visitor
            .visit_str::<serde::de::value::Error>("invalid")
            .is_err());
    }
    #[test]
    fn test_pci_bdf_serialize() {
        // Test serializer using serde_test
        let bdf = PciBdf::new(0x1234, 0x56, 0x1f, 0x7);
        serde_test::assert_tokens(&bdf, &[serde_test::Token::Str("1234:56:1f.7")]);
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/seccompiler/build.rs | src/seccompiler/build.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/// Build script: tell Cargo where to find and how to link the system
/// libseccomp used by the compiler's FFI bindings.
fn main() {
    let directives = [
        "cargo::rustc-link-search=/usr/local/lib",
        "cargo::rustc-link-lib=seccomp",
    ];
    for directive in directives {
        println!("{directive}");
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/seccompiler/src/bindings.rs | src/seccompiler/src/bindings.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// Copyright 2021 Sony Group Corporation
//
// SPDX-License-Identifier: Apache-2.0
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
//! Raw FFI bindings for libseccomp library
use std::os::raw::*;
/// `-EEXIST`: value returned by libseccomp when the added item already
/// exists (see [`seccomp_arch_add`]).
pub const MINUS_EEXIST: i32 = -libc::EEXIST;
/// Filter context/handle (`*mut`)
pub type scmp_filter_ctx = *mut c_void;
/// Filter context/handle (`*const`)
pub type const_scmp_filter_ctx = *const c_void;
/// Comparison operators
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[repr(C)]
pub enum scmp_compare {
    _SCMP_CMP_MIN = 0,
    /// not equal
    SCMP_CMP_NE = 1,
    /// less than
    SCMP_CMP_LT = 2,
    /// less than or equal
    SCMP_CMP_LE = 3,
    /// equal
    SCMP_CMP_EQ = 4,
    /// greater than or equal
    SCMP_CMP_GE = 5,
    /// greater than
    SCMP_CMP_GT = 6,
    /// masked equality
    SCMP_CMP_MASKED_EQ = 7,
    _SCMP_CMP_MAX,
}
/// Argument datum
pub type scmp_datum_t = u64;
/// Argument / Value comparison definition
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[repr(C)]
pub struct scmp_arg_cmp {
    /// argument number, starting at 0
    pub arg: c_uint,
    /// the comparison op, e.g. `SCMP_CMP_*`
    pub op: scmp_compare,
    pub datum_a: scmp_datum_t,
    pub datum_b: scmp_datum_t,
}
/// Architecture token for x86_64.
pub const SCMP_ARCH_X86_64: u32 = 0xc000003e;
/// Architecture token for aarch64.
pub const SCMP_ARCH_AARCH64: u32 = 0xc00000b7;
/// Kill the process
pub const SCMP_ACT_KILL_PROCESS: u32 = 0x80000000;
/// Kill the thread
pub const SCMP_ACT_KILL_THREAD: u32 = 0x00000000;
/// Throw a `SIGSYS` signal
pub const SCMP_ACT_TRAP: u32 = 0x00030000;
/// Base action value OR-ed with an errno in [`SCMP_ACT_ERRNO`].
pub const SCMP_ACT_ERRNO_MASK: u32 = 0x00050000;
/// Return the specified error code
#[must_use]
pub const fn SCMP_ACT_ERRNO(x: u16) -> u32 {
    SCMP_ACT_ERRNO_MASK | x as u32
}
/// Base action value OR-ed with a trace value in [`SCMP_ACT_TRACE`].
pub const SCMP_ACT_TRACE_MASK: u32 = 0x7ff00000;
/// Notify a tracing process with the specified value
#[must_use]
pub const fn SCMP_ACT_TRACE(x: u16) -> u32 {
    SCMP_ACT_TRACE_MASK | x as u32
}
/// Allow the syscall to be executed after the action has been logged
pub const SCMP_ACT_LOG: u32 = 0x7ffc0000;
/// Allow the syscall to be executed
pub const SCMP_ACT_ALLOW: u32 = 0x7fff0000;
// Hand-written FFI declarations for the subset of the libseccomp C API used
// by this compiler; linked against the system `libseccomp` (see build.rs).
#[link(name = "seccomp")]
unsafe extern "C" {
    /// Initialize the filter state
    ///
    /// - `def_action`: the default filter action
    ///
    /// This function initializes the internal seccomp filter state and should
    /// be called before any other functions in this library to ensure the filter
    /// state is initialized. Returns a filter context on success, `ptr::null()` on failure.
    // Declared `safe` (Rust 2024 `unsafe extern`): it takes no pointer
    // arguments, so calling it cannot cause UB on the Rust side.
    pub safe fn seccomp_init(def_action: u32) -> scmp_filter_ctx;
    /// Adds an architecture to the filter
    ///
    /// - `ctx`: the filter context
    /// - `arch_token`: the architecture token, e.g. `SCMP_ARCH_*`
    ///
    /// This function adds a new architecture to the given seccomp filter context.
    /// Any new rules added after this function successfully returns will be added
    /// to this architecture but existing rules will not be added to this
    /// architecture. If the architecture token is [`SCMP_ARCH_NATIVE`] then the native
    /// architecture will be assumed. Returns zero on success, `-libc::EEXIST` if
    /// specified architecture is already present, other negative values on failure.
    pub fn seccomp_arch_add(ctx: scmp_filter_ctx, arch_token: u32) -> c_int;
    /// Resolve a syscall name to a number
    ///
    /// - `name`: the syscall name
    ///
    /// Resolve the given syscall name to the syscall number. Returns the syscall
    /// number on success, including negative pseudo syscall numbers (e.g. `__PNR_*`);
    /// returns [`__NR_SCMP_ERROR`] on failure.
    pub fn seccomp_syscall_resolve_name(name: *const c_char) -> c_int;
    /// Add a new rule to the filter
    ///
    /// - `ctx`: the filter context
    /// - `action`: the filter action
    /// - `syscall`: the syscall number
    /// - `arg_cnt`: the number of argument filters in the argument filter chain
    /// - `...`: [`scmp_arg_cmp`] structs
    ///
    /// This function adds a series of new argument/value checks to the seccomp
    /// filter for the given syscall; multiple argument/value checks can be
    /// specified and they will be chained together (AND'd together) in the filter.
    /// If the specified rule needs to be adjusted due to architecture specifics it
    /// will be adjusted without notification. Returns zero on success, negative
    /// values on failure.
    pub fn seccomp_rule_add(
        ctx: scmp_filter_ctx,
        action: u32,
        syscall: c_int,
        arg_cnt: c_uint,
        ...
    ) -> c_int;
    /// Add a new rule to the filter
    ///
    /// - `ctx`: the filter context
    /// - `action`: the filter action
    /// - `syscall`: the syscall number
    /// - `arg_cnt`: the number of elements in the arg_array parameter
    /// - `arg_array`: array of [`scmp_arg_cmp`] structs
    ///
    /// This function adds a series of new argument/value checks to the seccomp
    /// filter for the given syscall; multiple argument/value checks can be
    /// specified and they will be chained together (AND'd together) in the filter.
    /// If the specified rule needs to be adjusted due to architecture specifics it
    /// will be adjusted without notification. Returns zero on success, negative
    /// values on failure.
    pub fn seccomp_rule_add_array(
        ctx: scmp_filter_ctx,
        action: u32,
        syscall: c_int,
        arg_cnt: c_uint,
        arg_array: *const scmp_arg_cmp,
    ) -> c_int;
    /// Generate seccomp Berkeley Packet Filter (BPF) code and export it to a file
    ///
    /// - `ctx`: the filter context
    /// - `fd`: the destination fd
    ///
    /// This function generates seccomp Berkeley Packer Filter (BPF) code and writes
    /// it to the given fd. Returns zero on success, negative values on failure.
    pub fn seccomp_export_bpf(ctx: const_scmp_filter_ctx, fd: c_int) -> c_int;
}
/// Negative pseudo syscall number returned by some functions in case of an error
pub const __NR_SCMP_ERROR: c_int = -1;
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/seccompiler/src/lib.rs | src/seccompiler/src/lib.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::collections::BTreeMap;
use std::fs::File;
use std::io::{Read, Seek};
use std::os::fd::{AsRawFd, FromRawFd};
use std::os::unix::fs::MetadataExt;
use std::str::FromStr;
use bincode::config;
use bincode::config::{Configuration, Fixint, Limit, LittleEndian};
use bincode::error::EncodeError as BincodeError;
mod bindings;
use bindings::*;
pub mod types;
pub use types::*;
use zerocopy::IntoBytes;
// This byte limit is passed to `bincode` to guard against a potential memory
// allocation DOS caused by binary filters that are too large.
// This limit can be safely determined since the maximum length of a BPF
// filter is 4096 instructions and Firecracker has a finite number of threads.
const DESERIALIZATION_BYTES_LIMIT: usize = 100_000;
/// Bincode configuration used for binary seccomp filter files: little-endian,
/// fixed-width integers, capped at [`DESERIALIZATION_BYTES_LIMIT`] bytes.
pub const BINCODE_CONFIG: Configuration<LittleEndian, Fixint, Limit<DESERIALIZATION_BYTES_LIMIT>> =
    config::standard()
        .with_fixed_int_encoding()
        .with_limit::<DESERIALIZATION_BYTES_LIMIT>()
        .with_little_endian();
/// Binary filter compilation errors.
// NOTE(review): two variant names contain typos (`IntputOpen`, `LibSeccompSycall`).
// They are kept as-is: renaming variants of this public enum would break
// existing pattern matches (e.g. `compile_bpf` below).
#[derive(Debug, thiserror::Error, displaydoc::Display)]
pub enum CompilationError {
    /// Cannot open input file: {0}
    IntputOpen(std::io::Error),
    /// Cannot read input file: {0}
    InputRead(std::io::Error),
    /// Cannot deserialize json: {0}
    JsonDeserialize(serde_json::Error),
    /// Cannot parse arch: {0}
    ArchParse(String),
    /// Cannot create libseccomp context
    LibSeccompContext,
    /// Cannot add libseccomp arch
    LibSeccompArch,
    /// Cannot add libseccomp syscall
    LibSeccompSycall,
    /// Cannot add libseccomp syscall rule
    LibSeccompRule,
    /// Cannot export libseccomp bpf
    LibSeccompExport,
    /// Cannot create memfd: {0}
    MemfdCreate(std::io::Error),
    /// Cannot rewind memfd: {0}
    MemfdRewind(std::io::Error),
    /// Cannot read from memfd: {0}
    MemfdRead(std::io::Error),
    /// Cannot create output file: {0}
    OutputCreate(std::io::Error),
    /// Cannot serialize bpf: {0}
    BincodeSerialize(BincodeError),
}
pub fn compile_bpf(
input_path: &str,
arch: &str,
out_path: &str,
basic: bool,
) -> Result<(), CompilationError> {
let mut file_content = String::new();
File::open(input_path)
.map_err(CompilationError::IntputOpen)?
.read_to_string(&mut file_content)
.map_err(CompilationError::InputRead)?;
let bpf_map_json: BpfJson =
serde_json::from_str(&file_content).map_err(CompilationError::JsonDeserialize)?;
let arch = TargetArch::from_str(arch).map_err(CompilationError::ArchParse)?;
// SAFETY: Safe because the parameters are valid.
let memfd_fd = unsafe { libc::memfd_create(c"bpf".as_ptr().cast(), 0) };
if memfd_fd < 0 {
return Err(CompilationError::MemfdCreate(
std::io::Error::last_os_error(),
));
}
// SAFETY: Safe because the parameters are valid.
let mut memfd = unsafe { File::from_raw_fd(memfd_fd) };
let mut bpf_map: BTreeMap<String, Vec<u64>> = BTreeMap::new();
for (name, filter) in bpf_map_json.0.iter() {
let default_action = filter.default_action.to_scmp_type();
let filter_action = filter.filter_action.to_scmp_type();
// SAFETY: Safe as all args are correct.
let bpf_filter = {
let r = seccomp_init(default_action);
if r.is_null() {
return Err(CompilationError::LibSeccompContext);
}
r
};
// SAFETY: Safe as all args are correct.
unsafe {
let r = seccomp_arch_add(bpf_filter, arch.to_scmp_type());
if r != 0 && r != MINUS_EEXIST {
return Err(CompilationError::LibSeccompArch);
}
}
for rule in filter.filter.iter() {
// SAFETY: Safe as all args are correct.
let syscall = unsafe {
let r = seccomp_syscall_resolve_name(rule.syscall.as_ptr());
if r == __NR_SCMP_ERROR {
return Err(CompilationError::LibSeccompSycall);
}
r
};
// TODO remove when we drop deprecated "basic" arg from cli.
// "basic" bpf means it ignores condition checks.
if basic {
// SAFETY: Safe as all args are correct.
unsafe {
if seccomp_rule_add(bpf_filter, filter_action, syscall, 0) != 0 {
return Err(CompilationError::LibSeccompRule);
}
}
} else if let Some(rules) = &rule.args {
let comparators = rules
.iter()
.map(|rule| rule.to_scmp_type())
.collect::<Vec<scmp_arg_cmp>>();
// SAFETY: Safe as all args are correct.
// We can assume no one will define u32::MAX
// filters for a syscall.
#[allow(clippy::cast_possible_truncation)]
unsafe {
if seccomp_rule_add_array(
bpf_filter,
filter_action,
syscall,
comparators.len() as u32,
comparators.as_ptr(),
) != 0
{
return Err(CompilationError::LibSeccompRule);
}
}
} else {
// SAFETY: Safe as all args are correct.
unsafe {
if seccomp_rule_add(bpf_filter, filter_action, syscall, 0) != 0 {
return Err(CompilationError::LibSeccompRule);
}
}
}
}
// SAFETY: Safe as all args are correect.
unsafe {
if seccomp_export_bpf(bpf_filter, memfd.as_raw_fd()) != 0 {
return Err(CompilationError::LibSeccompExport);
}
}
memfd.rewind().map_err(CompilationError::MemfdRewind)?;
// Cast is safe because usize == u64
#[allow(clippy::cast_possible_truncation)]
let size = memfd.metadata().unwrap().size() as usize;
// Bpf instructions are 8 byte values and 4 byte alignment.
// We use u64 to satisfy these requirements.
let instructions = size / std::mem::size_of::<u64>();
let mut bpf = vec![0_u64; instructions];
memfd
.read_exact(bpf.as_mut_bytes())
.map_err(CompilationError::MemfdRead)?;
memfd.rewind().map_err(CompilationError::MemfdRewind)?;
bpf_map.insert(name.clone(), bpf);
}
let mut output_file = File::create(out_path).map_err(CompilationError::OutputCreate)?;
bincode::encode_into_std_write(&bpf_map, &mut output_file, BINCODE_CONFIG)
.map_err(CompilationError::BincodeSerialize)?;
Ok(())
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/seccompiler/src/types.rs | src/seccompiler/src/types.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use std::collections::BTreeMap;
use std::ffi::CString;
use std::str::FromStr;
use serde::*;
// use libseccomp::{ScmpAction, ScmpArch, ScmpCompareOp};
use crate::bindings::*;
/// Comparison to perform when matching a condition.
///
/// Each variant maps to a libseccomp `SCMP_CMP_*` operator in
/// `SeccompCondition::to_scmp_type`.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum SeccompCmpOp {
    /// Argument equals the condition value.
    Eq,
    /// Argument is greater than or equal to the condition value.
    Ge,
    /// Argument is strictly greater than the condition value.
    Gt,
    /// Argument is less than or equal to the condition value.
    Le,
    /// Argument is strictly less than the condition value.
    Lt,
    /// Argument ANDed with the given mask equals the condition value.
    MaskedEq(u64),
    /// Argument does not equal the condition value.
    Ne,
}
/// Seccomp argument value length.
#[derive(Clone, Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum SeccompCmpArgLen {
    /// Argument value length is 4 bytes.
    /// For `Eq` conditions only the low 32 bits are compared
    /// (see `SeccompCondition::to_scmp_type`).
    Dword,
    /// Argument value length is 8 bytes.
    Qword,
}
/// Condition that syscall must match in order to satisfy a rule.
#[derive(Debug, Deserialize)]
pub struct SeccompCondition {
    /// Zero-based index of the syscall argument being inspected.
    pub index: u8,
    /// Comparison operator applied to the argument.
    pub op: SeccompCmpOp,
    /// Value the argument is compared against (for `MaskedEq`, the value the
    /// masked argument must equal; the mask itself lives in the variant).
    pub val: u64,
    /// Width of the compared value; serialized as `"type"` in the JSON.
    #[serde(rename = "type")]
    pub val_len: SeccompCmpArgLen,
}
impl SeccompCondition {
    /// Lower this condition into the raw `scmp_arg_cmp` representation that
    /// libseccomp's rule-add functions expect.
    pub fn to_scmp_type(&self) -> scmp_arg_cmp {
        let arg = self.index as u32;
        // Every arm only differs in the operator and the two datum fields,
        // so build the struct through one small helper.
        let cmp = |op, datum_a, datum_b| scmp_arg_cmp {
            arg,
            op,
            datum_a,
            datum_b,
        };
        match self.op {
            SeccompCmpOp::Eq => match self.val_len {
                // When using EQ libseccomp compares the whole 64 bits. In
                // general this is not a problem, but for example we have
                // observed musl `ioctl` to leave garbage in the upper bits of
                // the `request` argument. There is a GH issue to allow 32bit
                // comparisons (see
                // https://github.com/seccomp/libseccomp/issues/383) but is not
                // merged yet. Until that is available, do a masked comparison
                // with the upper 32bits set to 0, so we will compare that
                // `hi32 & 0x0 == 0`, which is always true. This costs one
                // additional instruction, but will be likely be optimized away
                // by the BPF JIT.
                SeccompCmpArgLen::Dword => {
                    cmp(scmp_compare::SCMP_CMP_MASKED_EQ, 0x00000000FFFFFFFF, self.val)
                }
                SeccompCmpArgLen::Qword => cmp(scmp_compare::SCMP_CMP_EQ, self.val, 0),
            },
            SeccompCmpOp::Ge => cmp(scmp_compare::SCMP_CMP_GE, self.val, 0),
            SeccompCmpOp::Gt => cmp(scmp_compare::SCMP_CMP_GT, self.val, 0),
            SeccompCmpOp::Le => cmp(scmp_compare::SCMP_CMP_LE, self.val, 0),
            SeccompCmpOp::Lt => cmp(scmp_compare::SCMP_CMP_LT, self.val, 0),
            SeccompCmpOp::Ne => cmp(scmp_compare::SCMP_CMP_NE, self.val, 0),
            SeccompCmpOp::MaskedEq(mask) => {
                cmp(scmp_compare::SCMP_CMP_MASKED_EQ, mask, self.val)
            }
        }
    }
}
/// Actions that `seccomp` can apply to process calling a syscall.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum SeccompAction {
    /// Allow the syscall (`SCMP_ACT_ALLOW`).
    Allow,
    /// Fail the syscall, returning the given errno value (`SCMP_ACT_ERRNO`).
    Errno(u16),
    /// Kill the calling thread (`SCMP_ACT_KILL_THREAD`).
    KillThread,
    /// Kill the calling process (`SCMP_ACT_KILL_PROCESS`).
    KillProcess,
    /// Log the syscall (`SCMP_ACT_LOG`).
    Log,
    /// Trace action carrying the given value (`SCMP_ACT_TRACE`).
    Trace(u16),
    /// Trap action (`SCMP_ACT_TRAP`).
    Trap,
}
impl SeccompAction {
    /// Map this action to the raw `SCMP_ACT_*` value libseccomp expects.
    pub fn to_scmp_type(&self) -> u32 {
        match self {
            SeccompAction::Allow => SCMP_ACT_ALLOW,
            SeccompAction::Errno(e) => SCMP_ACT_ERRNO(*e),
            SeccompAction::KillThread => SCMP_ACT_KILL_THREAD,
            SeccompAction::KillProcess => SCMP_ACT_KILL_PROCESS,
            SeccompAction::Log => SCMP_ACT_LOG,
            SeccompAction::Trace(t) => SCMP_ACT_TRACE(*t),
            SeccompAction::Trap => SCMP_ACT_TRAP,
        }
    }
}
/// Rule that `seccomp` attempts to match for a syscall.
///
/// If all conditions match then rule gets matched.
/// The action of the first rule that matches will be applied to the calling process.
/// If no rule matches the default action is applied.
#[derive(Debug, Deserialize)]
pub struct SyscallRule {
    /// Syscall name; resolved to a number via `seccomp_syscall_resolve_name`
    /// during compilation.
    pub syscall: CString,
    /// Optional conditions on the syscall arguments; `None` means the rule
    /// matches on the syscall number alone.
    pub args: Option<Vec<SeccompCondition>>,
}
/// Filter containing rules assigned to syscall numbers.
#[derive(Debug, Deserialize)]
pub struct Filter {
    /// Action applied when no rule matches.
    pub default_action: SeccompAction,
    /// Action applied when a rule matches.
    pub filter_action: SeccompAction,
    /// The syscall rules making up this filter.
    pub filter: Vec<SyscallRule>,
}
/// Deserializable object that represents the Json filter file.
///
/// Each entry maps a filter name to its [`Filter`] definition.
#[derive(Debug, Deserialize)]
pub struct BpfJson(pub BTreeMap<String, Filter>);
/// Supported target architectures.
#[derive(Debug)]
pub enum TargetArch {
    /// 64-bit x86 (`SCMP_ARCH_X86_64`).
    X86_64,
    /// 64-bit ARM (`SCMP_ARCH_AARCH64`).
    Aarch64,
}
impl TargetArch {
    /// Map the architecture to libseccomp's `SCMP_ARCH_*` constant.
    pub fn to_scmp_type(&self) -> u32 {
        match self {
            TargetArch::X86_64 => SCMP_ARCH_X86_64,
            TargetArch::Aarch64 => SCMP_ARCH_AARCH64,
        }
    }
}
impl FromStr for TargetArch {
    type Err = String;

    /// Parse an architecture name case-insensitively. On failure the error
    /// carries the original (unrecognized) input string.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let normalized = s.to_lowercase();
        if normalized == "x86_64" {
            Ok(TargetArch::X86_64)
        } else if normalized == "aarch64" {
            Ok(TargetArch::Aarch64)
        } else {
            Err(s.to_string())
        }
    }
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
firecracker-microvm/firecracker | https://github.com/firecracker-microvm/firecracker/blob/f0691f8253d4bde225b9f70ecabf39b7ad796935/src/seccompiler/src/bin.rs | src/seccompiler/src/bin.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use clap::Parser;
use seccompiler::{CompilationError, compile_bpf};
const DEFAULT_OUTPUT_FILENAME: &str = "seccomp_binary_filter.out";
// Command-line interface of the seccompiler binary.
// Plain `//` comments are used here (not `///`) so clap's derived
// help/about output is not altered by doc comments.
#[derive(Debug, Parser)]
#[command(version = format!("v{}", env!("CARGO_PKG_VERSION")))]
struct Cli {
    // Target architecture the BPF is compiled for.
    #[arg(
        short,
        long,
        help = "The computer architecture where the BPF program runs. Supported architectures: \
                x86_64, aarch64."
    )]
    target_arch: String,
    // Path to the JSON seccomp policy.
    #[arg(short, long, help = "File path of the JSON input.")]
    input_file: String,
    // Where the compiled binary filter is written.
    #[arg(short, long, help = "Optional path of the output file.", default_value = DEFAULT_OUTPUT_FILENAME)]
    output_file: String,
    // Deprecated flag: compile "basic" filters that ignore argument checks.
    #[arg(
        short,
        long,
        help = "Deprecated! Transforms the filters into basic filters. Drops all argument checks \
                and rule-level actions. Not recommended."
    )]
    basic: bool,
}
/// Entry point: parse CLI arguments and compile the JSON seccomp policy into
/// a binary filter file. Any `CompilationError` is propagated out of `main`.
fn main() -> Result<(), CompilationError> {
    let cli = Cli::parse();
    compile_bpf(
        &cli.input_file,
        &cli.target_arch,
        &cli.output_file,
        cli.basic,
    )
}
| rust | Apache-2.0 | f0691f8253d4bde225b9f70ecabf39b7ad796935 | 2026-01-04T15:33:15.697747Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/lib.rs | tests/lib.rs | // Copyright 2018-2025 the Deno authors. MIT license.
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/node_compat/test_runner.rs | tests/node_compat/test_runner.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use test_util as util;
use util::deno_config_path;
/// Runs the Node.js compatibility suite (`tests/node_compat/test.ts`) in a
/// child `deno test` process and asserts that it exits successfully.
#[test]
fn node_compat_tests() {
  // Keep the local test HTTP server alive for the duration of the run.
  let _server = util::http_server();
  let mut deno = util::deno_cmd()
    .current_dir(util::root_path())
    .envs(util::env_vars_for_npm_tests())
    .arg("test")
    .arg("--config")
    .arg(deno_config_path())
    .arg("--no-lock")
    .arg("-A")
    .arg(util::tests_path().join("node_compat/test.ts"))
    .spawn()
    .expect("failed to spawn script");
  let status = deno.wait().expect("failed to wait for the child process");
  assert_eq!(Some(0), status.code());
  assert!(status.success());
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/test_macro/lib.rs | tests/util/test_macro/lib.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use proc_macro::TokenStream;
use quote::ToTokens;
use quote::quote;
use syn::ItemFn;
use syn::Meta;
use syn::ReturnType;
use syn::Token;
use syn::parse::Parser;
use syn::parse_macro_input;
use syn::punctuated::Punctuated;
/// Options parsed from the arguments of the custom `#[test(...)]` attribute.
#[derive(Default)]
struct TestAttributes {
  // `flaky` marker; forwarded to `TestMacroCase::flaky` for the harness.
  flaky: bool,
  // Optional `timeout = <int>` value; forwarded to `TestMacroCase::timeout`.
  // Presumably milliseconds (e.g. `timeout = 60_000`) — semantics live in
  // the test runner.
  timeout: Option<usize>,
}
/// Custom `#[test]` attribute macro.
///
/// Supports `#[test]`, `#[test(flaky)]`, and `#[test(timeout = 60_000)]`;
/// parses the attribute arguments, then expands the annotated function into
/// the original item plus a registration with the `test_util` harness.
#[proc_macro_attribute]
pub fn test(attr: TokenStream, item: TokenStream) -> TokenStream {
  let attrs = parse_test_attributes(attr);
  generate_test_macro(item, attrs)
}
/// Parse the arguments of `#[test(...)]` into [`TestAttributes`].
///
/// Accepts a comma-separated list of `flaky` and `timeout = <integer>`.
/// Any other input panics with a descriptive message; since this runs inside
/// the proc macro, the panic surfaces as a compile error at the use site.
fn parse_test_attributes(attr: TokenStream) -> TestAttributes {
  // Parse as a comma-separated list of Meta items
  let parser = Punctuated::<Meta, Token![,]>::parse_terminated;
  let metas = match parser.parse(attr.clone()) {
    Ok(metas) => metas,
    Err(e) => {
      panic!(
        "Failed to parse test attributes: {}. Expected format: #[test], #[test(flaky)], or #[test(timeout = 60_000)]",
        e
      );
    }
  };
  let mut result = TestAttributes::default();
  for meta in metas {
    match meta {
      // Handle simple path like `flaky`
      Meta::Path(path) => {
        if path.is_ident("flaky") {
          result.flaky = true;
        } else {
          let ident = path
            .get_ident()
            .map(|i| i.to_string())
            .unwrap_or_else(|| path.to_token_stream().to_string());
          panic!(
            "Unknown test attribute: '{}'. Valid attributes are:\n - flaky\n - timeout = <number>",
            ident
          );
        }
      }
      // Handle name-value pairs like `timeout = 60_000`
      Meta::NameValue(name_value) => {
        if name_value.path.is_ident("timeout") {
          // Extract the literal value
          match &name_value.value {
            syn::Expr::Lit(expr_lit) => {
              match &expr_lit.lit {
                syn::Lit::Int(lit_int) => {
                  // Use base10_parse to automatically handle underscores
                  match lit_int.base10_parse::<usize>() {
                    Ok(value) => result.timeout = Some(value),
                    Err(e) => {
                      panic!(
                        "Invalid timeout value: '{}'. Error: {}. Expected a positive integer (e.g., timeout = 60_000).",
                        lit_int, e
                      );
                    }
                  }
                }
                _ => {
                  panic!(
                    "Invalid timeout value type. Expected an integer literal (e.g., timeout = 60_000), got: {:?}",
                    expr_lit.lit
                  );
                }
              }
            }
            _ => {
              panic!(
                "Invalid timeout value. Expected an integer literal (e.g., timeout = 60_000), got: {}",
                quote::quote!(#name_value.value)
              );
            }
          }
        } else {
          let ident = name_value
            .path
            .get_ident()
            .map(|i| i.to_string())
            .unwrap_or_else(|| name_value.path.to_token_stream().to_string());
          panic!(
            "Unknown test attribute: '{}'. Valid attributes are:\n - flaky\n - timeout = <number>",
            ident
          );
        }
      }
      // Handle other meta types (List, etc.)
      _ => {
        panic!(
          "Invalid test attribute format: '{}'. Expected format:\n - flaky\n - timeout = <number>",
          quote::quote!(#meta)
        );
      }
    }
  }
  result
}
/// Expand a `#[test]`-annotated function into the original item plus a
/// `test_util::submit!` registration with the harness.
///
/// Async functions and functions returning `Result` get a synchronous
/// wrapper (`<name>_wrapper`) that drives them on a current-thread tokio
/// runtime and/or unwraps the `Result`, since `TestMacroCase::func` must be
/// a plain `fn()`.
fn generate_test_macro(
  item: TokenStream,
  attrs: TestAttributes,
) -> TokenStream {
  let input = parse_macro_input!(item as ItemFn);
  let fn_name = &input.sig.ident;
  // Detect if the function is async
  let is_async = input.sig.asyncness.is_some();
  // Check for #[ignore] attribute
  let is_ignored = input
    .attrs
    .iter()
    .any(|attr| attr.path().is_ident("ignore"));
  let timeout_expr = if let Some(timeout) = attrs.timeout {
    quote! { Some(#timeout) }
  } else {
    quote! { None }
  };
  let is_flaky = attrs.flaky;
  // Check if the function returns a Result
  // (detected syntactically: the return type's last path segment is `Result`)
  let returns_result = match &input.sig.output {
    ReturnType::Type(_, ty) => {
      if let syn::Type::Path(type_path) = &**ty {
        type_path
          .path
          .segments
          .last()
          .is_some_and(|seg| seg.ident == "Result")
      } else {
        false
      }
    }
    _ => false,
  };
  // Determine if we need a wrapper function
  let needs_wrapper = is_async || returns_result;
  let (test_func, func_def) = if needs_wrapper {
    let wrapper_name =
      syn::Ident::new(&format!("{}_wrapper", fn_name), fn_name.span());
    let wrapper_body = if is_async {
      let call = if returns_result {
        quote! { #fn_name().await.unwrap(); }
      } else {
        quote! { #fn_name().await; }
      };
      quote! {
        let rt = tokio::runtime::Builder::new_current_thread()
          .enable_all()
          .build()
          .unwrap();
        rt.block_on(async {
          #call
        });
      }
    } else {
      // Non-async, but returns Result
      quote! {
        #fn_name().unwrap();
      }
    };
    let wrapper = quote! {
      fn #wrapper_name() {
        #wrapper_body
      }
    };
    (quote! { #wrapper_name }, wrapper)
  } else {
    (quote! { #fn_name }, quote! {})
  };
  // Emit: the original function, the (possibly empty) wrapper, and the
  // harness registration record.
  let expanded = quote! {
    #input
    #func_def
    test_util::submit! {
      test_util::TestMacroCase {
        name: stringify!(#fn_name),
        module_name: module_path!(),
        func: #test_func,
        flaky: #is_flaky,
        file: file!(),
        line: line!(),
        col: column!(),
        ignore: #is_ignored,
        timeout: #timeout_expr,
      }
    }
  };
  TokenStream::from(expanded)
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/integration_tests_runner.rs | tests/util/server/integration_tests_runner.rs | // Copyright 2018-2025 the Deno authors. MIT license.
/// No-op entry point: this binary exists only so that the executable gets
/// built when running `cargo test`.
pub fn main() {
  // this file exists to cause the executable to be built when running cargo test
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/wildcard.rs | tests/util/server/src/wildcard.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use crate::colors;
/// Outcome of matching text against a wildcard pattern: either the text
/// matched, or matching failed with a human-readable report of where it
/// diverged.
pub enum WildcardMatchResult {
  Success,
  Fail(String),
}

impl WildcardMatchResult {
  /// True when the match succeeded.
  pub fn is_success(&self) -> bool {
    match self {
      WildcardMatchResult::Success => true,
      WildcardMatchResult::Fail(_) => false,
    }
  }
}
/// Match `text` against `pattern`, where the pattern may contain the markers
/// `[WILDCARD]`, `[WILDLINE]`, `[WILDCHAR]`, `[WILDCHARS(n)]`, an
/// `[UNORDERED_START]`/`[UNORDERED_END]` block, and `[#...]` comment lines.
///
/// On failure, returns `WildcardMatchResult::Fail` with an annotated report
/// of the match progress; whitespace in the report is made visible
/// (tab -> "→", space -> "·").
pub fn wildcard_match_detailed(
  pattern: &str,
  text: &str,
) -> WildcardMatchResult {
  // Make tabs/spaces visible in the diagnostic output.
  fn annotate_whitespace(text: &str) -> String {
    text.replace('\t', "\u{2192}").replace(' ', "\u{00B7}")
  }
  // Normalize line endings
  let original_text = text.replace("\r\n", "\n");
  let mut current_text = original_text.as_str();
  // normalize line endings and strip comments
  let pattern = pattern
    .split('\n')
    .map(|line| line.trim_end_matches('\r'))
    .filter(|l| {
      let is_comment = l.starts_with("[#") && l.ends_with(']');
      !is_comment
    })
    .collect::<Vec<_>>()
    .join("\n");
  let mut output_lines = Vec::new();
  let parts = parse_wildcard_pattern_text(&pattern).unwrap();
  // Track what the previous part was: a trailing wildcard/wildline changes
  // how the next literal text is searched for.
  let mut was_last_wildcard = false;
  let mut was_last_wildline = false;
  for (i, part) in parts.iter().enumerate() {
    match part {
      WildcardPatternPart::Wildcard => {
        output_lines.push("<WILDCARD />".to_string());
      }
      WildcardPatternPart::Wildline => {
        output_lines.push("<WILDLINE />".to_string());
      }
      WildcardPatternPart::Wildnum(times) => {
        // Consume exactly `times` bytes of the remaining text.
        if current_text.len() < *times {
          output_lines
            .push(format!("==== HAD MISSING WILDCHARS({}) ====", times));
          output_lines.push(colors::red(annotate_whitespace(current_text)));
          return WildcardMatchResult::Fail(output_lines.join("\n"));
        }
        output_lines.push(format!("<WILDCHARS({}) />", times));
        current_text = &current_text[*times..];
      }
      WildcardPatternPart::Text(search_text) => {
        let is_last = i + 1 == parts.len();
        let search_index = if is_last && was_last_wildcard {
          // search from the end of the file
          current_text.rfind(search_text)
        } else if was_last_wildline {
          if is_last {
            find_last_text_on_line(search_text, current_text)
          } else {
            find_first_text_on_line(search_text, current_text)
          }
        } else {
          current_text.find(search_text)
        };
        match search_index {
          // After a wildcard/wildline any position is fine; otherwise the
          // text must appear immediately (index 0).
          Some(found_index)
            if was_last_wildcard || was_last_wildline || found_index == 0 =>
          {
            output_lines.push(format!(
              "<FOUND>{}</FOUND>",
              colors::gray(annotate_whitespace(search_text))
            ));
            current_text = &current_text[found_index + search_text.len()..];
          }
          Some(index) => {
            output_lines.push(
              "==== FOUND SEARCH TEXT IN WRONG POSITION ====".to_string(),
            );
            output_lines.push(colors::gray(annotate_whitespace(search_text)));
            output_lines
              .push("==== HAD UNKNOWN PRECEDING TEXT ====".to_string());
            output_lines
              .push(colors::red(annotate_whitespace(&current_text[..index])));
            return WildcardMatchResult::Fail(output_lines.join("\n"));
          }
          None => {
            // Not found: find the longest prefix of the search text that
            // still matches, to produce a useful report.
            let was_wildcard_or_line = was_last_wildcard || was_last_wildline;
            let mut max_search_text_found_index = 0;
            let mut max_current_text_found_index = 0;
            for (index, _) in search_text.char_indices() {
              let sub_string = &search_text[..index];
              if let Some(found_index) = current_text.find(sub_string) {
                if was_wildcard_or_line || found_index == 0 {
                  max_search_text_found_index = index;
                  max_current_text_found_index = found_index;
                } else {
                  break;
                }
              } else {
                break;
              }
            }
            if !was_wildcard_or_line && max_search_text_found_index > 0 {
              output_lines.push(format!(
                "<FOUND>{}</FOUND>",
                colors::gray(annotate_whitespace(
                  &search_text[..max_search_text_found_index]
                ))
              ));
            }
            output_lines
              .push("==== COULD NOT FIND SEARCH TEXT ====".to_string());
            output_lines.push(colors::green(annotate_whitespace(
              if was_wildcard_or_line {
                search_text
              } else {
                &search_text[max_search_text_found_index..]
              },
            )));
            if was_wildcard_or_line && max_search_text_found_index > 0 {
              output_lines.push(format!(
                "==== MAX FOUND ====\n{}",
                colors::red(annotate_whitespace(
                  &search_text[..max_search_text_found_index]
                ))
              ));
            }
            let actual_next_text =
              &current_text[max_current_text_found_index..];
            // Show at most 40 characters of the actual text that follows.
            let next_text_len = actual_next_text
              .chars()
              .take(40)
              .map(|c| c.len_utf8())
              .sum::<usize>();
            output_lines.push(format!(
              "==== NEXT ACTUAL TEXT ====\n{}{}",
              colors::red(annotate_whitespace(
                &actual_next_text[..next_text_len]
              )),
              if actual_next_text.len() > next_text_len {
                "[TRUNCATED]"
              } else {
                ""
              },
            ));
            return WildcardMatchResult::Fail(output_lines.join("\n"));
          }
        }
      }
      WildcardPatternPart::UnorderedLines(expected_lines) => {
        assert!(!was_last_wildcard, "unsupported");
        assert!(!was_last_wildline, "unsupported");
        // Take as many actual lines as there are expected lines, then
        // compare the two sets order-insensitively.
        let mut actual_lines = Vec::with_capacity(expected_lines.len());
        for _ in 0..expected_lines.len() {
          match current_text.find('\n') {
            Some(end_line_index) => {
              actual_lines.push(&current_text[..end_line_index]);
              current_text = &current_text[end_line_index + 1..];
            }
            None => {
              break;
            }
          }
        }
        actual_lines.sort_unstable();
        let mut expected_lines = expected_lines.clone();
        expected_lines.sort_unstable();
        if actual_lines.len() != expected_lines.len() {
          output_lines
            .push("==== HAD WRONG NUMBER OF UNORDERED LINES ====".to_string());
          output_lines.push("# ACTUAL".to_string());
          output_lines.extend(
            actual_lines
              .iter()
              .map(|l| colors::green(annotate_whitespace(l))),
          );
          output_lines.push("# EXPECTED".to_string());
          output_lines.extend(
            expected_lines
              .iter()
              .map(|l| colors::green(annotate_whitespace(l))),
          );
          return WildcardMatchResult::Fail(output_lines.join("\n"));
        }
        if let Some(invalid_expected) =
          expected_lines.iter().find(|e| e.contains("[WILDCARD]"))
        {
          panic!(
            concat!(
              "Cannot use [WILDCARD] inside [UNORDERED_START]. Use [WILDLINE] instead.\n",
              " Invalid expected line: {}"
            ),
            invalid_expected
          );
        }
        // Each actual line must match exactly one expected line (literal or
        // recursive wildcard match); matched expectations are consumed.
        for actual_line in actual_lines {
          let maybe_found_index =
            expected_lines.iter().position(|expected_line| {
              actual_line == *expected_line
                || wildcard_match_detailed(expected_line, actual_line)
                  .is_success()
            });
          if let Some(found_index) = maybe_found_index {
            let expected = expected_lines.remove(found_index);
            output_lines.push(format!(
              "<FOUND>{}</FOUND>",
              colors::gray(annotate_whitespace(expected))
            ));
          } else {
            output_lines
              .push("==== UNORDERED LINE DID NOT MATCH ====".to_string());
            output_lines.push(format!(
              " ACTUAL: {}",
              colors::red(annotate_whitespace(actual_line))
            ));
            for expected in expected_lines {
              output_lines.push(format!(
                " EXPECTED ANY: {}",
                colors::green(annotate_whitespace(expected))
              ));
            }
            return WildcardMatchResult::Fail(output_lines.join("\n"));
          }
        }
      }
    }
    was_last_wildcard = matches!(part, WildcardPatternPart::Wildcard);
    was_last_wildline = matches!(part, WildcardPatternPart::Wildline);
  }
  if was_last_wildcard || was_last_wildline || current_text.is_empty() {
    WildcardMatchResult::Success
  } else if current_text == "\n" {
    WildcardMatchResult::Fail(
      "<matched everything>\n!!!! PROBLEM: Missing final newline at end of expected output !!!!"
        .to_string(),
    )
  } else {
    output_lines.push("==== HAD TEXT AT END OF FILE ====".to_string());
    output_lines.push(colors::red(annotate_whitespace(current_text)));
    WildcardMatchResult::Fail(output_lines.join("\n"))
  }
}
/// One segment of a parsed wildcard pattern.
#[derive(Debug)]
enum WildcardPatternPart<'a> {
  /// `[WILDCARD]` — matches any amount of text, across lines.
  Wildcard,
  /// `[WILDLINE]` — matches any text within the current line.
  Wildline,
  /// `[WILDCHAR]` / `[WILDCHARS(n)]` — advances exactly n bytes.
  Wildnum(usize),
  /// Literal text that must appear verbatim.
  Text(&'a str),
  /// `[UNORDERED_START]`/`[UNORDERED_END]` block — the enclosed lines may
  /// appear in any order.
  UnorderedLines(Vec<&'a str>),
}
/// Parse a wildcard pattern into its literal and marker parts.
///
/// Recognized markers: `[WILDCARD]`, `[WILDLINE]`, `[WILDCHAR]`,
/// `[WILDCHARS(n)]`, and `[UNORDERED_START]\n...\n[UNORDERED_END]\n` blocks.
/// Runs of plain characters between markers become `Text` parts.
fn parse_wildcard_pattern_text(
  text: &str,
) -> Result<Vec<WildcardPatternPart<'_>>, monch::ParseErrorFailureError> {
  use monch::*;

  // Parses an `[UNORDERED_START]` block, collecting its inner lines.
  fn parse_unordered_lines(input: &str) -> ParseResult<'_, Vec<&str>> {
    const END_TEXT: &str = "\n[UNORDERED_END]\n";
    let (input, _) = tag("[UNORDERED_START]\n")(input)?;
    match input.find(END_TEXT) {
      Some(end_index) => ParseResult::Ok((
        &input[end_index + END_TEXT.len()..],
        input[..end_index].lines().collect::<Vec<_>>(),
      )),
      None => ParseError::fail(input, "Could not find [UNORDERED_END]"),
    }
  }

  // Raw token produced per parsing step before literal-text aggregation.
  enum InnerPart<'a> {
    Wildcard,
    Wildline,
    Wildchars(usize),
    UnorderedLines(Vec<&'a str>),
    Char,
  }

  struct Parser<'a> {
    // Remaining unparsed input.
    current_input: &'a str,
    // Start of the literal-text run currently being accumulated.
    last_text_input: &'a str,
    parts: Vec<WildcardPatternPart<'a>>,
  }

  impl<'a> Parser<'a> {
    fn parse(mut self) -> ParseResult<'a, Vec<WildcardPatternPart<'a>>> {
      fn parse_num(input: &str) -> ParseResult<'_, usize> {
        let num_char_count =
          input.chars().take_while(|c| c.is_ascii_digit()).count();
        if num_char_count == 0 {
          return ParseError::backtrace();
        }
        let (char_text, input) = input.split_at(num_char_count);
        let value = str::parse::<usize>(char_text).unwrap();
        Ok((input, value))
      }

      fn parse_wild_char(input: &str) -> ParseResult<'_, ()> {
        let (input, _) = tag("[WILDCHAR]")(input)?;
        ParseResult::Ok((input, ()))
      }

      fn parse_wild_chars(input: &str) -> ParseResult<'_, usize> {
        let (input, _) = tag("[WILDCHARS(")(input)?;
        let (input, times) = parse_num(input)?;
        let (input, _) = tag(")]")(input)?;
        ParseResult::Ok((input, times))
      }

      while !self.current_input.is_empty() {
        // Try every marker; fall back to consuming one plain character.
        let (next_input, inner_part) = or6(
          map(tag("[WILDCARD]"), |_| InnerPart::Wildcard),
          map(tag("[WILDLINE]"), |_| InnerPart::Wildline),
          map(parse_wild_char, |_| InnerPart::Wildchars(1)),
          map(parse_wild_chars, InnerPart::Wildchars),
          map(parse_unordered_lines, |lines| {
            InnerPart::UnorderedLines(lines)
          }),
          map(next_char, |_| InnerPart::Char),
        )(self.current_input)?;
        match inner_part {
          InnerPart::Wildcard => {
            self.queue_previous_text(next_input);
            self.parts.push(WildcardPatternPart::Wildcard);
          }
          InnerPart::Wildline => {
            self.queue_previous_text(next_input);
            self.parts.push(WildcardPatternPart::Wildline);
          }
          InnerPart::Wildchars(times) => {
            self.queue_previous_text(next_input);
            self.parts.push(WildcardPatternPart::Wildnum(times));
          }
          InnerPart::UnorderedLines(expected_lines) => {
            self.queue_previous_text(next_input);
            self
              .parts
              .push(WildcardPatternPart::UnorderedLines(expected_lines));
          }
          InnerPart::Char => {
            // ignore
          }
        }
        self.current_input = next_input;
      }
      // Flush any trailing literal text.
      self.queue_previous_text("");
      ParseResult::Ok(("", self.parts))
    }

    // Emits the literal text accumulated since the last marker (if any) as
    // a `Text` part, then restarts accumulation at `next_input`.
    fn queue_previous_text(&mut self, next_input: &'a str) {
      let previous_text = &self.last_text_input
        [..self.last_text_input.len() - self.current_input.len()];
      if !previous_text.is_empty() {
        self.parts.push(WildcardPatternPart::Text(previous_text));
      }
      self.last_text_input = next_input;
    }
  }

  with_failure_handling(|input| {
    Parser {
      current_input: input,
      last_text_input: input,
      parts: Vec::new(),
    }
    .parse()
  })(text)
}
/// Returns the byte offset of the first occurrence of `search_text` in
/// `current_text`, provided that occurrence starts no later than the end of
/// the first line.
fn find_first_text_on_line(
  search_text: &str,
  current_text: &str,
) -> Option<usize> {
  let line_end = current_text.find('\n').unwrap_or(current_text.len());
  current_text
    .find(search_text)
    .filter(|&pos| pos <= line_end)
}
/// Returns the byte offset of the last occurrence of `search_text` that
/// still starts within the first line of `current_text`.
fn find_last_text_on_line(
  search_text: &str,
  current_text: &str,
) -> Option<usize> {
  let line_end = current_text.find('\n').unwrap_or(current_text.len());
  let mut best_match = None;
  let mut offset = 0;
  // Advance one byte past each hit so overlapping occurrences are seen.
  while let Some(rel_pos) = current_text[offset..].find(search_text) {
    let pos = offset + rel_pos;
    if pos > line_end {
      break;
    }
    best_match = Some(pos);
    offset = pos + 1;
  }
  best_match
}
/// Unit tests for the wildcard matcher and its line-search helpers.
#[cfg(test)]
mod test {
  use super::*;
  use crate::assert_contains;

  #[test]
  fn parse_parse_wildcard_match_text() {
    // An unterminated [UNORDERED_START] block must be a parse failure.
    let result =
      parse_wildcard_pattern_text("[UNORDERED_START]\ntesting\ntesting")
        .err()
        .unwrap();
    assert_contains!(result.to_string(), "Could not find [UNORDERED_END]");
  }

  #[test]
  fn test_wildcard_match() {
    // (pattern, input, expected success)
    let fixtures = vec![
      ("foobarbaz", "foobarbaz", true),
      ("[WILDCARD]", "foobarbaz", true),
      ("foobar", "foobarbaz", false),
      ("foo[WILDCARD]baz", "foobarbaz", true),
      ("foo[WILDCARD]baz", "foobazbar", false),
      ("foo[WILDCARD]baz[WILDCARD]qux", "foobarbazqatqux", true),
      ("foo[WILDCARD]", "foobar", true),
      ("foo[WILDCARD]baz[WILDCARD]", "foobarbazqat", true),
      // check with different line endings
      ("foo[WILDCARD]\nbaz[WILDCARD]\n", "foobar\nbazqat\n", true),
      (
        "foo[WILDCARD]\nbaz[WILDCARD]\n",
        "foobar\r\nbazqat\r\n",
        true,
      ),
      (
        "foo[WILDCARD]\r\nbaz[WILDCARD]\n",
        "foobar\nbazqat\r\n",
        true,
      ),
      (
        "foo[WILDCARD]\r\nbaz[WILDCARD]\r\n",
        "foobar\nbazqat\n",
        true,
      ),
      (
        "foo[WILDCARD]\r\nbaz[WILDCARD]\r\n",
        "foobar\r\nbazqat\r\n",
        true,
      ),
    ];
    // Iterate through the fixture lists, testing each one
    for (pattern, string, expected) in fixtures {
      let actual = wildcard_match_detailed(pattern, string).is_success();
      dbg!(pattern, string, expected);
      assert_eq!(actual, expected);
    }
  }

  #[test]
  fn test_wildcard_match2() {
    let wildcard_match = |pattern: &str, text: &str| {
      wildcard_match_detailed(pattern, text).is_success()
    };
    // foo, bar, baz, qux, quux, quuz, corge, grault, garply, waldo, fred, plugh, xyzzy
    assert!(wildcard_match("foo[WILDCARD]baz", "foobarbaz"));
    assert!(!wildcard_match("foo[WILDCARD]baz", "foobazbar"));

    let multiline_pattern = "[WILDCARD]
foo:
[WILDCARD]baz[WILDCARD]";

    fn multi_line_builder(input: &str, leading_text: Option<&str>) -> String {
      // If there is leading text add a newline so it's on it's own line
      let head = match leading_text {
        Some(v) => format!("{v}\n"),
        None => "".to_string(),
      };
      format!(
        "{head}foo:
quuz {input} corge
grault"
      )
    }

    // Validate multi-line string builder
    assert_eq!(
      "QUUX=qux
foo:
quuz BAZ corge
grault",
      multi_line_builder("BAZ", Some("QUUX=qux"))
    );

    // Correct input & leading line
    assert!(wildcard_match(
      multiline_pattern,
      &multi_line_builder("baz", Some("QUX=quux")),
    ));

    // Should fail when leading line
    assert!(!wildcard_match(
      multiline_pattern,
      &multi_line_builder("baz", None),
    ));

    // Incorrect input & leading line
    assert!(!wildcard_match(
      multiline_pattern,
      &multi_line_builder("garply", Some("QUX=quux")),
    ));

    // Incorrect input & no leading line
    assert!(!wildcard_match(
      multiline_pattern,
      &multi_line_builder("garply", None),
    ));

    // wildline
    assert!(wildcard_match("foo[WILDLINE]baz", "foobarbaz"));
    assert!(wildcard_match("foo[WILDLINE]bar", "foobarbar"));
    assert!(!wildcard_match("foo[WILDLINE]baz", "fooba\nrbaz"));
    assert!(wildcard_match("foo[WILDLINE]", "foobar"));

    // wildnum
    assert!(wildcard_match("foo[WILDCHARS(3)]baz", "foobarbaz"));
    assert!(!wildcard_match("foo[WILDCHARS(4)]baz", "foobarbaz"));
    assert!(!wildcard_match("foo[WILDCHARS(2)]baz", "foobarbaz"));
    assert!(!wildcard_match("foo[WILDCHARS(1)]baz", "foobarbaz"));
    assert!(!wildcard_match("foo[WILDCHARS(20)]baz", "foobarbaz"));
  }

  #[test]
  fn test_wildcard_match_unordered_lines() {
    let wildcard_match = |pattern: &str, text: &str| {
      wildcard_match_detailed(pattern, text).is_success()
    };
    // matching
    assert!(wildcard_match(
      concat!("[UNORDERED_START]\n", "B\n", "A\n", "[UNORDERED_END]\n"),
      concat!("A\n", "B\n",)
    ));
    // different line
    assert!(!wildcard_match(
      concat!("[UNORDERED_START]\n", "Ba\n", "A\n", "[UNORDERED_END]\n"),
      concat!("A\n", "B\n",)
    ));
    // different number of lines
    assert!(!wildcard_match(
      concat!(
        "[UNORDERED_START]\n",
        "B\n",
        "A\n",
        "C\n",
        "[UNORDERED_END]\n"
      ),
      concat!("A\n", "B\n",)
    ));
  }

  #[test]
  fn test_find_first_text_on_line() {
    let text = "foo\nbar\nbaz";
    assert_eq!(find_first_text_on_line("foo", text), Some(0));
    assert_eq!(find_first_text_on_line("oo", text), Some(1));
    assert_eq!(find_first_text_on_line("o", text), Some(1));
    assert_eq!(find_first_text_on_line("o\nbar", text), Some(2));
    assert_eq!(find_first_text_on_line("f", text), Some(0));
    assert_eq!(find_first_text_on_line("bar", text), None);
  }

  #[test]
  fn test_find_last_text_on_line() {
    let text = "foo\nbar\nbaz";
    assert_eq!(find_last_text_on_line("foo", text), Some(0));
    assert_eq!(find_last_text_on_line("oo", text), Some(1));
    assert_eq!(find_last_text_on_line("o", text), Some(2));
    assert_eq!(find_last_text_on_line("o\nbar", text), Some(2));
    assert_eq!(find_last_text_on_line("f", text), Some(0));
    assert_eq!(find_last_text_on_line("bar", text), None);
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/test_server.rs | tests/util/server/src/test_server.rs | // Copyright 2018-2025 the Deno authors. MIT license.
#![allow(clippy::print_stdout)]
#![allow(clippy::print_stderr)]
fn main() {
rustls::crypto::aws_lc_rs::default_provider()
.install_default()
.unwrap();
setup_panic_hook();
test_server::servers::run_all_servers();
}
fn setup_panic_hook() {
// Tokio does not exit the process when a task panics, so we define a custom
// panic hook to implement this behaviour.
let orig_hook = std::panic::take_hook();
std::panic::set_hook(Box::new(move |panic_info| {
eprintln!("\n============================================================");
eprintln!("Test server panicked!\n");
orig_hook(panic_info);
std::process::exit(1);
}));
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/lib.rs | tests/util/server/src/lib.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::env;
use std::io::Write;
use std::path::PathBuf;
use std::process::Child;
use std::process::Command;
use std::process::Output;
use std::process::Stdio;
use std::result::Result;
use futures::FutureExt;
use futures::Stream;
use futures::StreamExt;
use once_cell::sync::Lazy;
use parking_lot::Mutex;
use parking_lot::MutexGuard;
use pretty_assertions::assert_eq;
use pty::Pty;
use tokio::net::TcpStream;
use url::Url;
pub mod assertions;
mod builders;
mod fs;
mod https;
pub mod lsp;
mod macros;
mod npm;
mod parsers;
pub mod print;
pub mod pty;
mod semaphore;
pub mod servers;
pub mod spawn;
pub mod test_runner;
mod wildcard;
pub use builders::DenoChild;
pub use builders::TestCommandBuilder;
pub use builders::TestCommandOutput;
pub use builders::TestContext;
pub use builders::TestContextBuilder;
pub use fs::PathRef;
pub use fs::TempDir;
pub use fs::url_to_notebook_cell_uri;
pub use fs::url_to_uri;
pub use inventory::submit;
pub use parsers::StraceOutput;
pub use parsers::WrkOutput;
pub use parsers::parse_max_mem;
pub use parsers::parse_strace_output;
pub use parsers::parse_wrk_output;
pub use test_macro::test;
pub use wildcard::WildcardMatchResult;
pub use wildcard::wildcard_match_detailed;
pub const PERMISSION_VARIANTS: [&str; 5] =
["read", "write", "env", "net", "run"];
pub const PERMISSION_DENIED_PATTERN: &str = "NotCapable";
static GUARD: Lazy<Mutex<HttpServerCount>> = Lazy::new(Default::default);
pub static IS_CI: Lazy<bool> = Lazy::new(|| std::env::var("CI").is_ok());
pub fn env_vars_for_npm_tests() -> Vec<(String, String)> {
vec![
("NPM_CONFIG_REGISTRY".to_string(), npm_registry_url()),
("NODEJS_ORG_MIRROR".to_string(), nodejs_org_mirror_url()),
("NO_COLOR".to_string(), "1".to_string()),
("SOCKET_DEV_URL".to_string(), socket_dev_api_url()),
]
}
pub fn env_vars_for_jsr_tests_with_git_check() -> Vec<(String, String)> {
vec![
("JSR_URL".to_string(), jsr_registry_url()),
("DISABLE_JSR_PROVENANCE".to_string(), "true".to_string()),
("NO_COLOR".to_string(), "1".to_string()),
]
}
pub fn env_vars_for_jsr_tests() -> Vec<(String, String)> {
let mut vars = env_vars_for_jsr_tests_with_git_check();
vars.push((
"DENO_TESTING_DISABLE_GIT_CHECK".to_string(),
"1".to_string(),
));
vars
}
pub fn env_vars_for_jsr_provenance_tests() -> Vec<(String, String)> {
let mut envs = env_vars_for_jsr_tests();
envs.retain(|(key, _)| key != "DISABLE_JSR_PROVENANCE");
envs.extend(vec![
("REKOR_URL".to_string(), rekor_url()),
("FULCIO_URL".to_string(), fulcio_url()),
(
"DISABLE_JSR_MANIFEST_VERIFICATION_FOR_TESTING".to_string(),
"true".to_string(),
),
]);
// set GHA variable for attestation.
envs.extend([
("CI".to_string(), "true".to_string()),
("GITHUB_ACTIONS".to_string(), "true".to_string()),
("ACTIONS_ID_TOKEN_REQUEST_URL".to_string(), gha_token_url()),
(
"ACTIONS_ID_TOKEN_REQUEST_TOKEN".to_string(),
"dummy".to_string(),
),
(
"GITHUB_REPOSITORY".to_string(),
"littledivy/deno_sdl2".to_string(),
),
(
"GITHUB_SERVER_URL".to_string(),
"https://github.com".to_string(),
),
("GITHUB_REF".to_string(), "refs/tags/sdl2@0.0.1".to_string()),
("GITHUB_SHA".to_string(), "lol".to_string()),
("GITHUB_RUN_ID".to_string(), "1".to_string()),
("GITHUB_RUN_ATTEMPT".to_string(), "1".to_string()),
(
"RUNNER_ENVIRONMENT".to_string(),
"github-hosted".to_string(),
),
(
"GITHUB_WORKFLOW_REF".to_string(),
"littledivy/deno_sdl2@refs/tags/sdl2@0.0.1".to_string(),
),
]);
envs
}
pub fn env_vars_for_jsr_npm_tests() -> Vec<(String, String)> {
vec![
("NPM_CONFIG_REGISTRY".to_string(), npm_registry_url()),
("JSR_URL".to_string(), jsr_registry_url()),
(
"DENO_TESTING_DISABLE_GIT_CHECK".to_string(),
"1".to_string(),
),
("DISABLE_JSR_PROVENANCE".to_string(), "true".to_string()),
("NO_COLOR".to_string(), "1".to_string()),
("NODEJS_ORG_MIRROR".to_string(), nodejs_org_mirror_url()),
]
}
pub fn root_path() -> PathRef {
PathRef::new(
PathBuf::from(concat!(env!("CARGO_MANIFEST_DIR")))
.parent()
.unwrap()
.parent()
.unwrap()
.parent()
.unwrap(),
)
}
pub fn prebuilt_path() -> PathRef {
third_party_path().join("prebuilt")
}
pub fn tests_path() -> PathRef {
root_path().join("tests")
}
pub fn testdata_path() -> PathRef {
tests_path().join("testdata")
}
pub fn third_party_path() -> PathRef {
root_path().join("third_party")
}
pub fn ffi_tests_path() -> PathRef {
root_path().join("tests").join("ffi")
}
pub fn napi_tests_path() -> PathRef {
root_path().join("tests").join("napi")
}
pub fn deno_config_path() -> PathRef {
root_path().join("tests").join("config").join("deno.json")
}
/// Test server registry url.
pub fn npm_registry_url() -> String {
format!("http://localhost:{}/", servers::PUBLIC_NPM_REGISTRY_PORT)
}
pub fn npm_registry_unset_url() -> String {
"http://NPM_CONFIG_REGISTRY.is.unset".to_string()
}
pub fn nodejs_org_mirror_url() -> String {
format!(
"http://127.0.0.1:{}/",
servers::NODEJS_ORG_MIRROR_SERVER_PORT
)
}
pub fn nodejs_org_mirror_unset_url() -> String {
"http://NODEJS_ORG_MIRROR.is.unset".to_string()
}
pub fn jsr_registry_url() -> String {
format!("http://127.0.0.1:{}/", servers::JSR_REGISTRY_SERVER_PORT)
}
pub fn rekor_url() -> String {
format!("http://127.0.0.1:{}", servers::PROVENANCE_MOCK_SERVER_PORT)
}
pub fn fulcio_url() -> String {
format!("http://127.0.0.1:{}", servers::PROVENANCE_MOCK_SERVER_PORT)
}
pub fn gha_token_url() -> String {
format!(
"http://127.0.0.1:{}/gha_oidc?test=true",
servers::PROVENANCE_MOCK_SERVER_PORT
)
}
pub fn socket_dev_api_url() -> String {
format!("http://localhost:{}/", servers::SOCKET_DEV_API_PORT)
}
pub fn jsr_registry_unset_url() -> String {
"http://JSR_URL.is.unset".to_string()
}
pub fn std_path() -> PathRef {
root_path().join("tests").join("util").join("std")
}
pub fn std_file_url() -> String {
Url::from_directory_path(std_path()).unwrap().to_string()
}
pub fn target_dir() -> PathRef {
let current_exe = std::env::current_exe().unwrap();
let target_dir = current_exe.parent().unwrap().parent().unwrap();
PathRef::new(target_dir)
}
pub fn deno_exe_path() -> PathRef {
// Something like /Users/rld/src/deno/target/debug/deps/deno
let mut p = target_dir().join("deno").to_path_buf();
if cfg!(windows) {
p.set_extension("exe");
}
PathRef::new(p)
}
pub fn denort_exe_path() -> PathRef {
let mut p = target_dir().join("denort").to_path_buf();
if cfg!(windows) {
p.set_extension("exe");
}
PathRef::new(p)
}
pub fn prebuilt_tool_path(tool: &str) -> PathRef {
let mut exe = tool.to_string();
exe.push_str(if cfg!(windows) { ".exe" } else { "" });
prebuilt_path().join(platform_dir_name()).join(exe)
}
pub fn platform_dir_name() -> &'static str {
if cfg!(target_os = "linux") {
"linux64"
} else if cfg!(target_os = "macos") {
"mac"
} else if cfg!(target_os = "windows") {
"win"
} else {
unreachable!()
}
}
pub fn test_server_path() -> PathBuf {
let mut p = target_dir().join("test_server").to_path_buf();
if cfg!(windows) {
p.set_extension("exe");
}
p
}
fn ensure_test_server_built() {
// if the test server doesn't exist then remind the developer to build first
if !test_server_path().exists() {
panic!(
"Test server not found. Please cargo build before running the tests."
);
}
}
/// Returns a [`Stream`] of [`TcpStream`]s accepted from the given port.
async fn get_tcp_listener_stream(
name: &'static str,
port: u16,
) -> impl Stream<Item = Result<TcpStream, std::io::Error>> + Unpin + Send {
let host_and_port = &format!("localhost:{port}");
// Listen on ALL addresses that localhost can resolves to.
let accept = |listener: tokio::net::TcpListener| {
async {
let result = listener.accept().await;
Some((result.map(|r| r.0), listener))
}
.boxed()
};
let mut addresses = vec![];
let listeners = tokio::net::lookup_host(host_and_port)
.await
.expect(host_and_port)
.inspect(|address| addresses.push(*address))
.map(tokio::net::TcpListener::bind)
.collect::<futures::stream::FuturesUnordered<_>>()
.collect::<Vec<_>>()
.await
.into_iter()
.map(|s| s.unwrap())
.map(|listener| futures::stream::unfold(listener, accept))
.collect::<Vec<_>>();
// Eye catcher for HttpServerCount
println!("ready: {name} on {:?}", addresses);
futures::stream::select_all(listeners)
}
pub const TEST_SERVERS_COUNT: usize = 35;
#[derive(Default)]
struct HttpServerCount {
count: usize,
test_server: Option<HttpServerStarter>,
}
impl HttpServerCount {
fn inc(&mut self) {
self.count += 1;
if self.test_server.is_none() {
self.test_server = Some(Default::default());
}
}
fn dec(&mut self) {
assert!(self.count > 0);
self.count -= 1;
if self.count == 0 {
self.test_server.take();
}
}
}
impl Drop for HttpServerCount {
fn drop(&mut self) {
assert_eq!(self.count, 0);
assert!(self.test_server.is_none());
}
}
struct HttpServerStarter {
test_server: Child,
}
impl Default for HttpServerStarter {
fn default() -> Self {
println!("test_server starting...");
let mut test_server = Command::new(test_server_path())
.current_dir(testdata_path())
.stdout(Stdio::piped())
.spawn()
.inspect_err(|_| {
ensure_test_server_built();
})
.expect("failed to execute test_server");
let stdout = test_server.stdout.as_mut().unwrap();
use std::io::BufRead;
use std::io::BufReader;
let lines = BufReader::new(stdout).lines();
// Wait for all the servers to report being ready.
let mut ready_count = 0;
for maybe_line in lines {
if let Ok(line) = maybe_line {
if line.starts_with("ready:") {
ready_count += 1;
}
if ready_count == TEST_SERVERS_COUNT {
break;
}
} else {
panic!("{}", maybe_line.unwrap_err());
}
}
Self { test_server }
}
}
impl Drop for HttpServerStarter {
fn drop(&mut self) {
match self.test_server.try_wait() {
Ok(None) => {
self.test_server.kill().expect("failed to kill test_server");
let _ = self.test_server.wait();
}
Ok(Some(status)) => {
panic!("test_server exited unexpectedly {status}")
}
Err(e) => panic!("test_server error: {e}"),
}
}
}
fn lock_http_server<'a>() -> MutexGuard<'a, HttpServerCount> {
GUARD.lock()
}
pub struct HttpServerGuard {}
impl Drop for HttpServerGuard {
fn drop(&mut self) {
let mut g = lock_http_server();
g.dec();
}
}
/// Adds a reference to a shared target/debug/test_server subprocess. When the
/// last instance of the HttpServerGuard is dropped, the subprocess will be
/// killed.
pub fn http_server() -> HttpServerGuard {
let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
let mut g = lock_http_server();
g.inc();
HttpServerGuard {}
}
/// Helper function to strip ansi codes.
pub fn strip_ansi_codes(s: &str) -> std::borrow::Cow<'_, str> {
console_static_text::ansi::strip_ansi_codes(s)
}
pub fn run(
cmd: &[&str],
input: Option<&[&str]>,
envs: Option<Vec<(String, String)>>,
current_dir: Option<&str>,
expect_success: bool,
) {
let mut process_builder = Command::new(cmd[0]);
process_builder.args(&cmd[1..]).stdin(Stdio::piped());
if let Some(dir) = current_dir {
process_builder.current_dir(dir);
}
if let Some(envs) = envs {
process_builder.envs(envs);
}
let mut prog = process_builder.spawn().expect("failed to spawn script");
if let Some(lines) = input {
let stdin = prog.stdin.as_mut().expect("failed to get stdin");
stdin
.write_all(lines.join("\n").as_bytes())
.expect("failed to write to stdin");
}
let status = prog.wait().expect("failed to wait on child");
if expect_success != status.success() {
panic!("Unexpected exit code: {:?}", status.code());
}
}
pub fn run_collect(
cmd: &[&str],
input: Option<&[&str]>,
envs: Option<Vec<(String, String)>>,
current_dir: Option<&str>,
expect_success: bool,
) -> (String, String) {
let mut process_builder = Command::new(cmd[0]);
process_builder
.args(&cmd[1..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped());
if let Some(dir) = current_dir {
process_builder.current_dir(dir);
}
if let Some(envs) = envs {
process_builder.envs(envs);
}
let mut prog = process_builder.spawn().expect("failed to spawn script");
if let Some(lines) = input {
let stdin = prog.stdin.as_mut().expect("failed to get stdin");
stdin
.write_all(lines.join("\n").as_bytes())
.expect("failed to write to stdin");
}
let Output {
stdout,
stderr,
status,
} = prog.wait_with_output().expect("failed to wait on child");
let stdout = String::from_utf8(stdout).unwrap();
let stderr = String::from_utf8(stderr).unwrap();
if expect_success != status.success() {
eprintln!("stdout: <<<{stdout}>>>");
eprintln!("stderr: <<<{stderr}>>>");
panic!("Unexpected exit code: {:?}", status.code());
}
(stdout, stderr)
}
pub fn run_and_collect_output(
expect_success: bool,
args: &str,
input: Option<Vec<&str>>,
envs: Option<Vec<(String, String)>>,
need_http_server: bool,
) -> (String, String) {
run_and_collect_output_with_args(
expect_success,
args.split_whitespace().collect(),
input,
envs,
need_http_server,
)
}
pub fn run_and_collect_output_with_args(
expect_success: bool,
args: Vec<&str>,
input: Option<Vec<&str>>,
envs: Option<Vec<(String, String)>>,
need_http_server: bool,
) -> (String, String) {
let mut deno_process_builder = deno_cmd()
.args_vec(args)
.current_dir(testdata_path())
.stdin(Stdio::piped())
.piped_output();
if let Some(envs) = envs {
deno_process_builder = deno_process_builder.envs(envs);
}
let _http_guard = if need_http_server {
Some(http_server())
} else {
None
};
let mut deno = deno_process_builder
.spawn()
.expect("failed to spawn script");
if let Some(lines) = input {
let stdin = deno.stdin.as_mut().expect("failed to get stdin");
stdin
.write_all(lines.join("\n").as_bytes())
.expect("failed to write to stdin");
}
let Output {
stdout,
stderr,
status,
} = deno.wait_with_output().expect("failed to wait on child");
let stdout = String::from_utf8(stdout).unwrap();
let stderr = String::from_utf8(stderr).unwrap();
if expect_success != status.success() {
eprintln!("stdout: <<<{stdout}>>>");
eprintln!("stderr: <<<{stderr}>>>");
panic!("Unexpected exit code: {:?}", status.code());
}
(stdout, stderr)
}
pub fn new_deno_dir() -> TempDir {
TempDir::new()
}
pub fn deno_cmd() -> TestCommandBuilder {
let deno_dir = new_deno_dir();
deno_cmd_with_deno_dir(&deno_dir)
}
pub fn deno_cmd_with_deno_dir(deno_dir: &TempDir) -> TestCommandBuilder {
TestCommandBuilder::new(deno_dir.clone())
.env("DENO_DIR", deno_dir.path())
.env("NPM_CONFIG_REGISTRY", npm_registry_unset_url())
.env("NODEJS_ORG_MIRROR", nodejs_org_mirror_unset_url())
.env("JSR_URL", jsr_registry_unset_url())
}
pub fn run_powershell_script_file(
script_file_path: &str,
args: Vec<&str>,
) -> std::result::Result<(), i64> {
let deno_dir = new_deno_dir();
let mut command = Command::new("powershell.exe");
command
.env("DENO_DIR", deno_dir.path())
.current_dir(testdata_path())
.arg("-file")
.arg(script_file_path);
for arg in args {
command.arg(arg);
}
let output = command.output().expect("failed to spawn script");
let stdout = String::from_utf8(output.stdout).unwrap();
let stderr = String::from_utf8(output.stderr).unwrap();
println!("{stdout}");
if !output.status.success() {
panic!(
"{script_file_path} executed with failing error code\n{stdout}{stderr}"
);
}
Ok(())
}
#[derive(Debug, Default)]
pub struct CheckOutputIntegrationTest<'a> {
pub args: &'a str,
pub args_vec: Vec<&'a str>,
pub output: &'a str,
pub input: Option<&'a str>,
pub output_str: Option<&'a str>,
pub exit_code: i32,
pub http_server: bool,
pub envs: Vec<(String, String)>,
pub env_clear: bool,
pub temp_cwd: bool,
/// Copies the files at the specified directory in the "testdata" directory
/// to the temp folder and runs the test from there. This is useful when
/// the test creates files in the testdata directory (ex. a node_modules folder)
pub copy_temp_dir: Option<&'a str>,
/// Relative to "testdata" directory
pub cwd: Option<&'a str>,
}
impl CheckOutputIntegrationTest<'_> {
pub fn output(&self) -> TestCommandOutput {
let mut context_builder = TestContextBuilder::default();
if self.temp_cwd {
context_builder = context_builder.use_temp_cwd();
}
if let Some(dir) = &self.copy_temp_dir {
context_builder = context_builder.use_copy_temp_dir(dir);
}
if self.http_server {
context_builder = context_builder.use_http_server();
}
let context = context_builder.build();
let mut command_builder = context.new_command();
if !self.args.is_empty() {
command_builder = command_builder.args(self.args);
}
if !self.args_vec.is_empty() {
command_builder = command_builder.args_vec(self.args_vec.clone());
}
if let Some(input) = &self.input {
command_builder = command_builder.stdin_text(input);
}
for (key, value) in &self.envs {
command_builder = command_builder.env(key, value);
}
if self.env_clear {
command_builder = command_builder.env_clear();
}
if let Some(cwd) = &self.cwd {
command_builder = command_builder.current_dir(cwd);
}
command_builder.run()
}
}
pub fn with_pty(deno_args: &[&str], action: impl FnMut(Pty)) {
let context = TestContextBuilder::default().use_temp_cwd().build();
context.new_command().args_vec(deno_args).with_pty(action);
}
pub(crate) mod colors {
use std::io::Write;
use termcolor::Ansi;
use termcolor::Color;
use termcolor::ColorSpec;
use termcolor::WriteColor;
pub fn bold<S: AsRef<str>>(s: S) -> String {
let mut style_spec = ColorSpec::new();
style_spec.set_bold(true);
style(s, style_spec)
}
pub fn red<S: AsRef<str>>(s: S) -> String {
fg_color(s, Color::Red)
}
pub fn bold_red<S: AsRef<str>>(s: S) -> String {
bold_fg_color(s, Color::Red)
}
pub fn green<S: AsRef<str>>(s: S) -> String {
fg_color(s, Color::Green)
}
pub fn bold_green<S: AsRef<str>>(s: S) -> String {
bold_fg_color(s, Color::Green)
}
pub fn bold_blue<S: AsRef<str>>(s: S) -> String {
bold_fg_color(s, Color::Blue)
}
pub fn gray<S: AsRef<str>>(s: S) -> String {
fg_color(s, Color::Ansi256(245))
}
pub fn yellow<S: AsRef<str>>(s: S) -> String {
fg_color(s, Color::Yellow)
}
fn bold_fg_color<S: AsRef<str>>(s: S, color: Color) -> String {
let mut style_spec = ColorSpec::new();
style_spec.set_bold(true);
style_spec.set_fg(Some(color));
style(s, style_spec)
}
fn fg_color<S: AsRef<str>>(s: S, color: Color) -> String {
let mut style_spec = ColorSpec::new();
style_spec.set_fg(Some(color));
style(s, style_spec)
}
fn style<S: AsRef<str>>(s: S, colorspec: ColorSpec) -> String {
let mut v = Vec::new();
let mut ansi_writer = Ansi::new(&mut v);
ansi_writer.set_color(&colorspec).unwrap();
ansi_writer.write_all(s.as_ref().as_bytes()).unwrap();
ansi_writer.reset().unwrap();
String::from_utf8_lossy(&v).into_owned()
}
}
#[derive(Debug, Clone)]
pub struct TestMacroCase {
pub name: &'static str,
pub module_name: &'static str,
pub file: &'static str,
/// 1-indexed
pub line: u32,
/// 1-indexed
pub col: u32,
pub func: fn(),
pub flaky: bool,
pub ignore: bool,
pub timeout: Option<usize>,
}
inventory::collect!(TestMacroCase);
pub fn collect_and_filter_tests(
main_category: &mut file_test_runner::collection::CollectedTestCategory<
&'static TestMacroCase,
>,
) {
for test in inventory::iter::<TestMacroCase>() {
main_category.children.push(
file_test_runner::collection::CollectedCategoryOrTest::Test(
file_test_runner::collection::CollectedTest {
name: format!("{}::{}", test.module_name, test.name),
path: PathBuf::from(test.file),
// line and col are 1-indexed, but file_test_runner uses
// 0-indexed numbers, so keep as-is for line to put it on
// probably the function name and then do col - 1 to make
// the column 0-indexed
line_and_column: Some((test.line, test.col - 1)),
data: test,
},
),
);
}
if let Some(filter) = file_test_runner::collection::parse_cli_arg_filter() {
main_category.filter_children(&filter);
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/fs.rs | tests/util/server/src/fs.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::collections::HashSet;
use std::ffi::OsStr;
use std::fs;
use std::fs::OpenOptions;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::process::Command;
use std::str::FromStr;
use std::sync::Arc;
use anyhow::Context;
use lsp_types::Uri;
use pretty_assertions::assert_eq;
use serde::Serialize;
use serde::de::DeserializeOwned;
use url::Url;
use crate::assertions::assert_wildcard_match;
use crate::lsp::SourceFile;
use crate::lsp::source_file;
use crate::println;
use crate::testdata_path;
/// Characters that are left unencoded in a `Url` path but will be encoded in a
/// VSCode URI.
const URL_TO_URI_PATH: &percent_encoding::AsciiSet =
&percent_encoding::CONTROLS
.add(b' ')
.add(b'!')
.add(b'$')
.add(b'&')
.add(b'\'')
.add(b'(')
.add(b')')
.add(b'*')
.add(b'+')
.add(b',')
.add(b':')
.add(b';')
.add(b'=')
.add(b'@')
.add(b'[')
.add(b']')
.add(b'^')
.add(b'|');
/// Characters that may be left unencoded in a `Url` query but not valid in a
/// `Uri` query.
const URL_TO_URI_QUERY: &percent_encoding::AsciiSet =
&URL_TO_URI_PATH.add(b'\\').add(b'`').add(b'{').add(b'}');
/// Characters that may be left unencoded in a `Url` fragment but not valid in
/// a `Uri` fragment.
const URL_TO_URI_FRAGMENT: &percent_encoding::AsciiSet =
&URL_TO_URI_PATH.add(b'#').add(b'\\').add(b'{').add(b'}');
pub fn url_to_uri(url: &Url) -> Result<Uri, anyhow::Error> {
let components = url::quirks::internal_components(url);
let mut input = String::with_capacity(url.as_str().len());
input.push_str(&url.as_str()[..components.path_start as usize]);
let path = url.path();
let mut chars = path.chars();
let has_drive_letter = chars.next().is_some_and(|c| c == '/')
&& chars.next().is_some_and(|c| c.is_ascii_alphabetic())
&& chars.next().is_some_and(|c| c == ':')
&& chars.next().is_none_or(|c| c == '/');
if has_drive_letter {
let (dl_part, rest) = path.split_at(2);
input.push_str(&dl_part.to_ascii_lowercase());
input.push_str(
&percent_encoding::utf8_percent_encode(rest, URL_TO_URI_PATH).to_string(),
);
} else {
input.push_str(
&percent_encoding::utf8_percent_encode(path, URL_TO_URI_PATH).to_string(),
);
}
if let Some(query) = url.query() {
input.push('?');
input.push_str(
&percent_encoding::utf8_percent_encode(query, URL_TO_URI_QUERY)
.to_string(),
);
}
if let Some(fragment) = url.fragment() {
input.push('#');
input.push_str(
&percent_encoding::utf8_percent_encode(fragment, URL_TO_URI_FRAGMENT)
.to_string(),
);
}
Uri::from_str(&input).map_err(|err| {
anyhow::anyhow!("Could not convert URL \"{url}\" to URI: {err}")
})
}
pub fn url_to_notebook_cell_uri(url: &Url) -> Uri {
let uri = url_to_uri(url).unwrap();
Uri::from_str(&format!(
"vscode-notebook-cell:{}",
uri.as_str().strip_prefix("file:").unwrap()
))
.unwrap()
}
/// Represents a path on the file system, which can be used
/// to perform specific actions.
#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]
pub struct PathRef(PathBuf);
impl AsRef<Path> for PathRef {
fn as_ref(&self) -> &Path {
self.as_path()
}
}
impl AsRef<OsStr> for PathRef {
fn as_ref(&self) -> &OsStr {
self.as_path().as_ref()
}
}
impl std::fmt::Display for PathRef {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.as_path().display())
}
}
impl PathRef {
pub fn new(path: impl AsRef<Path>) -> Self {
Self(path.as_ref().to_path_buf())
}
pub fn parent(&self) -> PathRef {
PathRef(self.as_path().parent().unwrap().to_path_buf())
}
pub fn url_dir(&self) -> Url {
Url::from_directory_path(self.as_path()).unwrap()
}
pub fn url_file(&self) -> Url {
Url::from_file_path(self.as_path()).unwrap()
}
pub fn uri_dir(&self) -> Uri {
url_to_uri(&self.url_dir()).unwrap()
}
pub fn uri_file(&self) -> Uri {
url_to_uri(&self.url_file()).unwrap()
}
pub fn as_path(&self) -> &Path {
self.0.as_path()
}
pub fn to_path_buf(&self) -> PathBuf {
self.0.to_path_buf()
}
pub fn to_string_lossy(&self) -> Cow<'_, str> {
self.0.to_string_lossy()
}
pub fn exists(&self) -> bool {
self.0.exists()
}
pub fn try_exists(&self) -> std::io::Result<bool> {
self.0.try_exists()
}
pub fn is_dir(&self) -> bool {
self.0.is_dir()
}
pub fn is_file(&self) -> bool {
self.0.is_file()
}
pub fn join(&self, path: impl AsRef<Path>) -> PathRef {
PathRef(self.as_path().join(path))
}
pub fn with_extension(&self, ext: impl AsRef<OsStr>) -> PathRef {
PathRef(self.as_path().with_extension(ext))
}
pub fn canonicalize(&self) -> PathRef {
PathRef(strip_unc_prefix(self.as_path().canonicalize().unwrap()))
}
pub fn create_dir_all(&self) {
fs::create_dir_all(self).unwrap();
}
pub fn remove_file(&self) {
fs::remove_file(self).unwrap();
}
pub fn remove_dir_all(&self) {
fs::remove_dir_all(self).unwrap();
}
pub fn read_to_string(&self) -> String {
self.read_to_string_if_exists().unwrap()
}
pub fn read_to_string_if_exists(&self) -> Result<String, anyhow::Error> {
fs::read_to_string(self)
.with_context(|| format!("Could not read file: {}", self))
}
pub fn read_to_bytes_if_exists(&self) -> Result<Vec<u8>, anyhow::Error> {
fs::read(self).with_context(|| format!("Could not read file: {}", self))
}
#[track_caller]
pub fn read_json<TValue: DeserializeOwned>(&self) -> TValue {
serde_json::from_str(&self.read_to_string())
.with_context(|| format!("Failed deserializing: {}", self))
.unwrap()
}
#[track_caller]
pub fn read_json_value(&self) -> serde_json::Value {
serde_json::from_str(&self.read_to_string())
.with_context(|| format!("Failed deserializing: {}", self))
.unwrap()
}
#[track_caller]
pub fn read_jsonc_value(&self) -> serde_json::Value {
jsonc_parser::parse_to_serde_value(
&self.read_to_string(),
&Default::default(),
)
.with_context(|| format!("Failed to parse {}", self))
.unwrap()
.unwrap_or_else(|| panic!("JSON file was empty for {}", self))
}
#[track_caller]
pub fn rename(&self, to: impl AsRef<Path>) {
let to = self.join(to);
if let Some(parent_path) = to.as_path().parent() {
fs::create_dir_all(parent_path).unwrap()
}
fs::rename(self, to).unwrap();
}
#[track_caller]
pub fn append(&self, text: impl AsRef<str>) {
let mut file = OpenOptions::new().append(true).open(self).unwrap();
file.write_all(text.as_ref().as_bytes()).unwrap();
}
#[track_caller]
pub fn write(&self, text: impl AsRef<[u8]>) {
if let Some(parent_path) = self.as_path().parent() {
fs::create_dir_all(parent_path).unwrap()
}
fs::write(self, text).unwrap();
}
#[track_caller]
pub fn write_json<TValue: Serialize>(&self, value: &TValue) {
let text = serde_json::to_string_pretty(value).unwrap();
self.write(text);
}
#[track_caller]
pub fn symlink_dir(
&self,
oldpath: impl AsRef<Path>,
newpath: impl AsRef<Path>,
) {
let oldpath = self.as_path().join(oldpath);
let newpath = self.as_path().join(newpath);
if let Some(parent_path) = newpath.parent() {
fs::create_dir_all(parent_path).unwrap()
}
#[cfg(unix)]
{
use std::os::unix::fs::symlink;
symlink(oldpath, newpath).unwrap();
}
#[cfg(not(unix))]
{
use std::os::windows::fs::symlink_dir;
symlink_dir(oldpath, newpath).unwrap();
}
}
#[track_caller]
pub fn symlink_file(
&self,
oldpath: impl AsRef<Path>,
newpath: impl AsRef<Path>,
) {
let oldpath = self.as_path().join(oldpath);
let newpath = self.as_path().join(newpath);
if let Some(parent_path) = newpath.as_path().parent() {
fs::create_dir_all(parent_path).unwrap()
}
#[cfg(unix)]
{
use std::os::unix::fs::symlink;
symlink(oldpath, newpath).unwrap();
}
#[cfg(not(unix))]
{
use std::os::windows::fs::symlink_file;
symlink_file(oldpath, newpath).unwrap();
}
}
#[track_caller]
pub fn read_dir(&self) -> fs::ReadDir {
fs::read_dir(self.as_path())
.with_context(|| format!("Reading {}", self.as_path().display()))
.unwrap()
}
#[track_caller]
pub fn copy(&self, to: &impl AsRef<Path>) {
std::fs::copy(self.as_path(), to)
.with_context(|| format!("Copying {} to {}", self, to.as_ref().display()))
.unwrap();
}
/// Copies this directory to another directory.
///
/// Note: Does not handle symlinks.
pub fn copy_to_recursive(&self, to: &PathRef) {
self.copy_to_recursive_with_exclusions(to, &HashSet::new())
}
pub fn copy_to_recursive_with_exclusions(
&self,
to: &PathRef,
file_exclusions: &HashSet<PathRef>,
) {
to.create_dir_all();
let read_dir = self.read_dir();
for entry in read_dir {
let entry = entry.unwrap();
let file_type = entry.file_type().unwrap();
let new_from = self.join(entry.file_name());
let new_to = to.join(entry.file_name());
if file_type.is_dir() {
new_from.copy_to_recursive(&new_to);
} else if file_type.is_file() && !file_exclusions.contains(&new_from) {
new_from.copy(&new_to);
}
}
}
#[track_caller]
pub fn mark_executable(&self) {
if cfg!(unix) {
Command::new("chmod").arg("+x").arg(self).output().unwrap();
}
}
#[track_caller]
pub fn make_dir_readonly(&self) {
self.create_dir_all();
if cfg!(windows) {
Command::new("attrib").arg("+r").arg(self).output().unwrap();
} else if cfg!(unix) {
Command::new("chmod").arg("555").arg(self).output().unwrap();
}
}
#[track_caller]
pub fn assert_matches_file(&self, wildcard_file: impl AsRef<Path>) -> &Self {
let wildcard_file = testdata_path().join(wildcard_file);
println!("output path {}", wildcard_file);
let expected_text = wildcard_file.read_to_string();
self.assert_matches_text(&expected_text)
}
#[track_caller]
pub fn assert_matches_text(&self, wildcard_text: impl AsRef<str>) -> &Self {
let actual = self.read_to_string();
assert_wildcard_match(&actual, wildcard_text.as_ref());
self
}
#[track_caller]
pub fn assert_matches_json(&self, expected: serde_json::Value) {
let actual_json = self.read_json_value();
if actual_json != expected {
let actual_text = serde_json::to_string_pretty(&actual_json).unwrap();
let expected_text = serde_json::to_string_pretty(&expected).unwrap();
assert_eq!(actual_text, expected_text);
}
}
}
#[cfg(not(windows))]
#[inline]
fn strip_unc_prefix(path: PathBuf) -> PathBuf {
path
}
/// Strips the unc prefix (ex. \\?\) from Windows paths.
///
/// Lifted from deno_core for use in the tests.
#[cfg(windows)]
fn strip_unc_prefix(path: PathBuf) -> PathBuf {
use std::path::Component;
use std::path::Prefix;
let mut components = path.components();
match components.next() {
Some(Component::Prefix(prefix)) => {
match prefix.kind() {
// \\?\device
Prefix::Verbatim(device) => {
let mut path = PathBuf::new();
path.push(format!(r"\\{}\", device.to_string_lossy()));
path.extend(components.filter(|c| !matches!(c, Component::RootDir)));
path
}
// \\?\c:\path
Prefix::VerbatimDisk(_) => {
let mut path = PathBuf::new();
path.push(prefix.as_os_str().to_string_lossy().replace(r"\\?\", ""));
path.extend(components);
path
}
// \\?\UNC\hostname\share_name\path
Prefix::VerbatimUNC(hostname, share_name) => {
let mut path = PathBuf::new();
path.push(format!(
r"\\{}\{}\",
hostname.to_string_lossy(),
share_name.to_string_lossy()
));
path.extend(components.filter(|c| !matches!(c, Component::RootDir)));
path
}
_ => path,
}
}
_ => path,
}
}
/// Backing storage for `TempDir`, tracking how the directory was created so
/// cleanup can be handled appropriately on drop.
enum TempDirInner {
// Directory managed by `tempfile`; removal happens when `_dir` is dropped.
TempDir {
path_ref: PathRef,
// kept alive for the duration of the temp dir
_dir: tempfile::TempDir,
},
// Plain path owned by us; removed explicitly in the `Drop` impl below.
Path(PathRef),
// A symlink pointing at another temp dir; both sides are kept alive.
Symlinked {
symlink: Arc<TempDirInner>,
target: Arc<TempDirInner>,
},
}
impl TempDirInner {
  /// The path of this entry (for symlinked dirs, the symlink itself).
  pub fn path(&self) -> &PathRef {
    match self {
      Self::TempDir { path_ref, .. } | Self::Path(path_ref) => path_ref,
      Self::Symlinked { symlink, .. } => symlink.path(),
    }
  }

  /// The fully-resolved path, following any chain of symlinks.
  pub fn target_path(&self) -> &PathRef {
    if let TempDirInner::Symlinked { target, .. } = self {
      target.target_path()
    } else {
      self.path()
    }
  }
}
impl Drop for TempDirInner {
fn drop(&mut self) {
// Only the `Path` variant needs manual cleanup: the `TempDir` variant is
// removed by the `tempfile::TempDir` it holds, and `Symlinked` just drops
// its inner `Arc`s. Removal errors are intentionally ignored (best-effort).
if let Self::Path(path) = self {
_ = fs::remove_dir_all(path);
}
}
}
/// For creating temporary directories in tests.
///
/// This was done because `tempfiles::TempDir` was very slow on Windows.
///
/// Note: Do not use this in actual code as this does not protect against
/// "insecure temporary file" security vulnerabilities.
///
/// Cloning is cheap: clones share the same underlying directory (via `Arc`),
/// which is removed once the last clone is dropped.
#[derive(Clone)]
pub struct TempDir(Arc<TempDirInner>);
impl Default for TempDir {
// Equivalent to `TempDir::new()`: a fresh dir under the system temp dir.
fn default() -> Self {
Self::new()
}
}
impl TempDir {
/// Creates a new temp dir under the system temp directory.
pub fn new() -> Self {
Self::new_inner(&std::env::temp_dir(), None)
}
/// Creates a new temp dir whose name starts with `prefix`.
pub fn new_with_prefix(prefix: &str) -> Self {
Self::new_inner(&std::env::temp_dir(), Some(prefix))
}
/// Creates a new temp dir inside `parent_dir`.
pub fn new_in(parent_dir: &Path) -> Self {
Self::new_inner(parent_dir, None)
}
/// Wraps an existing path; it is removed when the last clone is dropped.
pub fn new_with_path(path: &Path) -> Self {
Self(Arc::new(TempDirInner::Path(PathRef(path.to_path_buf()))))
}
/// Creates a sibling symlink named `<name>_symlinked` pointing at `target`
/// and returns a temp dir that keeps both the link and the target alive.
pub fn new_symlinked(target: TempDir) -> Self {
let target_path = target.path();
let path = target_path.parent().join(format!(
"{}_symlinked",
target_path.as_path().file_name().unwrap().to_str().unwrap()
));
target.symlink_dir(target.path(), &path);
TempDir(Arc::new(TempDirInner::Symlinked {
target: target.0,
symlink: Self::new_with_path(path.as_path()).0,
}))
}
/// Create a new temporary directory with the given prefix as part of its name, if specified.
fn new_inner(parent_dir: &Path, prefix: Option<&str>) -> Self {
let mut builder = tempfile::Builder::new();
builder.prefix(prefix.unwrap_or("deno-cli-test"));
let dir = builder
.tempdir_in(parent_dir)
.expect("Failed to create a temporary directory");
Self(Arc::new(TempDirInner::TempDir {
path_ref: PathRef(dir.path().to_path_buf()),
_dir: dir,
}))
}
/// `file://` URL for this directory.
pub fn url(&self) -> Url {
Url::from_directory_path(self.path()).unwrap()
}
/// LSP `Uri` for this directory.
pub fn uri(&self) -> Uri {
url_to_uri(&self.url()).unwrap()
}
/// The path of this dir (the symlink itself if symlinked).
pub fn path(&self) -> &PathRef {
self.0.path()
}
/// The resolved final target path if this is a symlink.
pub fn target_path(&self) -> &PathRef {
self.0.target_path()
}
// The helpers below all operate relative to the resolved target path.
pub fn create_dir_all(&self, path: impl AsRef<Path>) {
self.target_path().join(path).create_dir_all()
}
pub fn remove_file(&self, path: impl AsRef<Path>) {
self.target_path().join(path).remove_file()
}
pub fn remove_dir_all(&self, path: impl AsRef<Path>) {
self.target_path().join(path).remove_dir_all()
}
pub fn read_to_string(&self, path: impl AsRef<Path>) -> String {
self.target_path().join(path).read_to_string()
}
pub fn rename(&self, from: impl AsRef<Path>, to: impl AsRef<Path>) {
self.target_path().join(from).rename(to)
}
pub fn write(&self, path: impl AsRef<Path>, text: impl AsRef<[u8]>) {
self.target_path().join(path).write(text)
}
/// Writes `text` to `path` inside this dir and returns it as a `SourceFile`.
pub fn source_file(
&self,
path: impl AsRef<Path>,
text: impl AsRef<str>,
) -> SourceFile {
let path = self.target_path().join(path);
source_file(path, text)
}
pub fn symlink_dir(
&self,
oldpath: impl AsRef<Path>,
newpath: impl AsRef<Path>,
) {
self.target_path().symlink_dir(oldpath, newpath)
}
pub fn symlink_file(
&self,
oldpath: impl AsRef<Path>,
newpath: impl AsRef<Path>,
) {
self.target_path().symlink_file(oldpath, newpath)
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/spawn.rs | tests/util/server/src/spawn.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::convert::Infallible;
use anyhow::Error;
/// For unix targets, we just replace our current process with the desired cargo process.
#[cfg(unix)]
pub fn exec_replace_inner(
  cmd: &str,
  args: &[&str],
) -> Result<Infallible, Error> {
  use std::ffi::CString;

  // `nix::unistd::execvp` is generic over `AsRef<CStr>`, so a single
  // `Vec<CString>` suffices; the former intermediate `Vec<&CStr>` was
  // a redundant allocation.
  let args = args
    .iter()
    .map(|arg| CString::new(*arg).unwrap())
    .collect::<Vec<_>>();
  // On success `execvp` never returns (the process image is replaced),
  // so the only reachable outcome here is the error path.
  let err = nix::unistd::execvp(&CString::new(cmd).unwrap(), &args)
    .expect_err("Impossible");
  Err(err.into())
}
#[cfg(windows)]
pub fn exec_replace_inner(
  cmd: &str,
  args: &[&str],
) -> Result<Infallible, Error> {
  use std::os::windows::io::AsRawHandle;
  use std::process::Command;
  use win32job::ExtendedLimitInfo;
  use win32job::Job;
  // Use a job to ensure the child process's lifetime does not exceed the current process's lifetime.
  // This ensures that if the current process is terminated (e.g., via ctrl+c or task manager),
  // the child process is automatically reaped.
  // For more information about this technique, see Raymond Chen's blog post:
  // https://devblogs.microsoft.com/oldnewthing/20131209-00/?p=2433
  // Note: While our implementation is not perfect, it serves its purpose for test code.
  // In the future, we may directly obtain the main thread's handle from Rust code and use it
  // to create a suspended process that we can then resume:
  // https://github.com/rust-lang/rust/issues/96723
  // Creates a child process and assigns it to our current job.
  // A more reliable approach would be to create the child suspended and then assign it to the job.
  // For now, we create the child, create the job, and then assign both us and the child to the job.
  //
  // `args` conventionally carries the program name at index 0, which
  // `Command` supplies itself; `skip(1)` drops it without the panic that
  // `&args[1..]` would raise on an empty slice.
  let mut child = Command::new(cmd).args(args.iter().skip(1)).spawn()?;
  let mut info = ExtendedLimitInfo::default();
  info.limit_kill_on_job_close();
  let job = Job::create_with_limit_info(&info)?;
  job.assign_current_process()?;
  let handle = child.as_raw_handle();
  job.assign_process(handle as _)?;
  let exit = child.wait()?;
  std::process::exit(exit.code().unwrap_or(1));
}
/// Runs a command, replacing the current process on Unix. On Windows, this function blocks and
/// exits.
///
/// In either case, the only way this function returns is if it fails to launch the child
/// process.
///
/// `args` should include the program name at index 0, matching `execvp`
/// conventions (the Windows implementation skips it for `Command`).
pub fn exec_replace(command: &str, args: &[&str]) -> Result<Infallible, Error> {
exec_replace_inner(command, args)
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/lsp.rs | tests/util/server/src/lsp.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::collections::HashMap;
use std::ffi::OsStr;
use std::ffi::OsString;
use std::io;
use std::io::BufRead;
use std::io::BufReader;
use std::io::Write;
use std::path::Path;
use std::process::Child;
use std::process::ChildStdin;
use std::process::ChildStdout;
use std::process::Command;
use std::process::Stdio;
use std::sync::Arc;
use std::sync::mpsc;
use std::time::Duration;
use std::time::Instant;
use anyhow::Result;
use indexmap::IndexMap;
use lsp_types as lsp;
use lsp_types::ClientCapabilities;
use lsp_types::ClientInfo;
use lsp_types::CodeActionCapabilityResolveSupport;
use lsp_types::CodeActionClientCapabilities;
use lsp_types::CodeActionKindLiteralSupport;
use lsp_types::CodeActionLiteralSupport;
use lsp_types::CompletionClientCapabilities;
use lsp_types::CompletionItemCapability;
use lsp_types::FoldingRangeClientCapabilities;
use lsp_types::InitializeParams;
use lsp_types::TextDocumentClientCapabilities;
use lsp_types::TextDocumentSyncClientCapabilities;
use lsp_types::Uri;
use lsp_types::WorkspaceClientCapabilities;
use once_cell::sync::Lazy;
use parking_lot::Condvar;
use parking_lot::Mutex;
use regex::Regex;
use serde::Deserialize;
use serde::Serialize;
use serde::de;
use serde_json::Value;
use serde_json::json;
use serde_json::to_value;
use url::Url;
use super::TempDir;
use crate::PathRef;
use crate::deno_exe_path;
use crate::eprintln;
use crate::jsr_registry_url;
use crate::npm_registry_url;
use crate::print::spawn_thread;
// Note: despite the name, this matches the `Content-Length` header line of an
// LSP message frame; capture group 1 is the byte length of the message body.
static CONTENT_TYPE_REG: Lazy<Regex> =
lazy_regex::lazy_regex!(r"(?i)^content-length:\s+(\d+)");
/// The `error` member of a JSON-RPC response, as sent by the language server.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct LspResponseError {
code: i32,
message: String,
data: Option<Value>,
}
/// A decoded JSON-RPC message from the language server.
#[derive(Clone, Debug)]
pub enum LspMessage {
// (method, params) — no id present.
Notification(String, Option<Value>),
// (id, method, params) — server-to-client request.
Request(u64, String, Option<Value>),
// (id, result, error) — response to one of our requests.
Response(u64, Option<Value>, Option<LspResponseError>),
}
impl<'a> From<&'a [u8]> for LspMessage {
  /// Classifies a raw JSON-RPC payload: an `id` plus `method` is a request,
  /// an `id` alone is a response, and a `method` alone is a notification.
  fn from(s: &'a [u8]) -> Self {
    let value: Value = serde_json::from_slice(s).unwrap();
    let obj = value.as_object().unwrap();
    let id = obj.get("id").map(|v| v.as_u64().unwrap());
    let params = obj.get("params").cloned();
    match id {
      Some(id) if obj.contains_key("method") => {
        let method = obj["method"].as_str().unwrap().to_string();
        Self::Request(id, method, params)
      }
      Some(id) => {
        let maybe_error: Option<LspResponseError> = obj
          .get("error")
          .map(|v| serde_json::from_value(v.clone()).unwrap());
        Self::Response(id, obj.get("result").cloned(), maybe_error)
      }
      None => {
        assert!(obj.contains_key("method"));
        let method = obj["method"].as_str().unwrap().to_string();
        Self::Notification(method, params)
      }
    }
  }
}
/// Reads one LSP frame: header lines up to the blank `\r\n` separator, then
/// exactly `Content-Length` bytes of body. Returns `None` on clean EOF.
fn read_message<R>(reader: &mut R) -> Result<Option<Vec<u8>>>
where
  R: io::Read + io::BufRead,
{
  let mut content_length = 0_usize;
  loop {
    let mut line = String::new();
    if reader.read_line(&mut line)? == 0 {
      // EOF before a complete frame.
      return Ok(None);
    }
    if line == "\r\n" {
      // Blank line terminates the header section.
      break;
    }
    if let Some(len_match) =
      CONTENT_TYPE_REG.captures(&line).and_then(|c| c.get(1))
    {
      content_length = len_match.as_str().parse::<usize>()?;
    }
  }
  let mut body = vec![0_u8; content_length];
  reader.read_exact(&mut body)?;
  Ok(Some(body))
}
/// Reads the server's stdout on a background thread, queuing decoded
/// messages for the test to consume.
struct LspStdoutReader {
// (queue of unconsumed messages, condvar signaled when a message arrives)
pending_messages: Arc<(Mutex<Vec<LspMessage>>, Condvar)>,
// Messages already matched and handed out, kept for `had_message` checks.
read_messages: Vec<LspMessage>,
}
impl LspStdoutReader {
/// Spawns a thread that decodes frames from `buf_reader` and pushes them
/// onto the shared queue, notifying any waiting reader via the condvar.
pub fn new(mut buf_reader: io::BufReader<ChildStdout>) -> Self {
let messages: Arc<(Mutex<Vec<LspMessage>>, Condvar)> = Default::default();
spawn_thread({
let messages = messages.clone();
move || {
while let Ok(Some(msg_buf)) = read_message(&mut buf_reader) {
let msg = LspMessage::from(msg_buf.as_slice());
let cvar = &messages.1;
// Push while holding the lock, then notify after releasing it.
{
let mut messages = messages.0.lock();
messages.push(msg);
}
cvar.notify_all();
}
}
});
LspStdoutReader {
pending_messages: messages,
read_messages: Vec::new(),
}
}
/// Number of messages received but not yet consumed.
pub fn pending_len(&self) -> usize {
self.pending_messages.0.lock().len()
}
/// Dumps the unconsumed message queue to stderr for debugging.
pub fn output_pending_messages(&self) {
let messages = self.pending_messages.0.lock();
eprintln!("{:?}", messages);
}
/// Whether a matching message was ever seen, consumed or still pending.
pub fn had_message(&self, is_match: impl Fn(&LspMessage) -> bool) -> bool {
self.read_messages.iter().any(&is_match)
|| self.pending_messages.0.lock().iter().any(&is_match)
}
/// Blocks until the OLDEST pending message matching `get_match` arrives,
/// removes it from the queue, and returns the mapped value.
pub fn read_message<R>(
&mut self,
mut get_match: impl FnMut(&LspMessage) -> Option<R>,
) -> R {
let (msg_queue, cvar) = &*self.pending_messages;
let mut msg_queue = msg_queue.lock();
loop {
// Scan front-to-back so the earliest match wins.
for i in 0..msg_queue.len() {
let msg = &msg_queue[i];
if let Some(result) = get_match(msg) {
let msg = msg_queue.remove(i);
self.read_messages.push(msg);
return result;
}
}
// Nothing matched yet; release the lock and wait for the reader thread.
cvar.wait(&mut msg_queue);
}
}
/// Like `read_message`, but scans back-to-front so the NEWEST match wins.
pub fn read_latest_message<R>(
&mut self,
mut get_match: impl FnMut(&LspMessage) -> Option<R>,
) -> R {
let (msg_queue, cvar) = &*self.pending_messages;
let mut msg_queue = msg_queue.lock();
loop {
for i in (0..msg_queue.len()).rev() {
let msg = &msg_queue[i];
if let Some(result) = get_match(msg) {
let msg = msg_queue.remove(i);
self.read_messages.push(msg);
return result;
}
}
cvar.wait(&mut msg_queue);
}
}
}
/// Builder for the `initialize` request parameters sent to the language
/// server, pre-populated with the test harness's default capabilities.
pub struct InitializeParamsBuilder {
params: InitializeParams,
}
impl InitializeParamsBuilder {
/// Builds default params from a client `config` value: the "deno" section
/// becomes the initialization options, with any top-level "typescript" /
/// "javascript" sections carried alongside.
#[allow(clippy::new_without_default)]
pub fn new(config: Value) -> Self {
let mut config_as_options = json!({});
if let Some(object) = config.as_object() {
if let Some(deno) = object.get("deno")
&& let Some(deno) = deno.as_object()
{
config_as_options = json!(deno.clone());
}
let config_as_options = config_as_options.as_object_mut().unwrap();
if let Some(typescript) = object.get("typescript") {
config_as_options.insert("typescript".to_string(), typescript.clone());
}
if let Some(javascript) = object.get("javascript") {
config_as_options.insert("javascript".to_string(), javascript.clone());
}
}
Self {
params: InitializeParams {
process_id: None,
client_info: Some(ClientInfo {
name: "test-harness".to_string(),
version: Some("1.0.0".to_string()),
}),
initialization_options: Some(config_as_options),
// Capabilities advertised to the server by default in tests.
capabilities: ClientCapabilities {
text_document: Some(TextDocumentClientCapabilities {
code_action: Some(CodeActionClientCapabilities {
code_action_literal_support: Some(CodeActionLiteralSupport {
code_action_kind: CodeActionKindLiteralSupport {
value_set: vec![
"quickfix".to_string(),
"refactor".to_string(),
],
},
}),
is_preferred_support: Some(true),
data_support: Some(true),
disabled_support: Some(true),
resolve_support: Some(CodeActionCapabilityResolveSupport {
properties: vec!["edit".to_string()],
}),
..Default::default()
}),
completion: Some(CompletionClientCapabilities {
completion_item: Some(CompletionItemCapability {
snippet_support: Some(true),
..Default::default()
}),
..Default::default()
}),
diagnostic: Some(Default::default()),
folding_range: Some(FoldingRangeClientCapabilities {
line_folding_only: Some(true),
..Default::default()
}),
synchronization: Some(TextDocumentSyncClientCapabilities {
dynamic_registration: Some(true),
will_save: Some(true),
will_save_wait_until: Some(true),
did_save: Some(true),
}),
..Default::default()
}),
workspace: Some(WorkspaceClientCapabilities {
configuration: Some(true),
diagnostic: Some(lsp::DiagnosticWorkspaceClientCapabilities {
refresh_support: Some(true),
}),
workspace_folders: Some(true),
..Default::default()
}),
experimental: Some(json!({
"testingApi": true
})),
..Default::default()
},
..Default::default()
},
}
}
#[allow(deprecated)]
pub fn set_maybe_root_uri(&mut self, value: Option<Uri>) -> &mut Self {
self.params.root_uri = value;
self
}
pub fn set_root_uri(&mut self, value: Uri) -> &mut Self {
self.set_maybe_root_uri(Some(value))
}
pub fn set_workspace_folders(
&mut self,
folders: Vec<lsp_types::WorkspaceFolder>,
) -> &mut Self {
self.params.workspace_folders = Some(folders);
self
}
/// Turns off the experimental testing API capability and drops the
/// matching "testing" initialization option.
pub fn disable_testing_api(&mut self) -> &mut Self {
let obj = self
.params
.capabilities
.experimental
.as_mut()
.unwrap()
.as_object_mut()
.unwrap();
obj.insert("testingApi".to_string(), false.into());
let options = self.initialization_options_mut();
options.remove("testing");
self
}
pub fn enable_client_provided_organize_imports(&mut self) -> &mut Self {
let obj = self
.params
.capabilities
.experimental
.as_mut()
.unwrap()
.as_object_mut()
.unwrap();
obj.insert("clientProvidedOrganizeImports".to_string(), true.into());
self
}
// The set_* helpers below each write one key into the initialization
// options map.
pub fn set_cache(&mut self, value: impl AsRef<str>) -> &mut Self {
let options = self.initialization_options_mut();
options.insert("cache".to_string(), value.as_ref().to_string().into());
self
}
pub fn set_code_lens(
&mut self,
value: Option<serde_json::Value>,
) -> &mut Self {
let options = self.initialization_options_mut();
if let Some(value) = value {
options.insert("codeLens".to_string(), value);
} else {
options.remove("codeLens");
}
self
}
pub fn set_config(&mut self, value: impl AsRef<str>) -> &mut Self {
let options = self.initialization_options_mut();
options.insert("config".to_string(), value.as_ref().to_string().into());
self
}
pub fn set_disable_paths(&mut self, value: Vec<String>) -> &mut Self {
let options = self.initialization_options_mut();
options.insert("disablePaths".to_string(), value.into());
self
}
pub fn set_enable_paths(&mut self, value: Vec<String>) -> &mut Self {
let options = self.initialization_options_mut();
options.insert("enablePaths".to_string(), value.into());
self
}
pub fn set_deno_enable(&mut self, value: bool) -> &mut Self {
let options = self.initialization_options_mut();
options.insert("enable".to_string(), value.into());
self
}
pub fn set_import_map(&mut self, value: impl AsRef<str>) -> &mut Self {
let options = self.initialization_options_mut();
options.insert("importMap".to_string(), value.as_ref().to_string().into());
self
}
pub fn set_preload_limit(&mut self, arg: usize) -> &mut Self {
let options = self.initialization_options_mut();
options.insert("documentPreloadLimit".to_string(), arg.into());
self
}
pub fn set_tls_certificate(&mut self, value: impl AsRef<str>) -> &mut Self {
let options = self.initialization_options_mut();
options.insert(
"tlsCertificate".to_string(),
value.as_ref().to_string().into(),
);
self
}
pub fn set_unstable(&mut self, value: bool) -> &mut Self {
let options = self.initialization_options_mut();
options.insert("unstable".to_string(), value.into());
self
}
pub fn add_test_server_suggestions(&mut self) -> &mut Self {
self.set_suggest_imports_hosts(vec![(
"http://localhost:4545/".to_string(),
true,
)])
}
/// Replaces `suggest.imports.hosts` with exactly the given (host, enabled)
/// pairs. Panics if the nested structure is absent from the options.
pub fn set_suggest_imports_hosts(
&mut self,
values: Vec<(String, bool)>,
) -> &mut Self {
let options = self.initialization_options_mut();
let suggest = options.get_mut("suggest").unwrap().as_object_mut().unwrap();
let imports = suggest.get_mut("imports").unwrap().as_object_mut().unwrap();
let hosts = imports.get_mut("hosts").unwrap().as_object_mut().unwrap();
hosts.clear();
for (key, value) in values {
hosts.insert(key, value.into());
}
self
}
/// Arbitrary in-place mutation of the advertised client capabilities.
pub fn with_capabilities(
&mut self,
mut action: impl FnMut(&mut ClientCapabilities),
) -> &mut Self {
action(&mut self.params.capabilities);
self
}
// Mutable view of the initialization options object; `new` always sets it.
fn initialization_options_mut(
&mut self,
) -> &mut serde_json::Map<String, serde_json::Value> {
let options = self.params.initialization_options.as_mut().unwrap();
options.as_object_mut().unwrap()
}
pub fn build(&self) -> InitializeParams {
self.params.clone()
}
}
/// Builder that spawns a `deno lsp` child process wired up for tests.
pub struct LspClientBuilder {
// If true, the child inherits our stderr (debugging only; see below).
stderr_inherit: bool,
// If true, the child's stderr is discarded.
stderr_null: bool,
// Pass `--log-level=debug` to the lsp.
log_debug: bool,
deno_exe: PathRef,
root_dir: PathRef,
deno_dir: TempDir,
// Extra environment variables for the child process.
envs: HashMap<OsString, OsString>,
// Parse perf records from stderr (see `collect_perf`).
collect_perf: bool,
}
impl LspClientBuilder {
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
Self::new_with_dir(TempDir::new())
}
pub fn new_with_dir(deno_dir: TempDir) -> Self {
Self {
stderr_inherit: false,
stderr_null: false,
log_debug: false,
deno_exe: deno_exe_path(),
root_dir: deno_dir.path().clone(),
deno_dir,
envs: Default::default(),
collect_perf: false,
}
}
pub fn deno_exe(mut self, exe_path: impl AsRef<Path>) -> Self {
self.deno_exe = PathRef::new(exe_path);
self
}
// not deprecated, this is just here so you don't accidentally
// commit code with this enabled
#[deprecated]
pub fn stderr_inherit(mut self) -> Self {
self.stderr_inherit = true;
self
}
pub fn stderr_null(mut self) -> Self {
self.stderr_null = true;
self
}
pub fn log_debug(mut self) -> Self {
self.log_debug = true;
self
}
/// Whether to collect performance records (marks / measures, as emitted
/// by the lsp in the `performance` module).
/// Disables `stderr_inherit` and `stderr_null`.
pub fn collect_perf(mut self) -> Self {
self.stderr_inherit = false;
self.stderr_null = false;
self.collect_perf = true;
self
}
pub fn set_root_dir(mut self, root_dir: PathRef) -> Self {
self.root_dir = root_dir;
self
}
/// Adds an environment variable for the spawned `deno lsp` process.
pub fn env(
mut self,
key: impl AsRef<OsStr>,
value: impl AsRef<OsStr>,
) -> Self {
self
.envs
.insert(key.as_ref().to_owned(), value.as_ref().to_owned());
self
}
pub fn build(&self) -> LspClient {
self.build_result().unwrap()
}
/// Spawns the `deno lsp` child process and wires up stdout/stderr readers.
pub fn build_result(&self) -> Result<LspClient> {
let deno_dir = self.deno_dir.clone();
let mut command = Command::new(&self.deno_exe);
let mut args = vec!["lsp".to_string()];
if self.log_debug {
args.push("--log-level=debug".to_string());
}
command
.env("DENO_DIR", deno_dir.path())
.env("NPM_CONFIG_REGISTRY", npm_registry_url())
.env("JSR_URL", jsr_registry_url())
// turn on diagnostic synchronization communication
.env("DENO_INTERNAL_DIAGNOSTIC_BATCH_NOTIFICATIONS", "1")
.env("DENO_NO_UPDATE_CHECK", "1")
.args(args)
.stdin(Stdio::piped())
.stdout(Stdio::piped());
for (key, value) in &self.envs {
command.env(key, value);
}
if self.stderr_null {
command.stderr(Stdio::null());
} else if !self.stderr_inherit {
command.stderr(Stdio::piped());
}
let mut child = command.spawn()?;
let stdout = child.stdout.take().unwrap();
let buf_reader = io::BufReader::new(stdout);
let reader = LspStdoutReader::new(buf_reader);
let stdin = child.stdin.take().unwrap();
let writer = io::BufWriter::new(stdin);
// When stderr is piped, forward it line-by-line on a background thread,
// optionally peeling off perf records when `collect_perf` is set.
let (stderr_lines_rx, perf_rx) =
if !self.stderr_null && !self.stderr_inherit {
let stderr = child.stderr.take().unwrap();
let (tx, rx) = mpsc::channel::<String>();
let (perf_tx, perf_rx) =
self.collect_perf.then(mpsc::channel::<PerfRecord>).unzip();
spawn_thread(move || {
let stderr = BufReader::new(stderr);
for line in stderr.lines() {
match line {
Ok(line) => {
eprintln!("{}", line);
if let Some(tx) = perf_tx.as_ref() {
// look for perf records
if line.starts_with('{') && line.ends_with("},") {
match serde_json::from_str::<PerfRecord>(
line.trim_end_matches(','),
) {
Ok(record) => {
tx.send(record).unwrap();
continue;
}
Err(err) => {
eprintln!("failed to parse perf record: {:#}", err);
}
}
}
}
tx.send(line).unwrap();
}
Err(err) => {
panic!("failed to read line from stderr: {:#}", err);
}
}
}
});
(Some(rx), perf_rx)
} else {
(None, None)
};
Ok(LspClient {
child,
reader,
request_id: 1,
start: Instant::now(),
root_dir: self.root_dir.clone(),
writer,
deno_dir,
stderr_lines_rx,
// NOTE(review): `json!("{}")` is a JSON *string*, not an empty object;
// it is only a placeholder that `initialize*` overwrites before the
// config is read — consider `json!({})`. Verify before changing.
config: json!("{}"),
supports_workspace_configuration: false,
supports_pull_diagnostics: false,
perf: perf_rx.map(Perf::new),
open_docs: Default::default(),
notebook_cells: Default::default(),
})
}
}
#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "camelCase", tag = "type")]
/// A performance record, emitted by the `lsp::performance`
/// module.
pub enum PerfRecord {
Mark(PerfMark),
Measure(PerfMeasure),
}
/// A completed measurement: how many times `name` ran and for how long.
#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "camelCase")]
pub struct PerfMeasure {
name: String,
count: u32,
duration: f64,
}
/// A point-in-time mark; `count` and `args` are optional in the emitted JSON.
#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "camelCase")]
pub struct PerfMark {
name: String,
#[serde(default)]
count: Option<u32>,
#[serde(default)]
args: Option<Value>,
}
/// Accumulates perf records received from the lsp's stderr channel.
#[derive(Debug)]
pub struct Perf {
// All records drained so far, in arrival order.
records: Vec<PerfRecord>,
// Running count of measures per name (see `measure_count`).
measures_counts: HashMap<String, u32>,
rx: mpsc::Receiver<PerfRecord>,
}
impl Perf {
  fn new(rx: mpsc::Receiver<PerfRecord>) -> Self {
    Self {
      records: Default::default(),
      measures_counts: Default::default(),
      rx,
    }
  }

  /// Drains records from the channel until one satisfies `f`,
  /// panicking if none arrives within five seconds.
  fn drain_until(&mut self, f: impl Fn(&PerfRecord) -> bool) {
    let deadline =
      Instant::now().checked_add(Duration::from_secs(5)).unwrap();
    loop {
      let mut found = false;
      // Pull everything currently queued, tallying measure counts.
      while let Ok(record) = self.rx.try_recv() {
        if let PerfRecord::Measure(measure) = &record {
          *self
            .measures_counts
            .entry(measure.name.clone())
            .or_default() += 1;
        }
        found |= f(&record);
        self.records.push(record);
      }
      if found {
        return;
      }
      std::thread::sleep(Duration::from_millis(20));
      if Instant::now() > deadline {
        panic!("timed out waiting for perf record");
      }
    }
  }

  /// All measure records drained so far.
  pub fn measures(&self) -> impl IntoIterator<Item = &PerfMeasure> {
    self.records.iter().filter_map(|record| {
      if let PerfRecord::Measure(measure) = record {
        Some(measure)
      } else {
        None
      }
    })
  }

  /// How many times a measure with `name` has been observed.
  pub fn measure_count(&self, name: &str) -> u32 {
    self.measures_counts.get(name).copied().unwrap_or_default()
  }
}
/// Test client driving a spawned `deno lsp` process over stdin/stdout.
pub struct LspClient {
child: Child,
reader: LspStdoutReader,
// Next JSON-RPC request id to allocate.
request_id: u64,
// When the client was constructed (see `duration`).
start: Instant,
writer: io::BufWriter<ChildStdin>,
deno_dir: TempDir,
root_dir: PathRef,
// Present only when stderr is piped (not inherited/nulled).
stderr_lines_rx: Option<mpsc::Receiver<String>>,
// The settings object served to `workspace/configuration` requests.
config: serde_json::Value,
supports_workspace_configuration: bool,
supports_pull_diagnostics: bool,
// Present only when built with `collect_perf()`.
perf: Option<Perf>,
// Open documents and their versions, used for pull diagnostics.
open_docs: IndexMap<Uri, i32>,
notebook_cells: HashMap<Uri, Vec<Uri>>,
}
impl Drop for LspClient {
  /// Reaps the child process if still running so tests don't leak
  /// `deno lsp` processes.
  fn drop(&mut self) {
    match self.child.try_wait() {
      // Still running: kill and wait to avoid leaving a zombie behind.
      Ok(None) => {
        self.child.kill().unwrap();
        let _ = self.child.wait();
      }
      // Already exited: nothing to do.
      Ok(Some(_)) => {}
      // Fixed copy-pasted "pebble error" message to describe what failed.
      Err(e) => panic!("failed to check lsp child process status: {e}"),
    }
  }
}
impl LspClient {
/// The temp dir used as `DENO_DIR` for the spawned lsp process.
pub fn deno_dir(&self) -> &TempDir {
&self.deno_dir
}
/// Time elapsed since this client was constructed.
pub fn duration(&self) -> Duration {
self.start.elapsed()
}
/// True when no unread messages are pending from the server.
pub fn queue_is_empty(&self) -> bool {
self.reader.pending_len() == 0
}
/// Number of pending messages; also dumps them to stderr for debugging.
pub fn queue_len(&self) -> usize {
self.reader.output_pending_messages();
self.reader.pending_len()
}
/// Collects performance records until a measure with the given name is
/// emitted.
///
/// Panics unless the client was built with `collect_perf()`.
pub fn perf_wait_for_measure(&mut self, name: &str) -> &Perf {
let perf = self
.perf
.as_mut()
.expect("must setup with client_builder.collect_perf()");
perf.drain_until(|record| matches!(record, PerfRecord::Measure(measure) if measure.name == name));
perf
}
/// Polls the lsp's stderr until a line satisfies `condition`, panicking
/// (and dumping the lines seen so far) after a five second timeout.
#[track_caller]
pub fn wait_until_stderr_line(
  &self,
  mut condition: impl FnMut(&str) -> bool,
) {
  let deadline =
    Instant::now().checked_add(Duration::from_secs(5)).unwrap();
  let lines_rx = self
    .stderr_lines_rx
    .as_ref()
    .expect("must not setup with client_builder.stderr_null() or client_builder.stderr_inherit()");
  let mut seen_lines = Vec::new();
  while Instant::now() < deadline {
    match lines_rx.try_recv() {
      Ok(line) if condition(&line) => return,
      Ok(line) => seen_lines.push(line),
      Err(_) => {}
    }
    std::thread::sleep(Duration::from_millis(20));
  }
  eprintln!("==== STDERR OUTPUT ====");
  for line in seen_lines {
    eprintln!("{}", line)
  }
  eprintln!("== END STDERR OUTPUT ==");
  panic!("Timed out waiting on condition.")
}
/// Initializes with the harness's default config and builder settings.
pub fn initialize_default(&mut self) {
self.initialize(|_| {})
}
/// Performs the `initialize`/`initialized` handshake using the default
/// test config below; `do_build` can tweak the params before sending.
pub fn initialize(
&mut self,
do_build: impl Fn(&mut InitializeParamsBuilder),
) {
self.initialize_with_config(
do_build,
json!({ "deno": {
"enable": true,
"cache": null,
"certificateStores": null,
"codeLens": {
"implementations": false,
"references": false,
"referencesAllFunctions": false,
"test": true,
},
"config": null,
"importMap": null,
"lint": true,
"suggest": {
"autoImports": true,
"completeFunctionCalls": false,
"names": true,
"paths": true,
"imports": {
"hosts": {},
},
},
"testing": {
"args": [
"--allow-all"
],
"enable": true,
},
"tlsCertificate": null,
"unsafelyIgnoreCertificateErrors": null,
"unstable": false,
// setting this causes performance records to be logged
// to stderr
"internalDebug": self.perf.is_some(),
} }),
)
}
/// Performs the handshake with an explicit config value, records which
/// capabilities were advertised, and answers the server's initial
/// `workspace/configuration` pull when supported.
pub fn initialize_with_config(
&mut self,
do_build: impl Fn(&mut InitializeParamsBuilder),
mut config: Value,
) {
let mut builder = InitializeParamsBuilder::new(config.clone());
builder.set_root_uri(self.root_dir.uri_dir());
do_build(&mut builder);
let params: InitializeParams = builder.build();
// `config` must be updated to account for the builder changes.
// TODO(nayeemrmn): Remove config-related methods from builder.
if let Some(options) = &params.initialization_options
&& let Some(options) = options.as_object()
&& let Some(config) = config.as_object_mut()
{
// Re-split the flattened options back into "deno" / "typescript" /
// "javascript" sections, mirroring the builder's `new`.
let mut deno = options.clone();
let typescript = options.get("typescript");
let javascript = options.get("javascript");
deno.remove("typescript");
deno.remove("javascript");
config.insert("deno".to_string(), json!(deno));
if let Some(typescript) = typescript {
config.insert("typescript".to_string(), typescript.clone());
}
if let Some(javascript) = javascript {
config.insert("javascript".to_string(), javascript.clone());
}
}
self.supports_workspace_configuration = match &params.capabilities.workspace
{
Some(workspace) => workspace.configuration == Some(true),
_ => false,
};
self.supports_pull_diagnostics = params
.capabilities
.text_document
.as_ref()
.and_then(|t| t.diagnostic.as_ref())
.is_some();
self.write_request("initialize", params);
self.write_notification("initialized", json!({}));
self.config = config;
if self.supports_workspace_configuration {
self.handle_configuration_request();
}
}
pub fn did_open_file(&mut self, file: &SourceFile) -> CollectedDiagnostics {
self.did_open(json!({
"textDocument": file.text_document(),
}))
}
/// Sends `textDocument/didOpen` and waits for the resulting diagnostics.
pub fn did_open(&mut self, params: Value) -> CollectedDiagnostics {
self.did_open_raw(params);
self.read_diagnostics()
}
/// Closes `file` on the server.
pub fn did_close_file(&mut self, file: &SourceFile) {
self.did_close(json!({
"textDocument": file.identifier(),
}))
}
/// Sends `textDocument/didClose` with the given params.
pub fn did_close(&mut self, params: Value) {
self.did_close_raw(params);
}
/// Sends `textDocument/didClose` without any extra bookkeeping.
pub fn did_close_raw(&mut self, params: Value) {
self.write_notification("textDocument/didClose", params);
}
/// Sends `textDocument/didOpen` without waiting for diagnostics.
pub fn did_open_raw(&mut self, params: Value) {
self.write_notification("textDocument/didOpen", params);
}
/// Opens a Jupyter notebook with the given cells (each a serialized
/// `TextDocumentItem`) and waits for the resulting diagnostics.
pub fn notebook_did_open(
&mut self,
uri: Uri,
version: i32,
cells: Vec<Value>,
) -> CollectedDiagnostics {
let cells = cells
.into_iter()
.map(|c| serde_json::from_value::<lsp::TextDocumentItem>(c).unwrap())
.collect::<Vec<_>>();
let params = lsp::DidOpenNotebookDocumentParams {
notebook_document: lsp::NotebookDocument {
uri,
notebook_type: "jupyter-notebook".to_string(),
version,
metadata: None,
cells: cells
.iter()
.map(|c| lsp::NotebookCell {
// Markdown cells are markup; anything else is treated as code.
kind: if c.language_id == "markdown" {
lsp::NotebookCellKind::Markup
} else {
lsp::NotebookCellKind::Code
},
document: c.uri.clone(),
metadata: None,
execution_summary: None,
})
.collect(),
},
cell_text_documents: cells,
};
self.write_notification("notebookDocument/didOpen", json!(params));
self.read_diagnostics()
}
/// Replaces the client config and notifies the server. When the server
/// pulls configuration, we send an empty settings push and then answer
/// its follow-up `workspace/configuration` request; otherwise the full
/// config is pushed inline.
pub fn change_configuration(&mut self, config: Value) {
self.config = config;
if self.supports_workspace_configuration {
self.write_notification(
"workspace/didChangeConfiguration",
json!({ "settings": {} }),
);
self.handle_configuration_request();
} else {
self.write_notification(
"workspace/didChangeConfiguration",
json!({ "settings": &self.config }),
);
}
}
/// Answers a pending `workspace/configuration` request by looking each
/// requested section up in the stored client config.
pub fn handle_configuration_request(&mut self) {
  let (id, method, args) = self.read_request::<Value>();
  assert_eq!(method, "workspace/configuration");
  let items = args
    .as_ref()
    .unwrap()
    .as_object()
    .unwrap()
    .get("items")
    .unwrap()
    .as_array()
    .unwrap();
  let config_object = self.config.as_object().unwrap();
  // Missing sections are answered with `null` (the Value default).
  let result: Vec<Value> = items
    .iter()
    .map(|item| {
      let section =
        item.as_object().unwrap().get("section").unwrap().as_str().unwrap();
      config_object.get(section).cloned().unwrap_or_default()
    })
    .collect();
  self.write_response(id, result);
}
/// Acknowledges a pending `workspace/diagnostic/refresh` request.
pub fn handle_refresh_diagnostics_request(&mut self) {
let (id, method, _) = self.read_request::<Value>();
assert_eq!(method, "workspace/diagnostic/refresh");
self.write_response(id, None::<()>);
}
/// Sends `textDocument/didSave` with the given params.
pub fn did_save(&mut self, params: Value) {
self.write_notification("textDocument/didSave", params);
}
/// Sends `workspace/didChangeWatchedFiles` with the given params.
pub fn did_change_watched_files(&mut self, params: Value) {
self.write_notification("workspace/didChangeWatchedFiles", params);
}
/// Pulls diagnostics for a single document via `textDocument/diagnostic`.
pub fn diagnostic(
&mut self,
uri: impl Serialize,
) -> lsp::DocumentDiagnosticReport {
self.write_request_with_res_as(
"textDocument/diagnostic",
json!({ "textDocument": { "uri": uri } }),
)
}
/// Executes the `deno.cache` command for the given specifiers, resolved
/// relative to `referrer`.
pub fn cache(
&mut self,
specifiers: impl IntoIterator<Item = impl Serialize>,
referrer: impl Serialize,
) {
self.write_request(
"workspace/executeCommand",
json!({
"command": "deno.cache",
"arguments": [specifiers.into_iter().collect::<Vec<_>>(), referrer],
}),
);
}
/// Executes `deno.cache` with a single specifier and no extra roots.
pub fn cache_specifier(&mut self, specifier: impl Serialize) {
self.write_request(
"workspace/executeCommand",
json!({
"command": "deno.cache",
"arguments": [[], specifier],
}),
);
}
/// Reads the latest diagnostics.
///
/// With pull diagnostics, each open document is queried directly;
/// otherwise the internal diagnostic batch notifications delimit a run of
/// `textDocument/publishDiagnostics` pushes to collect.
pub fn read_diagnostics(&mut self) -> CollectedDiagnostics {
if self.supports_pull_diagnostics {
return CollectedDiagnostics(
self
.open_docs
.clone()
.into_iter()
.map(|(uri, version)| {
let report = self.diagnostic(&uri);
let diagnostics = match report {
lsp::DocumentDiagnosticReport::Full(report) => {
report.full_document_diagnostic_report.items
}
// Unchanged reports carry no items.
lsp::DocumentDiagnosticReport::Unchanged(_) => vec![],
};
lsp::PublishDiagnosticsParams {
uri,
diagnostics,
version: Some(version),
}
})
.collect(),
);
}
self.read_notification_with_method::<()>(
"deno/internalTestDiagnosticBatchStart",
);
// Most tests have just one open document.
let mut diagnostics = Vec::with_capacity(1);
loop {
let (method, params) = self.read_notification::<Value>();
if method == "deno/internalTestDiagnosticBatchEnd" {
break;
}
if method == "textDocument/publishDiagnostics" {
diagnostics.push(
serde_json::from_value::<lsp::PublishDiagnosticsParams>(
params.unwrap(),
)
.unwrap(),
);
}
}
CollectedDiagnostics(diagnostics)
}
pub fn shutdown(&mut self) {
self.write_request("shutdown", json!(null));
self.write_notification("exit", json!(null));
}
pub fn wait_exit(&mut self) -> std::io::Result<std::process::ExitStatus> {
self.child.wait()
}
// it's flaky to assert for a notification because a notification
// might arrive a little later, so only provide a method for asserting
// that there is no notification
pub fn assert_no_notification(&mut self, searching_method: &str) {
assert!(!self.reader.had_message(|message| match message {
LspMessage::Notification(method, _) => method == searching_method,
_ => false,
}))
}
pub fn read_notification<R>(&mut self) -> (String, Option<R>)
where
R: de::DeserializeOwned,
{
self.reader.read_message(|msg| match msg {
LspMessage::Notification(method, maybe_params) => {
let params = serde_json::from_value(maybe_params.clone()?).ok()?;
Some((method.to_string(), params))
}
_ => None,
})
}
pub fn read_latest_notification<R>(&mut self) -> (String, Option<R>)
where
R: de::DeserializeOwned,
{
self.reader.read_latest_message(|msg| match msg {
LspMessage::Notification(method, maybe_params) => {
let params = serde_json::from_value(maybe_params.clone()?).ok()?;
Some((method.to_string(), params))
}
_ => None,
})
}
pub fn read_notification_with_method<R>(
&mut self,
expected_method: &str,
) -> Option<R>
where
R: de::DeserializeOwned,
{
self.reader.read_message(|msg| match msg {
LspMessage::Notification(method, maybe_params) => {
if method != expected_method {
None
} else {
serde_json::from_value(maybe_params.clone()?).ok()
}
}
_ => None,
})
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/builders.rs | tests/util/server/src/builders.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::cell::RefCell;
use std::collections::HashMap;
use std::collections::HashSet;
use std::ffi::OsStr;
use std::ffi::OsString;
use std::io::BufRead;
use std::io::BufReader;
use std::io::Read;
use std::io::Write;
use std::ops::Deref;
use std::ops::DerefMut;
use std::path::Path;
use std::path::PathBuf;
use std::process::Child;
use std::process::Command;
use std::process::Stdio;
use std::rc::Rc;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use file_test_runner::TestResult;
use os_pipe::pipe;
use parking_lot::Mutex;
use crate::HttpServerGuard;
use crate::TempDir;
use crate::assertions::assert_wildcard_match;
use crate::assertions::assert_wildcard_match_with_logger;
use crate::deno_exe_path;
use crate::denort_exe_path;
use crate::env_vars_for_jsr_tests;
use crate::env_vars_for_npm_tests;
use crate::eprintln;
use crate::fs::PathRef;
use crate::http_server;
use crate::jsr_registry_unset_url;
use crate::lsp::LspClientBuilder;
use crate::nodejs_org_mirror_unset_url;
use crate::npm_registry_unset_url;
use crate::print::spawn_thread;
use crate::println;
use crate::pty::Pty;
use crate::servers::tsgo_prebuilt_path;
use crate::strip_ansi_codes;
use crate::testdata_path;
use crate::tests_path;
// Gives the developer a nice error message if they have a deno configuration
// file that will be auto-discovered by the tests and cause a lot of failures.
static HAS_DENO_JSON_IN_WORKING_DIR_ERR: once_cell::sync::Lazy<Option<String>> =
once_cell::sync::Lazy::new(|| {
let testdata_path = testdata_path();
let mut current_dir = testdata_path.as_path();
let deno_json_names = ["deno.json", "deno.jsonc"];
loop {
for name in deno_json_names {
let deno_json_path = current_dir.join(name);
if deno_json_path.exists() {
return Some(format!(
concat!(
"Found deno configuration file at {}. The test suite relies on ",
"a deno.json not existing in any ancestor directory. Please ",
"delete this file so the tests won't auto-discover it.",
),
deno_json_path.display(),
));
}
}
if let Some(parent) = current_dir.parent() {
current_dir = parent;
} else {
break;
}
}
None
});
#[derive(Default, Clone)]
enum DiagnosticLogger {
Null,
#[default]
Stderr,
Container(Rc<RefCell<Vec<u8>>>),
}
impl DiagnosticLogger {
pub fn writeln(&self, text: impl AsRef<str>) {
match self {
DiagnosticLogger::Null => {}
DiagnosticLogger::Stderr => {
eprintln!("{}", text.as_ref());
}
DiagnosticLogger::Container(logger) => {
let mut logger = logger.borrow_mut();
logger.write_all(text.as_ref().as_bytes()).unwrap();
logger.write_all(b"\n").unwrap();
}
}
}
}
#[derive(Default)]
pub struct TestContextBuilder {
diagnostic_logger: DiagnosticLogger,
use_http_server: bool,
use_temp_cwd: bool,
use_symlinked_temp_dir: bool,
use_canonicalized_temp_dir: bool,
/// Copies the files at the specified directory in the "testdata" directory
/// to the temp folder and runs the test from there. This is useful when
/// the test creates files in the testdata directory (ex. a node_modules folder)
copy_temp_dir: Option<String>,
temp_dir_path: Option<PathBuf>,
cwd: Option<String>,
envs: HashMap<String, String>,
}
impl TestContextBuilder {
pub fn new() -> Self {
Self::default().add_compile_env_vars()
}
pub fn for_npm() -> Self {
Self::new().use_http_server().add_npm_env_vars()
}
pub fn for_jsr() -> Self {
Self::new().use_http_server().add_jsr_env_vars()
}
pub fn logging_capture(mut self, logger: Rc<RefCell<Vec<u8>>>) -> Self {
self.diagnostic_logger = DiagnosticLogger::Container(logger);
self
}
pub fn temp_dir_path(mut self, path: impl AsRef<Path>) -> Self {
self.temp_dir_path = Some(path.as_ref().to_path_buf());
self
}
pub fn use_http_server(mut self) -> Self {
self.use_http_server = true;
self
}
pub fn use_temp_cwd(mut self) -> Self {
self.use_temp_cwd = true;
self
}
/// Causes the temp directory to be symlinked to a target directory
/// which is useful for debugging issues that only show up on the CI.
///
/// Note: This method is not actually deprecated, it's just the CI
/// does this by default so there's no need to check in any code that
/// uses this into the repo. This is just for debugging purposes.
#[deprecated]
pub fn use_symlinked_temp_dir(mut self) -> Self {
self.use_symlinked_temp_dir = true;
self
}
/// Causes the temp directory to go to its canonicalized path instead
/// of being in a symlinked temp dir on the CI.
///
/// Note: This method is not actually deprecated. It's just deprecated
/// to discourage its use. Use it sparingly and document why you're using
/// it. You better have a good reason other than being lazy!
///
/// If your tests are failing because the temp dir is symlinked on the CI,
/// then it likely means your code doesn't properly handle when Deno is running
/// in a symlinked directory. That's a bug and you should fix it without using
/// this.
#[deprecated]
pub fn use_canonicalized_temp_dir(mut self) -> Self {
self.use_canonicalized_temp_dir = true;
self
}
/// Copies the files at the specified directory in the "testdata" directory
/// to the temp folder and runs the test from there. This is useful when
/// the test creates files in the testdata directory (ex. a node_modules folder)
pub fn use_copy_temp_dir(mut self, dir: impl AsRef<str>) -> Self {
self.copy_temp_dir = Some(dir.as_ref().to_string());
self
}
pub fn cwd(mut self, cwd: impl AsRef<str>) -> Self {
self.cwd = Some(cwd.as_ref().to_string());
self
}
pub fn envs<I, K, V>(self, vars: I) -> Self
where
I: IntoIterator<Item = (K, V)>,
K: AsRef<str>,
V: AsRef<str>,
{
let mut this = self;
for (key, value) in vars {
this = this.env(key, value);
}
this
}
pub fn env(mut self, key: impl AsRef<str>, value: impl AsRef<str>) -> Self {
self
.envs
.insert(key.as_ref().to_string(), value.as_ref().to_string());
self
}
pub fn add_npm_env_vars(mut self) -> Self {
for (key, value) in env_vars_for_npm_tests() {
self = self.env(key, value);
}
self
}
pub fn add_compile_env_vars(mut self) -> Self {
// The `denort` binary is in the same artifact directory as the `deno` binary.
let denort_bin = denort_exe_path();
self = self.env("DENORT_BIN", denort_bin.to_string());
self
}
pub fn add_jsr_env_vars(mut self) -> Self {
for (key, value) in env_vars_for_jsr_tests() {
self = self.env(key, value);
}
self
}
pub fn build(&self) -> TestContext {
if let Some(err) = &*HAS_DENO_JSON_IN_WORKING_DIR_ERR {
panic!("{}", err);
}
let temp_dir_path = PathRef::new(
self
.temp_dir_path
.clone()
.unwrap_or_else(std::env::temp_dir),
);
let temp_dir_path = if self.use_canonicalized_temp_dir {
temp_dir_path.canonicalize()
} else {
temp_dir_path
};
let deno_dir = TempDir::new_in(temp_dir_path.as_path());
let temp_dir = TempDir::new_in(temp_dir_path.as_path());
let temp_dir = if self.use_symlinked_temp_dir {
assert!(!self.use_canonicalized_temp_dir); // code doesn't handle using both of these
TempDir::new_symlinked(temp_dir)
} else {
temp_dir
};
if let Some(temp_copy_dir) = &self.copy_temp_dir {
let test_data_path = testdata_path().join(temp_copy_dir);
let temp_copy_dir = temp_dir.path().join(temp_copy_dir);
temp_copy_dir.create_dir_all();
test_data_path.copy_to_recursive(&temp_copy_dir);
}
let deno_exe = deno_exe_path();
let http_server_guard = if self.use_http_server {
Some(Rc::new(http_server()))
} else {
None
};
let cwd = if self.use_temp_cwd || self.copy_temp_dir.is_some() {
temp_dir.path().to_owned()
} else {
testdata_path().clone()
};
let cwd = match &self.cwd {
Some(specified_cwd) => cwd.join(specified_cwd),
None => cwd,
};
TestContext {
cwd,
deno_exe,
envs: self.envs.clone(),
diagnostic_logger: self.diagnostic_logger.clone(),
_http_server_guard: http_server_guard,
deno_dir,
temp_dir,
}
}
}
#[derive(Clone)]
pub struct TestContext {
deno_exe: PathRef,
diagnostic_logger: DiagnosticLogger,
envs: HashMap<String, String>,
cwd: PathRef,
_http_server_guard: Option<Rc<HttpServerGuard>>,
deno_dir: TempDir,
temp_dir: TempDir,
}
impl Default for TestContext {
fn default() -> Self {
TestContextBuilder::default().build()
}
}
impl TestContext {
pub fn with_http_server() -> Self {
TestContextBuilder::new().use_http_server().build()
}
pub fn deno_dir(&self) -> &TempDir {
&self.deno_dir
}
pub fn temp_dir(&self) -> &TempDir {
&self.temp_dir
}
pub fn new_command(&self) -> TestCommandBuilder {
TestCommandBuilder::new(self.deno_dir.clone())
.set_diagnostic_logger(self.diagnostic_logger.clone())
.envs(self.envs.clone())
.current_dir(&self.cwd)
}
pub fn new_lsp_command(&self) -> LspClientBuilder {
let mut builder = LspClientBuilder::new_with_dir(self.deno_dir.clone())
.deno_exe(&self.deno_exe)
.set_root_dir(self.temp_dir.path().clone());
for (key, value) in &self.envs {
builder = builder.env(key, value);
}
builder
}
pub fn run_deno(&self, args: impl AsRef<str>) {
self
.new_command()
.name("deno")
.args(args)
.run()
.skip_output_check();
}
pub fn run_npm(&self, args: impl AsRef<str>) {
self
.new_command()
.name("npm")
.args(args)
.run()
.skip_output_check();
}
pub fn get_jsr_package_integrity(&self, sub_path: &str) -> String {
fn get_checksum(bytes: &[u8]) -> String {
use sha2::Digest;
let mut hasher = sha2::Sha256::new();
hasher.update(bytes);
format!("{:x}", hasher.finalize())
}
let url = url::Url::parse(self.envs.get("JSR_URL").unwrap()).unwrap();
let url = url.join(&format!("{}_meta.json", sub_path)).unwrap();
let bytes = sync_fetch(url);
get_checksum(&bytes)
}
}
fn sync_fetch(url: url::Url) -> bytes::Bytes {
std::thread::scope(move |s| {
s.spawn(move || {
let runtime = tokio::runtime::Builder::new_current_thread()
.enable_io()
.enable_time()
.build()
.unwrap();
runtime.block_on(async move {
let client = reqwest::Client::new();
let response = client.get(url).send().await.unwrap();
assert!(response.status().is_success());
response.bytes().await.unwrap()
})
})
.join()
.unwrap()
})
}
/// We can't clone an stdio, so if someone clones a DenoCmd,
/// we want to set this to `Cloned` and show the user a helpful
/// panic message.
enum StdioContainer {
Cloned,
Inner(RefCell<Option<Stdio>>),
}
impl Clone for StdioContainer {
fn clone(&self) -> Self {
Self::Cloned
}
}
impl StdioContainer {
pub fn new(stdio: Stdio) -> Self {
Self::Inner(RefCell::new(Some(stdio)))
}
pub fn take(&self) -> Stdio {
match self {
StdioContainer::Cloned => panic!(
"Cannot run a command after it was cloned. You need to reset the stdio value."
),
StdioContainer::Inner(inner) => match inner.borrow_mut().take() {
Some(value) => value,
None => panic!(
"Cannot run a command that was previously run. You need to reset the stdio value between runs."
),
},
}
}
}
#[derive(Clone)]
pub struct TestCommandBuilder {
deno_dir: TempDir,
diagnostic_logger: DiagnosticLogger,
stdin: Option<StdioContainer>,
stdout: Option<StdioContainer>,
stderr: Option<StdioContainer>,
stdin_text: Option<String>,
command_name: String,
cwd: Option<PathRef>,
envs: HashMap<String, String>,
envs_remove: HashSet<String>,
env_clear: bool,
args_text: String,
args_vec: Vec<String>,
split_output: bool,
show_output: bool,
}
impl TestCommandBuilder {
pub fn new(deno_dir: TempDir) -> Self {
Self {
deno_dir,
diagnostic_logger: Default::default(),
stdin: None,
stdout: None,
stderr: None,
stdin_text: None,
split_output: false,
cwd: None,
envs: Default::default(),
envs_remove: Default::default(),
env_clear: false,
command_name: "deno".to_string(),
args_text: "".to_string(),
args_vec: Default::default(),
show_output: false,
}
}
pub fn name(mut self, name: impl AsRef<OsStr>) -> Self {
self.command_name = name.as_ref().to_string_lossy().into_owned();
self
}
pub fn args(mut self, args: impl AsRef<str>) -> Self {
self.args_text = args.as_ref().to_string();
self
}
pub fn args_vec<I, S>(mut self, args: I) -> Self
where
I: IntoIterator<Item = S>,
S: AsRef<std::ffi::OsStr>,
{
self.args_vec.extend(
args
.into_iter()
.map(|s| s.as_ref().to_string_lossy().into_owned()),
);
self
}
pub fn arg<S>(mut self, arg: S) -> Self
where
S: AsRef<std::ffi::OsStr>,
{
self
.args_vec
.push(arg.as_ref().to_string_lossy().into_owned());
self
}
pub fn env_clear(mut self) -> Self {
self.env_clear = true;
self
}
pub fn envs<I, K, V>(self, vars: I) -> Self
where
I: IntoIterator<Item = (K, V)>,
K: AsRef<std::ffi::OsStr>,
V: AsRef<std::ffi::OsStr>,
{
let mut this = self;
for (key, value) in vars {
this = this.env(key, value);
}
this
}
pub fn env<K, V>(mut self, key: K, val: V) -> Self
where
K: AsRef<std::ffi::OsStr>,
V: AsRef<std::ffi::OsStr>,
{
self.envs.insert(
key.as_ref().to_string_lossy().into_owned(),
val.as_ref().to_string_lossy().into_owned(),
);
self
}
pub fn env_remove<K>(mut self, key: K) -> Self
where
K: AsRef<std::ffi::OsStr>,
{
self
.envs_remove
.insert(key.as_ref().to_string_lossy().into_owned());
self
}
/// Set this to enable streaming the output of the command to stderr.
///
/// Not deprecated, this is just here so you don't accidentally
/// commit code with this enabled.
#[deprecated]
pub fn show_output(mut self) -> Self {
self.show_output = true;
self
}
pub fn stdin<T: Into<Stdio>>(mut self, cfg: T) -> Self {
self.stdin = Some(StdioContainer::new(cfg.into()));
self
}
pub fn stdout<T: Into<Stdio>>(mut self, cfg: T) -> Self {
self.stdout = Some(StdioContainer::new(cfg.into()));
self
}
pub fn stderr<T: Into<Stdio>>(mut self, cfg: T) -> Self {
self.stderr = Some(StdioContainer::new(cfg.into()));
self
}
pub fn current_dir<P: AsRef<OsStr>>(mut self, dir: P) -> Self {
let dir = dir.as_ref().to_string_lossy().into_owned();
self.cwd = Some(match self.cwd {
Some(current) => current.join(dir),
None => PathRef::new(dir),
});
self
}
pub fn stdin_piped(self) -> Self {
self.stdin(std::process::Stdio::piped())
}
pub fn stdout_piped(self) -> Self {
self.stdout(std::process::Stdio::piped())
}
pub fn stderr_piped(self) -> Self {
self.stderr(std::process::Stdio::piped())
}
pub fn piped_output(self) -> Self {
self.stdout_piped().stderr_piped()
}
pub fn stdin_text(mut self, text: impl AsRef<str>) -> Self {
self.stdin_text = Some(text.as_ref().to_string());
self.stdin_piped()
}
/// Splits the output into stdout and stderr rather than having them combined.
pub fn split_output(mut self) -> Self {
// Note: it was previously attempted to capture stdout & stderr separately
// then forward the output to a combined pipe, but this was found to be
// too racy compared to providing the same combined pipe to both.
self.split_output = true;
self
}
pub fn disable_diagnostic_logging(self) -> Self {
self.set_diagnostic_logger(DiagnosticLogger::Null)
}
fn set_diagnostic_logger(mut self, logger: DiagnosticLogger) -> Self {
self.diagnostic_logger = logger;
self
}
pub fn with_pty(&self, mut action: impl FnMut(Pty)) {
if !Pty::is_supported() {
return;
}
let cwd = self.build_cwd();
let args = self.build_args(&cwd);
let args = args.iter().map(|s| s.as_str()).collect::<Vec<_>>();
let mut envs = self.build_envs(&cwd);
if !envs.contains_key("NO_COLOR") {
// set this by default for pty tests
envs.insert("NO_COLOR".to_string(), "1".to_string());
}
// note(dsherret): for some reason I need to inject the current
// environment here for the pty tests or else I get dns errors
if !self.env_clear {
for (key, value) in std::env::vars() {
envs.entry(key).or_insert(value);
}
}
let command_path = self.build_command_path();
self.diagnostic_logger.writeln(format!(
"command {} {}",
command_path,
args.join(" ")
));
self
.diagnostic_logger
.writeln(format!("command cwd {}", cwd.display()));
action(Pty::new(command_path.as_path(), &args, &cwd, Some(envs)))
}
pub fn output(&self) -> Result<std::process::Output, std::io::Error> {
assert!(self.stdin_text.is_none(), "use spawn instead");
self.build_command().output()
}
pub fn status(&self) -> Result<std::process::ExitStatus, std::io::Error> {
assert!(self.stdin_text.is_none(), "use spawn instead");
self.build_command().status()
}
pub fn spawn(&self) -> Result<DenoChild, std::io::Error> {
let child = self.build_command().spawn()?;
let mut child = DenoChild {
_deno_dir: self.deno_dir.clone(),
child,
};
if let Some(input) = &self.stdin_text {
let mut p_stdin = child.stdin.take().unwrap();
write!(p_stdin, "{input}").unwrap();
}
Ok(child)
}
pub fn spawn_with_piped_output(&self) -> DenoChild {
self.clone().piped_output().spawn().unwrap()
}
pub fn run(&self) -> TestCommandOutput {
fn read_pipe_to_string(
mut pipe: os_pipe::PipeReader,
output_to_stderr: bool,
) -> String {
if output_to_stderr {
let mut buffer = vec![0; 512];
let mut final_data = Vec::new();
loop {
let size = pipe.read(&mut buffer).unwrap();
if size == 0 {
break;
}
final_data.extend(&buffer[..size]);
std::io::stderr().write_all(&buffer[..size]).unwrap();
}
String::from_utf8_lossy(&final_data).to_string()
} else {
let mut output = String::new();
pipe.read_to_string(&mut output).unwrap();
output
}
}
fn sanitize_output(text: String, args: &[OsString]) -> String {
let mut text = strip_ansi_codes(&text).to_string();
// deno test's output capturing flushes with a zero-width space in order to
// synchronize the output pipes. Occasionally this zero width space
// might end up in the output so strip it from the output comparison here.
if args.first().and_then(|s| s.to_str()) == Some("test") {
text = text.replace('\u{200B}', "");
}
text
}
let mut command = self.build_command();
let args = command
.get_args()
.map(ToOwned::to_owned)
.collect::<Vec<_>>();
let (combined_reader, std_out_err_handle) = if self.split_output {
let (stdout_reader, stdout_writer) = pipe().unwrap();
let (stderr_reader, stderr_writer) = pipe().unwrap();
command.stdout(stdout_writer);
command.stderr(stderr_writer);
let show_output = self.show_output;
(
None,
Some((
spawn_thread(move || read_pipe_to_string(stdout_reader, show_output)),
spawn_thread(move || read_pipe_to_string(stderr_reader, show_output)),
)),
)
} else {
let (combined_reader, combined_writer) = pipe().unwrap();
command.stdout(combined_writer.try_clone().unwrap());
command.stderr(combined_writer);
(Some(combined_reader), None)
};
let mut process = command.spawn().expect("Failed spawning command");
if let Some(input) = &self.stdin_text {
let mut p_stdin = process.stdin.take().unwrap();
write!(p_stdin, "{input}").unwrap();
}
// This parent process is still holding its copies of the write ends,
// and we have to close them before we read, otherwise the read end
// will never report EOF. The Command object owns the writers now,
// and dropping it closes them.
drop(command);
let combined = combined_reader.map(|pipe| {
sanitize_output(read_pipe_to_string(pipe, self.show_output), &args)
});
let status = process.wait().unwrap();
let std_out_err = std_out_err_handle.map(|(stdout, stderr)| {
(
sanitize_output(stdout.join().unwrap(), &args),
sanitize_output(stderr.join().unwrap(), &args),
)
});
let exit_code = status.code();
#[cfg(unix)]
let signal = {
use std::os::unix::process::ExitStatusExt;
status.signal()
};
#[cfg(not(unix))]
let signal = None;
TestCommandOutput {
exit_code,
signal,
combined,
std_out_err,
asserted_exit_code: RefCell::new(false),
asserted_stdout: RefCell::new(false),
asserted_stderr: RefCell::new(false),
asserted_combined: RefCell::new(false),
diagnostic_logger: self.diagnostic_logger.clone(),
_deno_dir: self.deno_dir.clone(),
}
}
fn build_command(&self) -> Command {
let command_path = self.build_command_path();
let cwd = self.build_cwd();
let args = self.build_args(&cwd);
self.diagnostic_logger.writeln(format!(
"command {} {}",
command_path,
args.join(" ")
));
let mut command = Command::new(command_path);
self
.diagnostic_logger
.writeln(format!("command cwd {}", cwd.display()));
command.current_dir(&cwd);
if let Some(stdin) = &self.stdin {
command.stdin(stdin.take());
}
if let Some(stdout) = &self.stdout {
command.stdout(stdout.take());
}
if let Some(stderr) = &self.stderr {
command.stderr(stderr.take());
}
command.args(args.iter());
if self.env_clear {
command.env_clear();
}
let envs = self.build_envs(&cwd);
command.envs(envs);
command.stdin(Stdio::piped());
command
}
fn build_command_path(&self) -> PathRef {
let command_name = if cfg!(windows) && self.command_name == "npm" {
"npm.cmd"
} else {
&self.command_name
};
if command_name == "deno" {
deno_exe_path()
} else if command_name.starts_with("./") && self.cwd.is_some() {
self.cwd.as_ref().unwrap().join(command_name)
} else {
PathRef::new(PathBuf::from(command_name))
}
}
fn build_args(&self, cwd: &Path) -> Vec<String> {
if self.args_vec.is_empty() {
std::borrow::Cow::Owned(
self
.args_text
.split_whitespace()
.map(|s| s.to_string())
.collect::<Vec<_>>(),
)
} else {
assert!(
self.args_text.is_empty(),
"Do not provide args when providing args_vec."
);
std::borrow::Cow::Borrowed(&self.args_vec)
}
.iter()
.map(|arg| self.replace_vars(arg, cwd))
.collect::<Vec<_>>()
}
fn build_cwd(&self) -> PathBuf {
self
.cwd
.as_ref()
.map(PathBuf::from)
.unwrap_or_else(|| std::env::current_dir().unwrap())
}
fn build_envs(&self, cwd: &Path) -> HashMap<String, String> {
let mut envs = self.envs.clone();
if !envs.contains_key("DENO_DIR") {
envs.insert("DENO_DIR".to_string(), self.deno_dir.path().to_string());
}
if !envs.contains_key("NPM_CONFIG_REGISTRY") {
envs.insert("NPM_CONFIG_REGISTRY".to_string(), npm_registry_unset_url());
}
if !envs.contains_key("DENO_NO_UPDATE_CHECK") {
envs.insert("DENO_NO_UPDATE_CHECK".to_string(), "1".to_string());
}
if !envs.contains_key("JSR_URL") {
envs.insert("JSR_URL".to_string(), jsr_registry_unset_url());
}
if !envs.contains_key("NODEJS_ORG_MIRROR") {
envs.insert(
"NODEJS_ORG_MIRROR".to_string(),
nodejs_org_mirror_unset_url(),
);
}
if !envs.contains_key("DENO_TSGO_PATH") {
envs.insert(
"DENO_TSGO_PATH".to_string(),
tsgo_prebuilt_path().to_string(),
);
}
if !envs.contains_key("PATH") {
let path = std::env::var_os("PATH").unwrap_or_default();
let path = std::env::split_paths(&path);
let additional = deno_exe_path().parent().to_path_buf();
let path =
std::env::join_paths(std::iter::once(additional).chain(path)).unwrap();
envs.insert("PATH".to_string(), path.to_string_lossy().to_string());
}
for key in &self.envs_remove {
envs.remove(key);
}
// update any test variables in the env value
for value in envs.values_mut() {
*value = self.replace_vars(value, cwd);
}
envs
}
fn replace_vars(&self, text: &str, cwd: &Path) -> String {
// todo(dsherret): use monch to extract out the vars
text
.replace("$DENO_DIR", &self.deno_dir.path().to_string_lossy())
.replace("$TESTDATA", &testdata_path().to_string_lossy())
.replace("$TESTS", &tests_path().to_string_lossy())
.replace("$PWD", &cwd.to_string_lossy())
}
}
pub struct DenoChild {
// keep alive for the duration of the use of this struct
_deno_dir: TempDir,
child: Child,
}
impl Deref for DenoChild {
type Target = Child;
fn deref(&self) -> &Child {
&self.child
}
}
impl DerefMut for DenoChild {
fn deref_mut(&mut self) -> &mut Child {
&mut self.child
}
}
impl DenoChild {
pub fn wait_with_output(
self,
) -> Result<std::process::Output, std::io::Error> {
self.child.wait_with_output()
}
pub fn wait_to_test_result(self, test_name: &str) -> TestResult {
let mut deno = self;
let now = Instant::now();
let stdout = deno.stdout.take().unwrap();
let no_capture = *file_test_runner::NO_CAPTURE;
let final_output = Arc::new(Mutex::new(Vec::<String>::new()));
let stdout = spawn_thread({
let final_output = final_output.clone();
let test_name = test_name.to_string();
move || {
let reader = BufReader::new(stdout);
for line in reader.lines() {
if let Ok(line) = line {
if no_capture {
println!(
"[{test_name} {:0>6.2}] {line}",
now.elapsed().as_secs_f32()
);
} else {
final_output.lock().push(line);
}
} else {
break;
}
}
}
});
let now = Instant::now();
let stderr = deno.stderr.take().unwrap();
let stderr = spawn_thread({
let final_output = final_output.clone();
let test_name = test_name.to_string();
move || {
let reader = BufReader::new(stderr);
for line in reader.lines() {
if let Ok(line) = line {
if no_capture {
eprintln!(
"[{test_name} {:0>6.2}] {line}",
now.elapsed().as_secs_f32()
);
} else {
final_output.lock().push(line);
}
} else {
break;
}
}
}
});
const PER_TEST_TIMEOUT: Duration = Duration::from_secs(5 * 60);
let get_failure_result = |duration: Duration, error_message: String| {
let mut final_output = std::mem::take(&mut *final_output.lock());
final_output.push(error_message);
TestResult::Failed {
duration: Some(duration),
output: final_output.join("\n").into_bytes(),
}
};
let now = Instant::now();
let status = loop {
if now.elapsed() > PER_TEST_TIMEOUT {
// Last-ditch kill
_ = deno.kill();
return get_failure_result(
now.elapsed(),
format!("Test {} failed to complete in time", test_name),
);
}
if let Some(status) = deno
.try_wait()
.expect("failed to wait for the child process")
{
break status;
}
std::thread::sleep(Duration::from_millis(100));
};
let duration = now.elapsed();
#[cfg(unix)]
if let Some(signal) = std::os::unix::process::ExitStatusExt::signal(&status)
{
return get_failure_result(
duration,
format!("{:?}\nDeno should not have died with a signal", signal,),
);
}
if status.code() != Some(0) {
return get_failure_result(
duration,
format!(
"Deno should have exited cleanly (code: {:?})",
status.code(),
),
);
}
stdout.join().unwrap();
stderr.join().unwrap();
TestResult::Passed {
duration: Some(duration),
}
}
}
pub struct TestCommandOutput {
combined: Option<String>,
std_out_err: Option<(String, String)>,
exit_code: Option<i32>,
signal: Option<i32>,
asserted_stdout: RefCell<bool>,
asserted_stderr: RefCell<bool>,
asserted_combined: RefCell<bool>,
asserted_exit_code: RefCell<bool>,
diagnostic_logger: DiagnosticLogger,
// keep alive for the duration of the output reference
_deno_dir: TempDir,
}
impl Drop for TestCommandOutput {
// assert the output and exit code was asserted
fn drop(&mut self) {
fn panic_unasserted_output(output: &TestCommandOutput, text: &str) {
output
.diagnostic_logger
.writeln(format!("OUTPUT\n{}\nOUTPUT", text));
panic!(concat!(
"The non-empty text of the command was not asserted. ",
"Call `output.skip_output_check()` to skip if necessary.",
));
}
if std::thread::panicking() {
return;
}
// either the combined output needs to be asserted or both stdout and stderr
if let Some(combined) = &self.combined
&& !*self.asserted_combined.borrow()
&& !combined.is_empty()
{
panic_unasserted_output(self, combined);
}
if let Some((stdout, stderr)) = &self.std_out_err {
if !*self.asserted_stdout.borrow() && !stdout.is_empty() {
panic_unasserted_output(self, stdout);
}
if !*self.asserted_stderr.borrow() && !stderr.is_empty() {
panic_unasserted_output(self, stderr);
}
}
// now ensure the exit code was asserted
if !*self.asserted_exit_code.borrow() && self.exit_code != Some(0) {
self.print_output();
panic!(
"The non-zero exit code of the command was not asserted: {:?}",
self.exit_code,
)
}
}
}
impl TestCommandOutput {
pub fn skip_output_check(&self) -> &Self {
*self.asserted_combined.borrow_mut() = true;
self.skip_stdout_check();
self.skip_stderr_check();
self
}
pub fn skip_stdout_check(&self) -> &Self {
*self.asserted_stdout.borrow_mut() = true;
self
}
pub fn skip_stderr_check(&self) -> &Self {
*self.asserted_stderr.borrow_mut() = true;
self
}
pub fn skip_exit_code_check(&self) -> &Self {
*self.asserted_exit_code.borrow_mut() = true;
self
}
pub fn exit_code(&self) -> Option<i32> {
self.skip_exit_code_check();
self.exit_code
}
pub fn signal(&self) -> Option<i32> {
self.signal
}
pub fn combined_output(&self) -> &str {
self.skip_output_check();
self
.combined
.as_deref()
.expect("not available since .split_output() was called")
}
pub fn stdout(&self) -> &str {
*self.asserted_stdout.borrow_mut() = true;
self
.std_out_err
.as_ref()
.map(|(stdout, _)| stdout.as_str())
.expect("call .split_output() on the builder")
}
pub fn stderr(&self) -> &str {
*self.asserted_stderr.borrow_mut() = true;
self
.std_out_err
.as_ref()
.map(|(_, stderr)| stderr.as_str())
.expect("call .split_output() on the builder")
}
#[track_caller]
pub fn assert_exit_code(&self, expected_exit_code: i32) -> &Self {
let actual_exit_code = self.exit_code();
if let Some(exit_code) = &actual_exit_code {
if *exit_code != expected_exit_code {
self.print_output();
panic!(
"bad exit code, expected: {:?}, actual: {:?}",
expected_exit_code, exit_code,
);
}
} else {
self.print_output();
if let Some(signal) = self.signal() {
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/macros.rs | tests/util/server/src/macros.rs | // Copyright 2018-2025 the Deno authors. MIT license.
#[macro_export]
// https://stackoverflow.com/questions/38088067/equivalent-of-func-or-function-in-rust
macro_rules! function {
() => {{
fn f() {}
fn type_name_of<T>(_: T) -> &'static str {
::std::any::type_name::<T>()
}
let name = type_name_of(f);
let name = name.strip_suffix("::f").unwrap_or(name);
let name = name.strip_suffix("::{{closure}}").unwrap_or(name);
name
}};
}
/// Detect a test timeout and panic with a message that includes the test name.
/// By default, the test timeout is 300 seconds (5 minutes), but any value may
/// be specified as an argument to this function.
#[macro_export]
macro_rules! timeout {
( $($timeout:literal)? ) => {
let _test_timeout_holder = {
let function = $crate::function!();
let timeout: &[u64] = &[$($timeout)?];
let timeout = *timeout.get(0).unwrap_or(&300);
$crate::test_runner::with_timeout(
function.to_string(),
::std::time::Duration::from_secs(timeout)
)
};
};
}
/// Declares a `#[test]` fn that runs a `CheckOutputIntegrationTest` built
/// from the given field/value pairs, then asserts its exit code and output
/// (matched against a file via `output`, or inline text via `output_str`).
#[macro_export]
macro_rules! itest(
  ($name:ident {$( $key:ident: $value:expr,)*}) => {
    #[test]
    fn $name() {
      $crate::timeout!();
      let test = $crate::CheckOutputIntegrationTest {
        $(
          $key: $value,
        )*
        .. Default::default()
      };
      let output = test.output();
      output.assert_exit_code(test.exit_code);
      if !test.output.is_empty() {
        // `output` (a file) and `output_str` (inline) are mutually exclusive
        assert!(test.output_str.is_none());
        output.assert_matches_file(test.output);
      } else {
        output.assert_matches_text(test.output_str.unwrap_or(""));
      }
    }
  }
);
/// Builds a `TestContext` from field/value pairs, defaulting all other
/// `TestContextOptions` fields.
#[macro_export]
macro_rules! context(
  ({$( $key:ident: $value:expr,)*}) => {
    $crate::TestContext::create($crate::TestContextOptions {
      $(
        $key: $value,
      )*
      .. Default::default()
    })
  }
);
/// Builds a `CheckOutputIntegrationTestCommandStep` from field/value pairs,
/// defaulting the remaining fields.
#[macro_export]
macro_rules! command_step(
  ({$( $key:ident: $value:expr,)*}) => {
    $crate::CheckOutputIntegrationTestCommandStep {
      $(
        $key: $value,
      )*
      .. Default::default()
    }
  }
);
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/pty.rs | tests/util/server/src/pty.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::collections::HashMap;
use std::collections::HashSet;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use std::time::Duration;
use std::time::Instant;
use crate::IS_CI;
use crate::eprintln;
use crate::strip_ansi_codes;
// Pty size as (rows, cols) — passed to set_winsize/CreatePseudoConsole below.
const PTY_ROWS_COLS: (u16, u16) = (500, 800);

/// Points to know about when writing pty tests:
///
/// - Consecutive writes cause issues where you might write while a prompt
///   is not showing. So when you write, always `.expect(...)` on the output.
/// - Similar to the last point, using `.expect(...)` can help make the test
///   more deterministic. If the test is flaky, try adding more `.expect(...)`s
pub struct Pty {
  // platform-specific pty implementation (unix fd pair or Windows ConPTY)
  pty: Box<dyn SystemPty>,
  // all raw bytes read from the pty so far (ANSI codes included)
  read_bytes: Vec<u8>,
  // index into the ANSI-stripped text up to which output has been consumed
  last_index: usize,
}
impl Pty {
  /// Spawns `program` with `args` in `cwd` on a fresh pty, optionally with
  /// the given environment variables. When starting a (non-quiet) repl,
  /// waits for the repl banner before returning so writes don't race startup.
  pub fn new(
    program: &Path,
    args: &[&str],
    cwd: &Path,
    env_vars: Option<HashMap<String, String>>,
  ) -> Self {
    let pty = create_pty(program, args, cwd, env_vars);
    let mut pty = Self {
      pty,
      read_bytes: Vec::new(),
      last_index: 0,
    };

    // NOTE(review): `&&` binds tighter than `||`, so this also waits when
    // `args` is empty entirely (not just for `repl` without `--quiet`) —
    // confirm the bare-`args` case is intended.
    if args.is_empty() || args[0] == "repl" && !args.contains(&"--quiet") {
      // wait for the repl to start up before writing to it
      pty.read_until_condition_with_timeout(
        |pty| {
          pty
            .all_output()
            .contains("exit using ctrl+d, ctrl+c, or close()")
        },
        // it sometimes takes a while to startup on the CI, so use a longer timeout
        Duration::from_secs(60),
      );
    }

    pty
  }

  /// Whether pty tests can run in the current environment.
  pub fn is_supported() -> bool {
    let is_windows = cfg!(windows);
    if is_windows && *IS_CI {
      // the pty tests don't really start up on the windows CI for some reason
      // so ignore them for now
      eprintln!("Ignoring windows CI.");
      false
    } else {
      true
    }
  }

  /// Writes `line` to the pty exactly as given (normalizing `\n` to `\r\n`
  /// on Windows) and flushes.
  #[track_caller]
  pub fn write_raw(&mut self, line: impl AsRef<str>) {
    let line = if cfg!(windows) {
      line.as_ref().replace("\r\n", "\n").replace('\n', "\r\n")
    } else {
      line.as_ref().to_string()
    };
    if let Err(err) = self.pty.write(line.as_bytes()) {
      panic!("{:#}", err)
    }
    self.pty.flush().unwrap();
  }

  /// Pause for a human-like delay to read or react to something (human responses are ~100ms).
  #[track_caller]
  pub fn human_delay(&mut self) {
    std::thread::sleep(Duration::from_millis(250));
  }

  /// Writes a line and then expects each of its lines to show up in the
  /// output (pty echo), which keeps tests deterministic.
  #[track_caller]
  pub fn write_line(&mut self, line: impl AsRef<str>) {
    self.write_line_raw(&line);

    // expect what was written to show up in the output
    // due to "pty echo"
    for line in line.as_ref().lines() {
      self.expect(line);
    }
  }

  /// Writes a line without checking if it's in the output.
  #[track_caller]
  pub fn write_line_raw(&mut self, line: impl AsRef<str>) {
    self.write_raw(format!("{}\n", line.as_ref()));
  }

  /// Reads (and consumes) output until `end_text` appears, returning the
  /// consumed text including `end_text` itself. Panics on timeout.
  #[track_caller]
  pub fn read_until(&mut self, end_text: impl AsRef<str>) -> String {
    self.read_until_with_advancing(|text| {
      text
        .find(end_text.as_ref())
        .map(|index| index + end_text.as_ref().len())
    })
  }

  /// Asserts `text` eventually appears in the output, consuming through it.
  #[track_caller]
  pub fn expect(&mut self, text: impl AsRef<str>) {
    self.read_until(text.as_ref());
  }

  /// Waits until any one of `texts` appears in the output.
  #[track_caller]
  pub fn expect_any(&mut self, texts: &[&str]) {
    self.read_until_with_advancing(|text| {
      for find_text in texts {
        if let Some(index) = text.find(find_text) {
          return Some(index);
        }
      }
      None
    });
  }

  /// Consumes and expects to find all the text until a timeout is hit.
  #[track_caller]
  pub fn expect_all(&mut self, texts: &[&str]) {
    let mut pending_texts: HashSet<&&str> = HashSet::from_iter(texts);
    let mut max_index: Option<usize> = None;
    self.read_until_with_advancing(|text| {
      for pending_text in pending_texts.clone() {
        if let Some(index) = text.find(pending_text) {
          let index = index + pending_text.len();
          // track the furthest end index so everything found gets consumed
          match &max_index {
            Some(current) => {
              if *current < index {
                max_index = Some(index);
              }
            }
            None => {
              max_index = Some(index);
            }
          }
          pending_texts.remove(pending_text);
        }
      }
      if pending_texts.is_empty() {
        max_index
      } else {
        None
      }
    });
  }

  /// Expects the raw text to be found, which may include ANSI codes.
  /// Note: this expects the raw bytes in any output that has already
  /// occurred or may occur within the next few seconds.
  #[track_caller]
  pub fn expect_raw_in_current_output(&mut self, text: impl AsRef<str>) {
    self.read_until_condition(|pty| {
      let data = String::from_utf8_lossy(&pty.read_bytes);
      data.contains(text.as_ref())
    });
  }

  /// Expects the raw text to be found next.
  #[track_caller]
  pub fn expect_raw_next(&mut self, text: impl AsRef<str>) {
    let expected = text.as_ref();
    // only compare bytes arriving after this point
    let last_index = self.read_bytes.len();
    self.read_until_condition(|pty| {
      if pty.read_bytes.len() >= last_index + expected.len() {
        let data = String::from_utf8_lossy(
          &pty.read_bytes[last_index..last_index + expected.len()],
        );
        data == expected
      } else {
        false
      }
    });
  }

  /// All raw output read so far (lossy utf-8, ANSI codes included).
  pub fn all_output(&self) -> Cow<'_, str> {
    String::from_utf8_lossy(&self.read_bytes)
  }

  // Repeatedly polls the unread (ANSI-stripped) text with `condition`;
  // when it yields an end index, consumes up to it and returns the
  // consumed portion.
  #[track_caller]
  fn read_until_with_advancing(
    &mut self,
    mut condition: impl FnMut(&str) -> Option<usize>,
  ) -> String {
    let mut final_text = String::new();
    self.read_until_condition(|pty| {
      let text = pty.next_text();
      if let Some(end_index) = condition(&text) {
        pty.last_index += end_index;
        final_text = text[..end_index].to_string();
        true
      } else {
        false
      }
    });
    final_text
  }

  // Polls `condition` with the default timeout (longer on CI).
  #[track_caller]
  fn read_until_condition(&mut self, condition: impl FnMut(&mut Self) -> bool) {
    let duration = if *IS_CI {
      Duration::from_secs(30)
    } else {
      Duration::from_secs(15)
    };
    self.read_until_condition_with_timeout(condition, duration);
  }

  // Like `try_read_until_condition_with_timeout`, but panics on timeout.
  #[track_caller]
  fn read_until_condition_with_timeout(
    &mut self,
    condition: impl FnMut(&mut Self) -> bool,
    timeout_duration: Duration,
  ) {
    if self.try_read_until_condition_with_timeout(condition, timeout_duration) {
      return;
    }

    panic!("Timed out.")
  }

  /// Reads until the specified condition with a timeout duration returning
  /// `true` on success or `false` on timeout.
  fn try_read_until_condition_with_timeout(
    &mut self,
    mut condition: impl FnMut(&mut Self) -> bool,
    timeout_duration: Duration,
  ) -> bool {
    let timeout_time = Instant::now().checked_add(timeout_duration).unwrap();
    while Instant::now() < timeout_time {
      self.fill_more_bytes();
      if condition(self) {
        return true;
      }
    }

    // timed out — dump everything read so far to aid debugging
    let text = self.next_text();
    eprintln!(
      "------ Start Full Text ------\n{:?}\n------- End Full Text -------",
      String::from_utf8_lossy(&self.read_bytes)
    );
    eprintln!("Next text: {:?}", text);
    false
  }

  // The not-yet-consumed portion of the output with ANSI codes stripped.
  // NOTE(review): `last_index` indexes into the stripped text, which is
  // recomputed from all bytes each call — assumes stripping a longer
  // buffer never changes the already-consumed prefix.
  fn next_text(&self) -> String {
    let text = String::from_utf8_lossy(&self.read_bytes).to_string();
    let text = strip_ansi_codes(&text);
    text[self.last_index..].to_string()
  }

  // Attempts a non-blocking read from the pty; sleeps briefly when no
  // data is available so polling doesn't spin.
  fn fill_more_bytes(&mut self) {
    let mut buf = [0; 256];
    match self.pty.read(&mut buf) {
      Ok(count) if count > 0 => {
        self.read_bytes.extend(&buf[..count]);
      }
      _ => {
        // be a bit easier on the CI
        std::thread::sleep(Duration::from_millis(if *IS_CI {
          100
        } else {
          20
        }));
      }
    }
  }
}
// A platform pty endpoint that test code can read output from and write
// input to.
trait SystemPty: Read + Write {}

impl SystemPty for std::fs::File {}
/// Configures the pty main fd: disables canonical ("cooked") input mode and
/// makes reads non-blocking so polling reads can time out.
#[cfg(unix)]
fn setup_pty(fd: i32) {
  use nix::fcntl::FcntlArg;
  use nix::fcntl::OFlag;
  use nix::fcntl::fcntl;
  use nix::sys::termios;
  use nix::sys::termios::SetArg;
  use nix::sys::termios::tcgetattr;
  use nix::sys::termios::tcsetattr;

  // SAFETY: Nix crate requires value to implement the AsFd trait
  let as_fd = unsafe { std::os::fd::BorrowedFd::borrow_raw(fd) };
  let mut term = tcgetattr(as_fd).unwrap();
  // disable cooked mode
  term.local_flags.remove(termios::LocalFlags::ICANON);
  tcsetattr(as_fd, SetArg::TCSANOW, &term).unwrap();

  // turn on non-blocking mode so we get timeouts
  let flags = fcntl(fd, FcntlArg::F_GETFL).unwrap();
  let new_flags = OFlag::from_bits_truncate(flags) | OFlag::O_NONBLOCK;
  fcntl(fd, FcntlArg::F_SETFL(new_flags)).unwrap();
}
/// Sets the terminal window size (rows/cols) on `fd` via the `TIOCSWINSZ`
/// ioctl, returning the OS error on failure.
#[cfg(unix)]
fn set_winsize(
  fd: std::os::fd::RawFd,
  rows: u16,
  cols: u16,
) -> std::io::Result<()> {
  let ws = libc::winsize {
    ws_row: rows,
    ws_col: cols,
    ws_xpixel: 0,
    ws_ypixel: 0,
  };

  // SAFETY: set windows size
  if unsafe { libc::ioctl(fd, libc::TIOCSWINSZ, &ws) == -1 } {
    return Err(std::io::Error::last_os_error());
  }

  Ok(())
}
/// Opens a pty main/secondary pair and spawns `program` with its
/// stdin/stdout/stderr attached to the secondary side, returning the main
/// side (wrapped so the child is SIGTERMed on drop).
#[cfg(unix)]
fn create_pty(
  program: &Path,
  args: &[&str],
  cwd: &Path,
  env_vars: Option<HashMap<String, String>>,
) -> Box<dyn SystemPty> {
  use std::os::unix::process::CommandExt;

  use crate::pty::unix::UnixPty;

  // Manually open pty main/secondary sides in the test process. Since we're not actually
  // changing uid/gid here, this is the easiest way to do it.
  // SAFETY: Posix APIs
  let (fdm, fds) = unsafe {
    let fdm = libc::posix_openpt(libc::O_RDWR);
    if fdm < 0 {
      panic!("posix_openpt failed");
    }
    let res = libc::grantpt(fdm);
    if res != 0 {
      panic!("grantpt failed");
    }
    let res = libc::unlockpt(fdm);
    if res != 0 {
      panic!("unlockpt failed");
    }
    let fds = libc::open(libc::ptsname(fdm), libc::O_RDWR);
    // fix: this previously re-checked `fdm` (already validated above), so a
    // failed open of the secondary side went undetected
    if fds < 0 {
      panic!("open(ptsname) failed");
    }
    (fdm, fds)
  };

  // SAFETY: Posix APIs
  unsafe {
    #[allow(clippy::zombie_processes)]
    let cmd = std::process::Command::new(program)
      .current_dir(cwd)
      .args(args)
      .envs(env_vars.unwrap_or_default())
      .pre_exec(move || {
        set_winsize(fds, PTY_ROWS_COLS.0, PTY_ROWS_COLS.1)?;
        // Close parent's main handle
        libc::close(fdm);
        // attach the child's stdio to the pty secondary side
        libc::dup2(fds, 0);
        libc::dup2(fds, 1);
        libc::dup2(fds, 2);
        // Note that we could close `fds` here as well, but this is a short-lived process and
        // we're just not going to worry about "leaking" it
        Ok(())
      })
      .spawn()
      .unwrap();

    // Close child's secondary handle
    libc::close(fds);
    setup_pty(fdm);

    use std::os::fd::FromRawFd;
    let pid = nix::unistd::Pid::from_raw(cmd.id() as _);
    let file = std::fs::File::from_raw_fd(fdm);
    Box::new(UnixPty { pid, file })
  }
}
#[cfg(unix)]
mod unix {
  use std::io::Read;
  use std::io::Write;

  use super::SystemPty;

  /// Main side of a unix pty plus the child pid, so the spawned process is
  /// terminated when the pty is dropped.
  pub struct UnixPty {
    pub pid: nix::unistd::Pid,
    pub file: std::fs::File,
  }

  impl Drop for UnixPty {
    fn drop(&mut self) {
      use nix::sys::signal::Signal;
      use nix::sys::signal::kill;
      // make sure the child doesn't outlive the test
      kill(self.pid, Signal::SIGTERM).unwrap()
    }
  }

  impl SystemPty for UnixPty {}

  impl Read for UnixPty {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
      self.file.read(buf)
    }
  }

  impl Write for UnixPty {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
      self.file.write(buf)
    }

    fn flush(&mut self) -> std::io::Result<()> {
      self.file.flush()
    }
  }
}
/// Windows implementation: spawns the process attached to a pseudo console.
#[cfg(target_os = "windows")]
fn create_pty(
  program: &Path,
  args: &[&str],
  cwd: &Path,
  env_vars: Option<HashMap<String, String>>,
) -> Box<dyn SystemPty> {
  let pty = windows::WinPseudoConsole::new(program, args, cwd, env_vars);
  Box::new(pty)
}
#[cfg(target_os = "windows")]
mod windows {
  use std::collections::HashMap;
  use std::io::ErrorKind;
  use std::io::Read;
  use std::path::Path;
  use std::ptr;
  use std::time::Duration;

  use winapi::shared::minwindef::FALSE;
  use winapi::shared::minwindef::LPVOID;
  use winapi::shared::minwindef::TRUE;
  use winapi::shared::winerror::S_OK;
  use winapi::um::consoleapi::ClosePseudoConsole;
  use winapi::um::consoleapi::CreatePseudoConsole;
  use winapi::um::fileapi::FlushFileBuffers;
  use winapi::um::fileapi::ReadFile;
  use winapi::um::fileapi::WriteFile;
  use winapi::um::handleapi::DuplicateHandle;
  use winapi::um::handleapi::INVALID_HANDLE_VALUE;
  use winapi::um::namedpipeapi::CreatePipe;
  use winapi::um::namedpipeapi::PeekNamedPipe;
  use winapi::um::processthreadsapi::CreateProcessW;
  use winapi::um::processthreadsapi::DeleteProcThreadAttributeList;
  use winapi::um::processthreadsapi::GetCurrentProcess;
  use winapi::um::processthreadsapi::InitializeProcThreadAttributeList;
  use winapi::um::processthreadsapi::LPPROC_THREAD_ATTRIBUTE_LIST;
  use winapi::um::processthreadsapi::PROCESS_INFORMATION;
  use winapi::um::processthreadsapi::UpdateProcThreadAttribute;
  use winapi::um::synchapi::WaitForSingleObject;
  use winapi::um::winbase::CREATE_UNICODE_ENVIRONMENT;
  use winapi::um::winbase::EXTENDED_STARTUPINFO_PRESENT;
  use winapi::um::winbase::INFINITE;
  use winapi::um::winbase::STARTUPINFOEXW;
  use winapi::um::wincontypes::COORD;
  use winapi::um::wincontypes::HPCON;
  use winapi::um::winnt::DUPLICATE_SAME_ACCESS;
  use winapi::um::winnt::HANDLE;

  use super::PTY_ROWS_COLS;
  use super::SystemPty;
  use crate::print::spawn_thread;

  // Panics with the last OS error when a winapi call doesn't return TRUE.
  macro_rules! assert_win_success {
    ($expression:expr) => {
      let success = $expression;
      if success != TRUE {
        panic!("{}", std::io::Error::last_os_error().to_string())
      }
    };
  }

  // Early-returns the last OS error when a winapi call doesn't return TRUE.
  macro_rules! handle_err {
    ($expression:expr) => {
      let success = $expression;
      if success != TRUE {
        return Err(std::io::Error::last_os_error());
      }
    };
  }

  /// A child process attached to a Windows pseudo console (ConPTY),
  /// exposing its stdin/stdout through pipe handles.
  pub struct WinPseudoConsole {
    stdin_write_handle: WinHandle,
    stdout_read_handle: WinHandle,
    // keep these alive for the duration of the pseudo console
    _process_handle: WinHandle,
    _thread_handle: WinHandle,
    _attribute_list: ProcThreadAttributeList,
  }

  impl WinPseudoConsole {
    /// Spawns `program` under a new pseudo console sized to `PTY_ROWS_COLS`.
    pub fn new(
      program: &Path,
      args: &[&str],
      cwd: &Path,
      maybe_env_vars: Option<HashMap<String, String>>,
    ) -> Self {
      // https://docs.microsoft.com/en-us/windows/console/creating-a-pseudoconsole-session
      // SAFETY:
      // Generous use of winapi to create a PTY (thus large unsafe block).
      unsafe {
        let mut size: COORD = std::mem::zeroed();
        size.Y = PTY_ROWS_COLS.0 as i16;
        size.X = PTY_ROWS_COLS.1 as i16;
        let mut console_handle = std::ptr::null_mut();
        // pipes wiring our process to the pseudo console's stdin/stdout
        let (stdin_read_handle, stdin_write_handle) = create_pipe();
        let (stdout_read_handle, stdout_write_handle) = create_pipe();

        let result = CreatePseudoConsole(
          size,
          stdin_read_handle.as_raw_handle(),
          stdout_write_handle.as_raw_handle(),
          0,
          &mut console_handle,
        );
        assert_eq!(result, S_OK);

        let mut environment_vars = maybe_env_vars.map(get_env_vars);
        let mut attribute_list = ProcThreadAttributeList::new(console_handle);
        let mut startup_info: STARTUPINFOEXW = std::mem::zeroed();
        startup_info.StartupInfo.cb =
          std::mem::size_of::<STARTUPINFOEXW>() as u32;
        startup_info.lpAttributeList = attribute_list.as_mut_ptr();

        let mut proc_info: PROCESS_INFORMATION = std::mem::zeroed();
        // quote the program and each argument on the command line
        let command = format!(
          "\"{}\" {}",
          program.to_string_lossy(),
          args
            .iter()
            .map(|a| format!("\"{}\"", a))
            .collect::<Vec<_>>()
            .join(" ")
        )
        .trim()
        .to_string();
        let mut application_str = to_windows_str(&program.to_string_lossy());
        let mut command_str = to_windows_str(&command);
        let cwd = cwd.to_string_lossy().replace('/', "\\");
        let mut cwd = to_windows_str(&cwd);

        assert_win_success!(CreateProcessW(
          application_str.as_mut_ptr(),
          command_str.as_mut_ptr(),
          ptr::null_mut(),
          ptr::null_mut(),
          FALSE,
          EXTENDED_STARTUPINFO_PRESENT | CREATE_UNICODE_ENVIRONMENT,
          environment_vars
            .as_mut()
            .map(|v| v.as_mut_ptr() as LPVOID)
            .unwrap_or(ptr::null_mut()),
          cwd.as_mut_ptr(),
          &mut startup_info.StartupInfo,
          &mut proc_info,
        ));

        // close the handles that the pseudoconsole now has
        drop(stdin_read_handle);
        drop(stdout_write_handle);

        // start a thread that will close the pseudoconsole on process exit
        let thread_handle = WinHandle::new(proc_info.hThread);
        spawn_thread({
          let thread_handle = thread_handle.duplicate();
          let console_handle = WinHandle::new(console_handle);
          move || {
            WaitForSingleObject(thread_handle.as_raw_handle(), INFINITE);
            // wait for the reading thread to catch up
            std::thread::sleep(Duration::from_millis(200));
            // close the console handle which will close the
            // stdout pipe for the reader
            ClosePseudoConsole(console_handle.into_raw_handle());
          }
        });

        Self {
          stdin_write_handle,
          stdout_read_handle,
          _process_handle: WinHandle::new(proc_info.hProcess),
          _thread_handle: thread_handle,
          _attribute_list: attribute_list,
        }
      }
    }
  }

  impl Read for WinPseudoConsole {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
      // don't do a blocking read in order to support timing out
      let mut bytes_available = 0;
      // SAFETY: winapi call
      handle_err!(unsafe {
        PeekNamedPipe(
          self.stdout_read_handle.as_raw_handle(),
          ptr::null_mut(),
          0,
          ptr::null_mut(),
          &mut bytes_available,
          ptr::null_mut(),
        )
      });
      if bytes_available == 0 {
        return Err(std::io::Error::new(ErrorKind::WouldBlock, "Would block."));
      }

      let mut bytes_read = 0;
      // SAFETY: winapi call
      handle_err!(unsafe {
        ReadFile(
          self.stdout_read_handle.as_raw_handle(),
          buf.as_mut_ptr() as _,
          buf.len() as u32,
          &mut bytes_read,
          ptr::null_mut(),
        )
      });

      Ok(bytes_read as usize)
    }
  }

  impl SystemPty for WinPseudoConsole {}

  impl std::io::Write for WinPseudoConsole {
    fn write(&mut self, buffer: &[u8]) -> std::io::Result<usize> {
      let mut bytes_written = 0;
      // SAFETY:
      // winapi call
      handle_err!(unsafe {
        WriteFile(
          self.stdin_write_handle.as_raw_handle(),
          buffer.as_ptr() as *const _,
          buffer.len() as u32,
          &mut bytes_written,
          ptr::null_mut(),
        )
      });
      Ok(bytes_written as usize)
    }

    fn flush(&mut self) -> std::io::Result<()> {
      // SAFETY: winapi call
      handle_err!(unsafe {
        FlushFileBuffers(self.stdin_write_handle.as_raw_handle())
      });
      Ok(())
    }
  }

  /// Owned Windows HANDLE, closed on drop.
  struct WinHandle {
    inner: HANDLE,
  }

  impl WinHandle {
    pub fn new(handle: HANDLE) -> Self {
      WinHandle { inner: handle }
    }

    /// Duplicates the handle within the current process.
    pub fn duplicate(&self) -> WinHandle {
      // SAFETY: winapi call
      let process_handle = unsafe { GetCurrentProcess() };
      let mut duplicate_handle = ptr::null_mut();
      // SAFETY: winapi call
      assert_win_success!(unsafe {
        DuplicateHandle(
          process_handle,
          self.inner,
          process_handle,
          &mut duplicate_handle,
          0,
          0,
          DUPLICATE_SAME_ACCESS,
        )
      });

      WinHandle::new(duplicate_handle)
    }

    pub fn as_raw_handle(&self) -> HANDLE {
      self.inner
    }

    /// Releases ownership of the raw handle without closing it.
    pub fn into_raw_handle(self) -> HANDLE {
      let handle = self.inner;
      // skip the drop implementation in order to not close the handle
      std::mem::forget(self);
      handle
    }
  }

  // SAFETY: These handles are ok to send across threads.
  unsafe impl Send for WinHandle {}
  // SAFETY: These handles are ok to send across threads.
  unsafe impl Sync for WinHandle {}

  impl Drop for WinHandle {
    fn drop(&mut self) {
      if !self.inner.is_null() && self.inner != INVALID_HANDLE_VALUE {
        // SAFETY: winapi call
        unsafe {
          winapi::um::handleapi::CloseHandle(self.inner);
        }
      }
    }
  }

  /// Owned `PROC_THREAD_ATTRIBUTE_LIST` buffer carrying the pseudo console
  /// attribute passed to `CreateProcessW`.
  struct ProcThreadAttributeList {
    buffer: Vec<u8>,
  }

  impl ProcThreadAttributeList {
    pub fn new(console_handle: HPCON) -> Self {
      // SAFETY:
      // Generous use of unsafe winapi calls to create a ProcThreadAttributeList.
      unsafe {
        // discover size required for the list
        let mut size = 0;
        let attribute_count = 1;
        // first call intentionally fails (null buffer) and reports the size
        assert_eq!(
          InitializeProcThreadAttributeList(
            ptr::null_mut(),
            attribute_count,
            0,
            &mut size
          ),
          FALSE
        );

        let mut buffer = vec![0u8; size];
        let attribute_list_ptr = buffer.as_mut_ptr() as _;
        assert_win_success!(InitializeProcThreadAttributeList(
          attribute_list_ptr,
          attribute_count,
          0,
          &mut size,
        ));

        const PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE: usize = 0x00020016;
        assert_win_success!(UpdateProcThreadAttribute(
          attribute_list_ptr,
          0,
          PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE,
          console_handle,
          std::mem::size_of::<HPCON>(),
          ptr::null_mut(),
          ptr::null_mut(),
        ));

        ProcThreadAttributeList { buffer }
      }
    }

    pub fn as_mut_ptr(&mut self) -> LPPROC_THREAD_ATTRIBUTE_LIST {
      self.buffer.as_mut_slice().as_mut_ptr() as *mut _
    }
  }

  impl Drop for ProcThreadAttributeList {
    fn drop(&mut self) {
      // SAFETY: winapi call
      unsafe { DeleteProcThreadAttributeList(self.as_mut_ptr()) };
    }
  }

  /// Creates an anonymous pipe, returning `(read, write)` handles.
  fn create_pipe() -> (WinHandle, WinHandle) {
    let mut read_handle = std::ptr::null_mut();
    let mut write_handle = std::ptr::null_mut();
    // SAFETY: Creating an anonymous pipe with winapi.
    assert_win_success!(unsafe {
      CreatePipe(&mut read_handle, &mut write_handle, ptr::null_mut(), 0)
    });
    (WinHandle::new(read_handle), WinHandle::new(write_handle))
  }

  /// Encodes a str as a null-terminated wide (utf-16) string.
  fn to_windows_str(str: &str) -> Vec<u16> {
    use std::os::windows::prelude::OsStrExt;
    std::ffi::OsStr::new(str)
      .encode_wide()
      .chain(Some(0))
      .collect()
  }

  /// Builds a `CreateProcessW` environment block (`name=value\0`... `\0`)
  /// as utf-16.
  fn get_env_vars(env_vars: HashMap<String, String>) -> Vec<u16> {
    // See lpEnvironment: https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-createprocessw
    let mut parts = env_vars
      .into_iter()
      // each environment variable is in the form `name=value\0`
      .map(|(key, value)| format!("{key}={value}\0"))
      .collect::<Vec<_>>();

    // all strings in an environment block must be case insensitively
    // sorted alphabetically by name
    // https://docs.microsoft.com/en-us/windows/win32/procthread/changing-environment-variables
    parts.sort_by_key(|part| part.to_lowercase());

    // the entire block is terminated by NULL (\0)
    format!("{}\0", parts.join(""))
      .encode_utf16()
      .collect::<Vec<_>>()
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/semaphore.rs | tests/util/server/src/semaphore.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use parking_lot::Condvar;
use parking_lot::Mutex;
// Interior state guarded by the semaphore's mutex: `used` permits currently
// held out of a `max` ceiling.
struct Permits {
  max: usize,
  used: usize,
}
/// RAII guard for one acquired permit; the permit is released when the
/// guard is dropped.
pub struct Permit<'a>(&'a Semaphore);

impl Drop for Permit<'_> {
  fn drop(&mut self) {
    let mut state = self.0.permits.lock();
    // defensive underflow guard (NOTE(review): unclear when this can occur)
    if state.used == 0 {
      return;
    }
    state.used -= 1;
    let has_room = state.used < state.max;
    drop(state);
    if has_room {
      // a slot opened up — wake one waiter blocked in `acquire`
      self.0.condvar.notify_one();
    }
  }
}
// Counting semaphore built on parking_lot's Mutex + Condvar; waiters block
// in `acquire` until a permit frees up or `set_max` grows the pool.
pub struct Semaphore {
  permits: Mutex<Permits>,
  condvar: Condvar,
}
impl Semaphore {
  /// Creates a semaphore allowing at most `max_permits` concurrent holders.
  pub fn new(max_permits: usize) -> Self {
    Self {
      permits: Mutex::new(Permits {
        max: max_permits,
        used: 0,
      }),
      condvar: Condvar::new(),
    }
  }

  /// Blocks until a permit is available, returning a guard that releases
  /// it on drop.
  pub fn acquire(&self) -> Permit<'_> {
    let mut state = self.permits.lock();
    // standard condvar wait loop; re-check after every wakeup
    while state.used >= state.max {
      self.condvar.wait(&mut state);
    }
    state.used += 1;
    drop(state);
    Permit(self)
  }

  /// Adjusts the maximum permit count, waking all waiters when it grows.
  pub fn set_max(&self, n: usize) {
    let grew;
    {
      let mut state = self.permits.lock();
      grew = n > state.max;
      state.max = n;
    }
    if grew {
      self.condvar.notify_all(); // Wake up waiting threads
    }
  }
}
#[cfg(test)]
mod tests {
  use std::sync::Arc;
  use std::thread;
  use std::time::Duration;

  use super::*;

  // permits can be acquired up to the max and released in any order
  #[test]
  fn test_multiple_acquire_release() {
    let sem = Semaphore::new(3);
    let permit1 = sem.acquire();
    let permit2 = sem.acquire();
    let permit3 = sem.acquire();
    drop(permit3);
    drop(permit2);
    drop(permit1);
  }

  // more threads than permits still all complete
  #[test]
  fn test_concurrent_access() {
    let sem = Arc::new(Semaphore::new(2));
    let mut handles = vec![];
    for _ in 0..5 {
      let sem_clone = Arc::clone(&sem);
      #[allow(clippy::disallowed_methods)]
      let handle = thread::spawn(move || {
        let _perimt = sem_clone.acquire();
        thread::sleep(Duration::from_millis(10));
      });
      handles.push(handle);
    }
    for handle in handles {
      handle.join().unwrap();
    }
  }

  // a second acquire on a full semaphore blocks until the permit drops
  #[test]
  fn test_blocking_behavior() {
    let sem = Arc::new(Semaphore::new(1));
    let sem_clone = Arc::clone(&sem);
    let permit = sem.acquire();
    #[allow(clippy::disallowed_methods)]
    let handle = thread::spawn(move || {
      let start = std::time::Instant::now();
      let _permit = sem_clone.acquire();
      start.elapsed()
    });
    thread::sleep(Duration::from_millis(50));
    drop(permit);
    let elapsed = handle.join().unwrap();
    // the blocked thread should have waited roughly until the drop
    assert!(elapsed >= Duration::from_millis(40));
  }

  // growing max unblocks a waiter
  #[test]
  fn test_set_max_increase() {
    let sem = Arc::new(Semaphore::new(1));
    let sem_clone = Arc::clone(&sem);
    let permit = sem.acquire();
    #[allow(clippy::disallowed_methods)]
    let handle = thread::spawn(move || {
      let _permit = sem_clone.acquire();
    });
    thread::sleep(Duration::from_millis(10));
    sem.set_max(2);
    handle.join().unwrap();
    drop(permit);
  }

  // shrinking below the held count must not panic on release
  #[test]
  fn test_set_max_decrease() {
    let sem = Semaphore::new(3);
    let permit1 = sem.acquire();
    let permit2 = sem.acquire();
    sem.set_max(1);
    drop(permit1);
    drop(permit2);
  }

  // a zero-permit semaphore only admits after set_max grows it
  #[test]
  fn test_zero_permits_with_set_max() {
    let sem = Arc::new(Semaphore::new(0));
    let sem_clone = Arc::clone(&sem);
    #[allow(clippy::disallowed_methods)]
    let handle = thread::spawn(move || {
      let _permit = sem_clone.acquire();
    });
    thread::sleep(Duration::from_millis(10));
    sem.set_max(1);
    handle.join().unwrap();
  }

  // all waiters eventually proceed through a single-permit semaphore
  #[test]
  fn test_multiple_threads_wait_and_proceed() {
    let sem = Arc::new(Semaphore::new(1));
    let counter = Arc::new(Mutex::new(0));
    let mut handles = vec![];
    for _ in 0..10 {
      let sem_clone = Arc::clone(&sem);
      let counter_clone = Arc::clone(&counter);
      #[allow(clippy::disallowed_methods)]
      let handle = thread::spawn(move || {
        let _permit = sem_clone.acquire();
        let mut count = counter_clone.lock();
        *count += 1;
        thread::sleep(Duration::from_millis(5));
        drop(count);
      });
      handles.push(handle);
    }
    for handle in handles {
      handle.join().unwrap();
    }
    let final_count = *counter.lock();
    assert_eq!(final_count, 10);
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/npm.rs | tests/util/server/src/npm.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::collections::HashMap;
use std::fs;
use std::path::Path;
use anyhow::Context;
use anyhow::Result;
use base64::Engine;
use base64::prelude::BASE64_STANDARD;
use flate2::Compression;
use flate2::write::GzEncoder;
use once_cell::sync::Lazy;
use parking_lot::Mutex;
use tar::Builder;
use crate::PathRef;
use crate::root_path;
use crate::tests_path;
// npm scopes served by the test registries
pub const DENOTEST_SCOPE_NAME: &str = "@denotest";
pub const DENOTEST2_SCOPE_NAME: &str = "@denotest2";
pub const DENOTEST3_SCOPE_NAME: &str = "@denotest3";
// version used for the test `@esbuild` packages — see `setup_esbuild_binary`
pub const ESBUILD_VERSION: &str = "0.25.5";

// Public registry backed by `tests/registry/npm`.
pub static PUBLIC_TEST_NPM_REGISTRY: Lazy<TestNpmRegistry> = Lazy::new(|| {
  TestNpmRegistry::new(
    NpmRegistryKind::Public,
    &format!(
      "http://localhost:{}",
      crate::servers::PUBLIC_NPM_REGISTRY_PORT
    ),
    "npm",
  )
});

// Private registries backed by `tests/registry/npm-private{,2,3}`.
pub static PRIVATE_TEST_NPM_REGISTRY_1: Lazy<TestNpmRegistry> =
  Lazy::new(|| {
    TestNpmRegistry::new(
      NpmRegistryKind::Private,
      &format!(
        "http://localhost:{}",
        crate::servers::PRIVATE_NPM_REGISTRY_1_PORT
      ),
      "npm-private",
    )
  });

pub static PRIVATE_TEST_NPM_REGISTRY_2: Lazy<TestNpmRegistry> =
  Lazy::new(|| {
    TestNpmRegistry::new(
      NpmRegistryKind::Private,
      &format!(
        "http://localhost:{}",
        crate::servers::PRIVATE_NPM_REGISTRY_2_PORT
      ),
      "npm-private2",
    )
  });

pub static PRIVATE_TEST_NPM_REGISTRY_3: Lazy<TestNpmRegistry> =
  Lazy::new(|| {
    TestNpmRegistry::new(
      NpmRegistryKind::Private,
      &format!(
        "http://localhost:{}",
        crate::servers::PRIVATE_NPM_REGISTRY_3_PORT
      ),
      "npm-private3",
    )
  });
// Whether a test registry plays the role of the public registry or a
// private one.
pub enum NpmRegistryKind {
  Public,
  Private,
}
// Cached, fully-built package data: the registry JSON document plus the
// tarball bytes keyed by version.
struct CustomNpmPackage {
  pub registry_file: String,
  pub tarballs: HashMap<String, Vec<u8>>,
}
/// Creates tarballs and a registry json file for npm packages
/// in the `tests/registry/npm/@denotest` directory.
pub struct TestNpmRegistry {
  #[allow(unused)]
  kind: NpmRegistryKind,
  // Eg. http://localhost:4544/
  hostname: String,
  /// Path in the tests/registry folder (Eg. npm)
  local_path: String,
  // lazily-built packages, keyed by package name
  cache: Mutex<HashMap<String, CustomNpmPackage>>,
}
impl TestNpmRegistry {
  /// Creates a registry of the given kind served at `hostname` (trailing
  /// slash stripped) and backed by `tests/registry/<local_path>`.
  pub fn new(kind: NpmRegistryKind, hostname: &str, local_path: &str) -> Self {
    let hostname = hostname.strip_suffix('/').unwrap_or(hostname).to_string();

    Self {
      hostname,
      local_path: local_path.to_string(),
      kind,
      cache: Default::default(),
    }
  }

  /// Directory on disk backing this registry.
  pub fn root_dir(&self) -> PathRef {
    tests_path().join("registry").join(&self.local_path)
  }

  /// Tarball bytes for `name@version`, if the package and version exist.
  pub fn tarball_bytes(
    &self,
    name: &str,
    version: &str,
  ) -> Result<Option<Vec<u8>>> {
    Ok(
      self
        .get_package_property(name, |p| p.tarballs.get(version).cloned())?
        .flatten(),
    )
  }

  /// The registry JSON document bytes for the package, if it exists.
  pub fn registry_file(&self, name: &str) -> Result<Option<Vec<u8>>> {
    self.get_package_property(name, |p| p.registry_file.as_bytes().to_vec())
  }

  /// Base url for a package on this registry, with trailing slash.
  pub fn package_url(&self, package_name: &str) -> String {
    // only prepend a scheme when the configured hostname lacks one
    let scheme = if self.hostname.starts_with("http://") {
      ""
    } else {
      "http://"
    };
    format!("{}{}/{}/", scheme, self.hostname, package_name)
  }

  // Lazily builds (and caches) the package data, then projects a value out
  // of it via `func`. Returns Ok(None) when the package doesn't exist.
  fn get_package_property<TResult>(
    &self,
    package_name: &str,
    func: impl FnOnce(&CustomNpmPackage) -> TResult,
  ) -> Result<Option<TResult>> {
    // it's ok if multiple threads race here as they will do the same work twice
    if !self.cache.lock().contains_key(package_name) {
      match get_npm_package(&self.hostname, &self.local_path, package_name)? {
        Some(package) => {
          self.cache.lock().insert(package_name.to_string(), package);
        }
        None => return Ok(None),
      }
    }
    Ok(self.cache.lock().get(package_name).map(func))
  }

  /// Splits a request uri path like `/@denotest/foo/1.0.0.tgz` into a known
  /// test scope and the remainder (`foo/1.0.0.tgz`), handling both `/` and
  /// url-encoded `%2f` separators after the scope.
  pub fn get_test_scope_and_package_name_with_path_from_uri_path<'s>(
    &self,
    uri_path: &'s str,
  ) -> Option<(&'s str, &'s str)> {
    // strips a leading "/<scope>/" or "/<scope>%2f" from the uri path
    // (previously one copy-pasted block per scope; behavior unchanged)
    fn strip_scope<'s>(uri_path: &'s str, scope: &str) -> Option<&'s str> {
      uri_path
        .strip_prefix(&format!("/{}/", scope))
        .or_else(|| uri_path.strip_prefix(&format!("/{}%2f", scope)))
    }

    for scope in [
      DENOTEST_SCOPE_NAME,
      DENOTEST2_SCOPE_NAME,
      DENOTEST3_SCOPE_NAME,
    ] {
      if let Some(package_name_with_path) = strip_scope(uri_path, scope) {
        return Some((scope, package_name_with_path));
      }
    }

    // @types is only recognized for the denotest* packages
    if let Some(package_name_with_path) = strip_scope(uri_path, "@types")
      && package_name_with_path.starts_with("denotest")
    {
      return Some(("@types", package_name_with_path));
    }

    if let Some(package_name_with_path) = strip_scope(uri_path, "@esbuild") {
      return Some(("@esbuild", package_name_with_path));
    }

    None
  }
}
// NOTE: extracted out partially from the `tar` crate, all credits to the original authors
/// Recursively appends `src_path`'s contents to the tar `builder` under
/// `path`, deterministically (sorted entry order, fixed mtime) so the same
/// inputs produce byte-identical tarballs on every platform.
fn append_dir_all<W: std::io::Write>(
  builder: &mut tar::Builder<W>,
  path: &Path,
  src_path: &Path,
) -> Result<()> {
  builder.follow_symlinks(true);
  let mode = tar::HeaderMode::Deterministic;
  builder.mode(mode);
  // iterative walk; each stack entry is (path, is_dir, is_symlink)
  let mut stack = vec![(src_path.to_path_buf(), true, false)];
  let mut entries = Vec::new();
  while let Some((src, is_dir, is_symlink)) = stack.pop() {
    let dest = path.join(src.strip_prefix(src_path).unwrap());
    // In case of a symlink pointing to a directory, is_dir is false, but src.is_dir() will return true
    if is_dir || (is_symlink && src.is_dir()) {
      for entry in fs::read_dir(&src)? {
        let entry = entry?;
        let file_type = entry.file_type()?;
        stack.push((entry.path(), file_type.is_dir(), file_type.is_symlink()));
      }
      // skip the root itself (empty relative dest)
      if dest != Path::new("") {
        entries.push((src, dest));
      }
    } else {
      entries.push((src, dest));
    }
  }
  // sort by destination path for a deterministic archive order
  entries.sort_by(|(_, a), (_, b)| a.cmp(b));
  for (src, dest) in entries {
    let mut header = tar::Header::new_gnu();
    let metadata = src.metadata().with_context(|| {
      format!("trying to get metadata for {}", src.display())
    })?;
    header.set_metadata_in_mode(&metadata, mode);
    // this is what `tar` sets the mtime to on unix in deterministic mode, on windows it uses a different
    // value, which causes the tarball to have a different hash on windows. force it to be the same
    // to ensure the same output on all platforms
    header.set_mtime(1153704088);

    // files stream their contents; directories get an empty reader
    let data = if src.is_file() {
      Box::new(
        fs::File::open(&src)
          .with_context(|| format!("trying to open file {}", src.display()))?,
      ) as Box<dyn std::io::Read>
    } else {
      Box::new(std::io::empty()) as Box<dyn std::io::Read>
    };
    builder
      .append_data(&mut header, dest, data)
      .with_context(|| "appending data")?;
  }
  Ok(())
}
/// Builds the tarball bytes and the registry "version info" JSON object
/// (the package.json plus a `dist` entry, and a synthesized `bin` entry
/// when `directories.bin` is used) for one package version folder.
fn create_package_version_info(
  version_folder: &PathRef,
  version: &str,
  package_name: &str,
  registry_hostname: &str,
) -> Result<(Vec<u8>, serde_json::Map<String, serde_json::Value>)> {
  let tarball_bytes = create_tarball_from_dir(version_folder.as_path())?;
  let mut dist = serde_json::Map::new();
  // the special "@denotest/no-shasums" test package gets no checksums
  if package_name != "@denotest/no-shasums" {
    let tarball_checksum = get_tarball_checksum(&tarball_bytes);
    dist.insert(
      "integrity".to_string(),
      format!("sha512-{tarball_checksum}").into(),
    );
    dist.insert("shasum".to_string(), "dummy-value".into());
  }
  dist.insert(
    "tarball".to_string(),
    format!("{registry_hostname}/{package_name}/{version}.tgz").into(),
  );

  let package_json_path = version_folder.join("package.json");
  let package_json_bytes = fs::read(&package_json_path).with_context(|| {
    format!("Error reading package.json at {}", package_json_path)
  })?;
  let package_json_text = String::from_utf8_lossy(&package_json_bytes);
  let mut version_info: serde_json::Map<String, serde_json::Value> =
    serde_json::from_str(&package_json_text)?;
  version_info.insert("dist".to_string(), dist.into());

  // add a bin entry for a directories.bin package.json entry as this
  // is what the npm registry does as well
  if let Some(directories) = version_info.get("directories")
    && !version_info.contains_key("bin")
    && let Some(bin) = directories
      .as_object()
      .and_then(|o| o.get("bin"))
      .and_then(|v| v.as_str())
  {
    let mut bins = serde_json::Map::new();
    for entry in std::fs::read_dir(version_folder.join(bin))? {
      let entry = entry?;
      let file_name = entry.file_name().to_string_lossy().to_string();
      bins.insert(
        file_name.to_string(),
        format!("{}/{}", bin, file_name).into(),
      );
    }
    version_info.insert("bin".into(), bins.into());
  }

  Ok((tarball_bytes, version_info))
}
fn get_esbuild_platform_info(
platform_name: &str,
) -> Option<(&'static str, &'static str, bool)> {
match platform_name {
"linux-x64" => Some(("esbuild-x64", "linux64", false)),
"linux-arm64" => Some(("esbuild-aarch64", "linux64", false)),
"darwin-x64" => Some(("esbuild-x64", "mac", false)),
"darwin-arm64" => Some(("esbuild-aarch64", "mac", false)),
"win32-x64" => Some(("esbuild-x64.exe", "win", true)),
"win32-arm64" => Some(("esbuild-arm64.exe", "win", true)),
_ => None,
}
}
fn setup_esbuild_binary(
package_dir: &Path,
esbuild_prebuilt: &Path,
is_windows: bool,
) -> Result<&'static str> {
let binary_name = if is_windows { "esbuild.exe" } else { "esbuild" };
if is_windows {
std::fs::copy(esbuild_prebuilt, package_dir.join(binary_name))?;
Ok(binary_name)
} else {
let bin_dir = package_dir.join("bin");
std::fs::create_dir_all(&bin_dir)?;
let binary_path = bin_dir.join(binary_name);
std::fs::copy(esbuild_prebuilt, &binary_path)?;
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let mut perms = std::fs::metadata(&binary_path)?.permissions();
perms.set_mode(0o755); // rwxr-xr-x
std::fs::set_permissions(&binary_path, perms)?;
}
Ok("bin/esbuild")
}
}
fn create_tarball_from_dir(package_dir: &Path) -> Result<Vec<u8>> {
let mut tarball_bytes = Vec::new();
{
let mut encoder =
GzEncoder::new(&mut tarball_bytes, Compression::default());
{
let mut builder = Builder::new(&mut encoder);
append_dir_all(&mut builder, Path::new("package"), package_dir)?;
builder.finish()?;
}
encoder.finish()?;
}
Ok(tarball_bytes)
}
fn create_npm_registry_response(
package_name: &str,
version: &str,
description: &str,
bin_path: &str,
tarball_bytes: Vec<u8>,
registry_hostname: &str,
) -> Result<CustomNpmPackage> {
let tarball_checksum = get_tarball_checksum(&tarball_bytes);
let mut dist = serde_json::Map::new();
dist.insert(
"integrity".to_string(),
format!("sha512-{tarball_checksum}").into(),
);
dist.insert("shasum".to_string(), "dummy-value".into());
dist.insert(
"tarball".to_string(),
format!("{registry_hostname}/{package_name}/{version}.tgz").into(),
);
let mut version_info = serde_json::Map::new();
version_info.insert("name".to_string(), package_name.into());
version_info.insert("version".to_string(), version.into());
version_info.insert("description".to_string(), description.into());
version_info.insert("bin".to_string(), bin_path.into());
version_info.insert("dist".to_string(), dist.into());
let mut versions = serde_json::Map::new();
versions.insert(version.to_string(), version_info.into());
let mut dist_tags = serde_json::Map::new();
dist_tags.insert("latest".to_string(), version.into());
let mut registry_file = serde_json::Map::new();
registry_file.insert("name".to_string(), package_name.into());
registry_file.insert("versions".to_string(), versions.into());
registry_file.insert("dist-tags".to_string(), dist_tags.into());
let mut tarballs = HashMap::new();
tarballs.insert(version.to_string(), tarball_bytes);
Ok(CustomNpmPackage {
registry_file: serde_json::to_string(®istry_file)?,
tarballs,
})
}
fn create_esbuild_package(
registry_hostname: &str,
package_name: &str,
) -> Result<Option<CustomNpmPackage>> {
let platform_name = package_name.strip_prefix("@esbuild/").unwrap();
let (bin_name, folder, is_windows) =
match get_esbuild_platform_info(platform_name) {
Some(info) => info,
None => return Ok(None),
};
let esbuild_prebuilt = root_path()
.join("third_party/prebuilt")
.join(folder)
.join(bin_name);
if !esbuild_prebuilt.exists() {
return Ok(None);
}
let temp_dir = tempfile::tempdir()?;
let package_dir = temp_dir.path().join("package");
std::fs::create_dir_all(&package_dir)?;
let bin_path =
setup_esbuild_binary(&package_dir, esbuild_prebuilt.as_path(), is_windows)?;
let package_json = serde_json::json!({
"name": package_name,
"version": ESBUILD_VERSION,
"description": format!("The {} binary for esbuild", platform_name),
"bin": bin_path
});
std::fs::write(
package_dir.join("package.json"),
serde_json::to_string_pretty(&package_json)?,
)?;
let tarball_bytes = create_tarball_from_dir(&package_dir)?;
let package = create_npm_registry_response(
package_name,
ESBUILD_VERSION,
&format!("The {} binary for esbuild", platform_name),
bin_path,
tarball_bytes,
registry_hostname,
)?;
Ok(Some(package))
}
fn get_npm_package(
registry_hostname: &str,
local_path: &str,
package_name: &str,
) -> Result<Option<CustomNpmPackage>> {
if package_name.starts_with("@esbuild/")
&& let Some(esbuild_package) =
create_esbuild_package(registry_hostname, package_name)?
{
return Ok(Some(esbuild_package));
}
let registry_hostname = if package_name == "@denotest/tarballs-privateserver2"
{
"http://localhost:4262"
} else {
registry_hostname
};
let package_folder = tests_path()
.join("registry")
.join(local_path)
.join(package_name);
if !package_folder.exists() {
return Ok(None);
}
// read all the package's versions
let mut tarballs = HashMap::new();
let mut versions = serde_json::Map::new();
let mut latest_version = semver::Version::parse("0.0.0").unwrap();
let mut dist_tags = serde_json::Map::new();
let mut time = serde_json::Map::new();
for entry in fs::read_dir(&package_folder)? {
let entry = entry?;
let file_type = entry.file_type()?;
if !file_type.is_dir() {
continue;
}
let version = entry.file_name().to_string_lossy().into_owned();
let version_folder = package_folder.join(&version);
let (tarball_bytes, mut version_info) = create_package_version_info(
&version_folder,
&version,
package_name,
registry_hostname,
)?;
tarballs.insert(version.clone(), tarball_bytes);
if let Some(maybe_optional_deps) = version_info.get("optionalDependencies")
&& let Some(optional_deps) = maybe_optional_deps.as_object()
{
if let Some(maybe_deps) = version_info.get("dependencies") {
if let Some(deps) = maybe_deps.as_object() {
let mut cloned_deps = deps.to_owned();
for (key, value) in optional_deps {
cloned_deps.insert(key.to_string(), value.to_owned());
}
version_info.insert(
"dependencies".to_string(),
serde_json::to_value(cloned_deps).unwrap(),
);
}
} else {
version_info.insert(
"dependencies".to_string(),
serde_json::to_value(optional_deps).unwrap(),
);
}
}
if let Some(publish_config) = version_info.get("publishConfig")
&& let Some(tag) = publish_config.get("tag")
&& let Some(tag) = tag.as_str()
{
dist_tags.insert(tag.to_string(), version.clone().into());
}
if let Some(date) = version_info.get("publishDate") {
time.insert(version.clone(), date.clone());
}
versions.insert(version.clone(), version_info.into());
let version = semver::Version::parse(&version)?;
if version.cmp(&latest_version).is_gt() {
latest_version = version;
}
}
if !dist_tags.contains_key("latest") {
dist_tags.insert("latest".to_string(), latest_version.to_string().into());
}
// create the registry file for this package
let mut registry_file = serde_json::Map::new();
registry_file.insert("name".to_string(), package_name.to_string().into());
registry_file.insert("versions".to_string(), versions.into());
registry_file.insert("dist-tags".to_string(), dist_tags.into());
registry_file.insert("time".to_string(), time.into());
Ok(Some(CustomNpmPackage {
registry_file: serde_json::to_string(®istry_file).unwrap(),
tarballs,
}))
}
fn get_tarball_checksum(bytes: &[u8]) -> String {
use sha2::Digest;
let mut hasher = sha2::Sha512::new();
hasher.update(bytes);
BASE64_STANDARD.encode(hasher.finalize())
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/https.rs | tests/util/server/src/https.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::io;
use std::num::NonZeroUsize;
use std::result::Result;
use std::sync::Arc;
use anyhow::anyhow;
use futures::Stream;
use futures::StreamExt;
use rustls_tokio_stream::TlsStream;
use rustls_tokio_stream::rustls;
use rustls_tokio_stream::rustls::pki_types::CertificateDer;
use rustls_tokio_stream::rustls::pki_types::PrivateKeyDer;
use tokio::net::TcpStream;
use crate::get_tcp_listener_stream;
use crate::testdata_path;
pub const TLS_BUFFER_SIZE: Option<NonZeroUsize> = NonZeroUsize::new(65536);
#[derive(Default)]
pub enum SupportedHttpVersions {
#[default]
All,
Http1Only,
Http2Only,
}
pub fn get_tls_listener_stream_from_tcp(
tls_config: Arc<rustls::ServerConfig>,
mut tcp: impl Stream<Item = Result<TcpStream, std::io::Error>> + Unpin + 'static,
) -> impl Stream<Item = Result<TlsStream<TcpStream>, std::io::Error>> + Unpin {
async_stream::stream! {
while let Some(result) = tcp.next().await {
match result {
Ok(tcp) => yield Ok(TlsStream::new_server_side(tcp, tls_config.clone(), TLS_BUFFER_SIZE)),
Err(e) => yield Err(e),
};
}
}.boxed_local()
}
pub async fn get_tls_listener_stream(
name: &'static str,
port: u16,
http: SupportedHttpVersions,
) -> impl Stream<Item = Result<TlsStream<TcpStream>, std::io::Error>> + Unpin {
let cert_file = "tls/localhost.crt";
let key_file = "tls/localhost.key";
let ca_cert_file = "tls/RootCA.pem";
let tls_config =
get_tls_config(cert_file, key_file, ca_cert_file, http).unwrap();
let tcp = get_tcp_listener_stream(name, port).await;
get_tls_listener_stream_from_tcp(tls_config, tcp)
}
pub fn get_tls_config(
cert: &str,
key: &str,
ca: &str,
http_versions: SupportedHttpVersions,
) -> io::Result<Arc<rustls::ServerConfig>> {
let cert_path = testdata_path().join(cert);
let key_path = testdata_path().join(key);
let ca_path = testdata_path().join(ca);
let cert_file = std::fs::File::open(cert_path)?;
let key_file = std::fs::File::open(key_path)?;
let ca_file = std::fs::File::open(ca_path)?;
let certs_result: Result<Vec<CertificateDer<'static>>, io::Error> = {
let mut cert_reader = io::BufReader::new(cert_file);
rustls_pemfile::certs(&mut cert_reader).collect()
};
let certs = certs_result?;
let mut ca_cert_reader = io::BufReader::new(ca_file);
let ca_cert = rustls_pemfile::certs(&mut ca_cert_reader)
.collect::<Vec<_>>()
.remove(0)?;
let mut key_reader = io::BufReader::new(key_file);
let key = {
let pkcs8_keys = rustls_pemfile::pkcs8_private_keys(&mut key_reader)
.collect::<Result<Vec<_>, _>>()?;
let rsa_keys = rustls_pemfile::rsa_private_keys(&mut key_reader)
.collect::<Result<Vec<_>, _>>()?;
if !pkcs8_keys.is_empty() {
let key = pkcs8_keys[0].clone_key();
Some(PrivateKeyDer::from(key))
} else if !rsa_keys.is_empty() {
let key = rsa_keys[0].clone_key();
Some(PrivateKeyDer::from(key))
} else {
None
}
};
match key {
Some(key) => {
let mut root_cert_store = rustls::RootCertStore::empty();
root_cert_store.add(ca_cert).unwrap();
// Allow (but do not require) client authentication.
let client_verifier = rustls::server::WebPkiClientVerifier::builder(
Arc::new(root_cert_store),
)
.allow_unauthenticated()
.build()
.unwrap();
let mut config = rustls::ServerConfig::builder()
.with_client_cert_verifier(client_verifier)
.with_single_cert(certs, key)
.map_err(|e| anyhow!("Error setting cert: {:?}", e))
.unwrap();
match http_versions {
SupportedHttpVersions::All => {
config.alpn_protocols = vec!["h2".into(), "http/1.1".into()];
}
SupportedHttpVersions::Http1Only => {}
SupportedHttpVersions::Http2Only => {
config.alpn_protocols = vec!["h2".into()];
}
}
Ok(Arc::new(config))
}
None => Err(io::Error::other("Cannot find key")),
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/test_runner.rs | tests/util/server/src/test_runner.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::collections::HashMap;
use std::io::IsTerminal;
use std::io::Write;
use std::num::NonZeroUsize;
use std::sync::Arc;
use std::sync::mpsc::RecvTimeoutError;
use std::sync::mpsc::channel;
use std::time::Duration;
use std::time::Instant;
use console_static_text::ConsoleStaticText;
use file_test_runner::RunOptions;
use file_test_runner::TestResult;
use file_test_runner::reporter::LogReporter;
use parking_lot::Mutex;
use serde::Serialize;
use crate::IS_CI;
use crate::colors;
use crate::semaphore::Semaphore;
/// Tracks the number of times each test has been flaky
pub struct FlakyTestTracker {
flaky_counts: Mutex<HashMap<String, usize>>,
}
impl FlakyTestTracker {
pub fn record_flaky(&self, test_name: &str) {
let mut counts = self.flaky_counts.lock();
*counts.entry(test_name.to_string()).or_insert(0) += 1;
}
pub fn get_count(&self, test_name: &str) -> usize {
let counts = self.flaky_counts.lock();
counts.get(test_name).copied().unwrap_or(0)
}
}
impl Default for FlakyTestTracker {
fn default() -> Self {
Self {
flaky_counts: Mutex::new(HashMap::new()),
}
}
}
pub fn flaky_test_ci(
test_name: &str,
flaky_test_tracker: &FlakyTestTracker,
parallelism: Option<&Parallelism>,
run_test: impl Fn() -> TestResult,
) -> TestResult {
run_maybe_flaky_test(
test_name,
*IS_CI,
flaky_test_tracker,
parallelism,
run_test,
)
}
struct SingleConcurrencyFlagGuard<'a>(&'a Parallelism);
impl<'a> Drop for SingleConcurrencyFlagGuard<'a> {
fn drop(&mut self) {
let mut value = self.0.has_raised_count.lock();
*value -= 1;
if *value == 0 {
self.0.semaphore.set_max(self.0.max_parallelism.get());
}
}
}
pub struct Parallelism {
semaphore: Semaphore,
max_parallelism: NonZeroUsize,
has_raised_count: Mutex<usize>,
}
impl Default for Parallelism {
fn default() -> Self {
let max_parallelism = RunOptions::default_parallelism();
Self {
max_parallelism,
semaphore: Semaphore::new(max_parallelism.get()),
has_raised_count: Default::default(),
}
}
}
impl Parallelism {
pub fn max_parallelism(&self) -> NonZeroUsize {
self.max_parallelism
}
fn acquire(&self) -> crate::semaphore::Permit<'_> {
self.semaphore.acquire()
}
fn raise_single_concurrency_flag(&self) -> SingleConcurrencyFlagGuard<'_> {
{
let mut value = self.has_raised_count.lock();
if *value == 0 {
self.semaphore.set_max(1);
}
*value += 1;
}
SingleConcurrencyFlagGuard(self)
}
}
pub fn run_maybe_flaky_test(
test_name: &str,
is_flaky: bool,
flaky_test_tracker: &FlakyTestTracker,
parallelism: Option<&Parallelism>,
main_action: impl Fn() -> TestResult,
) -> TestResult {
let ci_parallelism = parallelism.filter(|_| *IS_CI);
let action = || run_with_parallelism(ci_parallelism, &main_action);
if !is_flaky {
return action();
}
for i in 0..2 {
let result = action();
if !result.is_failed() {
return result;
}
flaky_test_tracker.record_flaky(test_name);
#[allow(clippy::print_stderr)]
if *IS_CI {
::std::eprintln!(
"{} {} was flaky on run {}",
colors::bold_red("Warning"),
colors::gray(test_name),
i,
);
}
std::thread::sleep(Duration::from_millis(100));
}
// on the CI, try running the test in isolation with no other tests running
#[allow(clippy::print_stderr)]
let _maybe_guard = if let Some(parallelism) = ci_parallelism {
let guard = parallelism.raise_single_concurrency_flag();
::std::eprintln!(
"{} {} was flaky. Temporarily reducing test concurrency to 1 and trying a few more times.",
colors::bold_red("***WARNING***"),
colors::gray(test_name)
);
for _ in 0..2 {
let result = action();
if !result.is_failed() {
return result;
}
flaky_test_tracker.record_flaky(test_name);
std::thread::sleep(Duration::from_millis(100));
}
Some(guard)
} else {
None
};
// surface result now
action()
}
fn run_with_parallelism(
parallelism: Option<&Parallelism>,
action: impl Fn() -> TestResult,
) -> TestResult {
let _maybe_permit = parallelism.map(|p| p.acquire());
let duration = std::time::Instant::now();
let result = action();
result.with_duration(duration.elapsed())
}
pub struct TestTimeoutHolder {
_tx: std::sync::mpsc::Sender<()>,
}
pub fn with_timeout(
test_name: String,
duration: Duration,
) -> TestTimeoutHolder {
let (tx, rx) = ::std::sync::mpsc::channel::<()>();
// ok to allow because we don't need to maintain logging context here
#[allow(clippy::disallowed_methods)]
std::thread::spawn(move || {
if rx.recv_timeout(duration)
== Err(::std::sync::mpsc::RecvTimeoutError::Timeout)
{
use std::io::Write;
#[allow(clippy::print_stderr)]
{
::std::eprintln!(
"Test {test_name} timed out after {} seconds, aborting",
duration.as_secs()
);
}
_ = std::io::stderr().flush();
#[allow(clippy::disallowed_methods)]
::std::process::exit(1);
}
});
TestTimeoutHolder { _tx: tx }
}
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
struct RecordedTestResult {
name: String,
path: String,
#[serde(skip_serializing_if = "Option::is_none")]
duration: Option<u128>,
#[serde(skip_serializing_if = "is_false")]
failed: bool,
#[serde(skip_serializing_if = "is_false")]
ignored: bool,
#[serde(skip_serializing_if = "is_zero")]
flaky_count: usize,
#[serde(skip_serializing_if = "Vec::is_empty")]
sub_tests: Vec<RecordedTestResult>,
}
fn is_false(value: &bool) -> bool {
!value
}
fn is_zero(value: &usize) -> bool {
*value == 0
}
#[derive(Default, Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct RecordedReport {
tests: Vec<RecordedTestResult>,
}
struct JsonReporter {
data: Arc<Mutex<RecordedReport>>,
flaky_tracker: Arc<FlakyTestTracker>,
test_module_name: String,
}
impl JsonReporter {
pub fn new(
flaky_tracker: Arc<FlakyTestTracker>,
test_module_name: String,
) -> Self {
Self {
data: Default::default(),
flaky_tracker,
test_module_name,
}
}
fn write_report_to_file(&self) {
let json = {
let data = self.data.lock();
serde_json::to_string(&*data).unwrap()
};
let file_path = crate::root_path()
.join("target")
.join(format!("test_results_{}.json", self.test_module_name));
file_path.write(json);
}
fn flatten_and_record_test(
&self,
tests: &mut Vec<RecordedTestResult>,
test_name: String,
path: String,
result: &TestResult,
main_duration: Option<Duration>,
) {
match result {
TestResult::SubTests {
sub_tests,
duration,
} => {
let mut sub_test_results = Vec::with_capacity(sub_tests.len());
for sub_test in sub_tests {
let full_name = format!("{}::{}", test_name, sub_test.name);
self.flatten_and_record_test(
&mut sub_test_results,
full_name,
path.clone(),
&sub_test.result,
None,
);
}
let flaky_count = self.flaky_tracker.get_count(&test_name);
tests.push(RecordedTestResult {
name: test_name,
path,
duration: duration.or(main_duration).map(|d| d.as_millis()),
failed: sub_tests.iter().any(|s| s.result.is_failed()),
ignored: false,
flaky_count,
sub_tests: sub_test_results,
})
}
TestResult::Passed { duration } => {
let flaky_count = self.flaky_tracker.get_count(&test_name);
let test_result = RecordedTestResult {
name: test_name,
path,
duration: duration.or(main_duration).map(|d| d.as_millis()),
failed: false,
ignored: false,
flaky_count,
sub_tests: Vec::new(),
};
tests.push(test_result);
}
TestResult::Failed { duration, .. } => {
let flaky_count = self.flaky_tracker.get_count(&test_name);
let test_result = RecordedTestResult {
name: test_name,
path,
duration: duration.or(main_duration).map(|d| d.as_millis()),
failed: true,
ignored: false,
flaky_count,
sub_tests: Vec::new(),
};
tests.push(test_result.clone());
}
TestResult::Ignored => {
let flaky_count = self.flaky_tracker.get_count(&test_name);
let test_result = RecordedTestResult {
name: test_name,
path,
duration: None,
failed: false,
ignored: true,
flaky_count,
sub_tests: Vec::new(),
};
tests.push(test_result);
}
}
}
}
impl<TData> file_test_runner::reporter::Reporter<TData> for JsonReporter {
fn report_category_start(
&self,
_category: &file_test_runner::collection::CollectedTestCategory<TData>,
_context: &file_test_runner::reporter::ReporterContext,
) {
}
fn report_category_end(
&self,
_category: &file_test_runner::collection::CollectedTestCategory<TData>,
_context: &file_test_runner::reporter::ReporterContext,
) {
}
fn report_test_start(
&self,
_test: &file_test_runner::collection::CollectedTest<TData>,
_context: &file_test_runner::reporter::ReporterContext,
) {
}
fn report_test_end(
&self,
test: &file_test_runner::collection::CollectedTest<TData>,
duration: Duration,
result: &TestResult,
_context: &file_test_runner::reporter::ReporterContext,
) {
let mut data = self.data.lock();
let relative_path = test
.path
.strip_prefix(crate::root_path())
.unwrap_or(&test.path);
let path = match test.line_and_column {
Some((line, col)) => {
format!("{}:{}:{}", relative_path.display(), line + 1, col + 1)
}
None => relative_path.display().to_string(),
}
.replace("\\", "/");
// Use the helper function to recursively flatten subtests
self.flatten_and_record_test(
&mut data.tests,
test.name.to_string(),
path,
result,
Some(duration),
);
}
fn report_failures(
&self,
_failures: &[file_test_runner::reporter::ReporterFailure<TData>],
_total_tests: usize,
) {
// Write the report to file when failures are reported (at the end of test run)
self.write_report_to_file();
}
}
pub trait ReporterData {
fn times_flaky() -> usize;
}
pub fn get_test_reporter<TData: 'static>(
test_module_name: &str,
flaky_test_tracker: Arc<FlakyTestTracker>,
) -> Arc<dyn file_test_runner::reporter::Reporter<TData>> {
let mut reporters: Vec<Box<dyn file_test_runner::reporter::Reporter<TData>>> =
Vec::with_capacity(2);
reporters.push(get_display_reporter());
if *IS_CI {
reporters.push(Box::new(JsonReporter::new(
flaky_test_tracker,
test_module_name.to_string(),
)));
}
Arc::new(file_test_runner::reporter::AggregateReporter::new(
reporters,
))
}
fn get_display_reporter<TData>()
-> Box<dyn file_test_runner::reporter::Reporter<TData>> {
if *file_test_runner::NO_CAPTURE
|| *IS_CI
|| !std::io::stderr().is_terminal()
|| std::env::var("DENO_TEST_UTIL_REPORTER").ok().as_deref() == Some("log")
{
Box::new(file_test_runner::reporter::LogReporter::default())
} else {
Box::new(PtyReporter::new())
}
}
struct PtyReporterPendingTest {
name: String,
start_time: Instant,
}
struct PtyReporterFailedTest {
name: String,
path: String,
}
struct PtyReporterData {
static_text: ConsoleStaticText,
pending_tests: Vec<PtyReporterPendingTest>,
failed_tests: Vec<PtyReporterFailedTest>,
passed_tests: usize,
ignored_tests: usize,
}
impl PtyReporterData {
pub fn render_clear(&mut self) -> String {
self.static_text.render_clear().unwrap_or_default()
}
pub fn render(&mut self) -> Option<String> {
let mut items = Vec::new();
const MAX_ITEM_DISPLAY: usize = 10;
if !self.pending_tests.is_empty() {
let text = if self.pending_tests.len() > MAX_ITEM_DISPLAY {
"oldest pending:"
} else {
"pending:"
};
items.push(console_static_text::TextItem::Text(
colors::yellow(text).into(),
));
items.extend(self.pending_tests.iter().take(MAX_ITEM_DISPLAY).map(
|item| {
console_static_text::TextItem::Text(
format!(
"- {} ({}s)",
item.name,
item.start_time.elapsed().as_secs()
)
.into(),
)
},
));
}
if !self.failed_tests.is_empty() {
items.push(console_static_text::TextItem::Text(
colors::red("failed:").to_string().into(),
));
for item in self.failed_tests.iter().rev().take(MAX_ITEM_DISPLAY) {
items.push(console_static_text::TextItem::Text(
format!("- {} ({})", item.name, colors::gray(&item.path)).into(),
));
}
}
items.push(console_static_text::TextItem::Text(
format!(
" {} Pending - {} Passed - {} Failed - {} Ignored",
self.pending_tests.len(),
self.passed_tests,
self.failed_tests.len(),
self.ignored_tests
)
.into(),
));
self.static_text.render_items(items.iter())
}
}
struct PtyReporter {
data: Arc<Mutex<PtyReporterData>>,
_tx: std::sync::mpsc::Sender<()>,
}
impl PtyReporter {
pub fn new() -> Self {
let (tx, rx) = channel();
let data = Arc::new(Mutex::new(PtyReporterData {
static_text: ConsoleStaticText::new(move || {
let size = crossterm::terminal::size().ok();
console_static_text::ConsoleSize {
cols: size.map(|(cols, _)| cols),
rows: size.map(|(_, rows)| rows),
}
}),
pending_tests: Default::default(),
failed_tests: Default::default(),
passed_tests: Default::default(),
ignored_tests: Default::default(),
}));
#[allow(clippy::disallowed_methods)]
std::thread::spawn({
let data = data.clone();
move || {
loop {
match rx.recv_timeout(Duration::from_millis(1_000)) {
Err(RecvTimeoutError::Timeout) => {
let mut data = data.lock();
if let Some(text) = data.render() {
let mut stderr = std::io::stderr().lock();
_ = stderr.write_all(text.as_bytes());
_ = stderr.flush();
}
}
_ => {
return;
}
}
}
}
});
Self { data, _tx: tx }
}
}
impl<TData> file_test_runner::reporter::Reporter<TData> for PtyReporter {
fn report_category_start(
&self,
category: &file_test_runner::collection::CollectedTestCategory<TData>,
_context: &file_test_runner::reporter::ReporterContext,
) {
let mut data = self.data.lock();
let mut final_text = data.render_clear().into_bytes();
_ = LogReporter::write_report_category_start(&mut final_text, category);
if let Some(text) = data.render() {
final_text.extend_from_slice(text.as_bytes());
}
let mut stderr = std::io::stderr().lock();
_ = stderr.write_all(&final_text);
_ = stderr.flush();
}
fn report_category_end(
&self,
_category: &file_test_runner::collection::CollectedTestCategory<TData>,
_context: &file_test_runner::reporter::ReporterContext,
) {
}
fn report_test_start(
&self,
test: &file_test_runner::collection::CollectedTest<TData>,
_context: &file_test_runner::reporter::ReporterContext,
) {
let mut data = self.data.lock();
data.pending_tests.push(PtyReporterPendingTest {
name: test.name.clone(),
start_time: std::time::Instant::now(),
});
if let Some(final_text) = data.render() {
let mut stderr = std::io::stderr().lock();
_ = stderr.write_all(final_text.as_bytes());
_ = stderr.flush();
}
}
fn report_test_end(
&self,
test: &file_test_runner::collection::CollectedTest<TData>,
duration: Duration,
result: &TestResult,
_context: &file_test_runner::reporter::ReporterContext,
) {
let mut data = self.data.lock();
let clear_text = data.static_text.render_clear().unwrap_or_default();
if let Some(index) =
data.pending_tests.iter().position(|t| t.name == test.name)
{
data.pending_tests.remove(index);
}
match result {
TestResult::Passed { .. } => {
data.passed_tests += 1;
}
TestResult::Ignored => {
data.ignored_tests += 1;
}
TestResult::Failed { .. } => {
data.failed_tests.push(PtyReporterFailedTest {
name: test.name.to_string(),
path: match test.line_and_column {
Some((line, col)) => {
format!("{}:{}:{}", test.path.display(), line + 1, col + 1)
}
None => test.path.display().to_string(),
},
});
}
TestResult::SubTests { .. } => {
// ignore
}
}
let mut final_text = clear_text.into_bytes();
_ = LogReporter::write_report_test_end(
&mut final_text,
test,
duration,
result,
&file_test_runner::reporter::ReporterContext { is_parallel: true },
);
if let Some(text) = data.render() {
final_text.extend_from_slice(text.as_bytes());
}
let mut stderr = std::io::stderr().lock();
_ = stderr.write_all(&final_text);
_ = stderr.flush();
}
fn report_failures(
&self,
failures: &[file_test_runner::reporter::ReporterFailure<TData>],
total_tests: usize,
) {
let clear_text = self
.data
.lock()
.static_text
.render_clear()
.unwrap_or_default();
let mut final_text = clear_text.into_bytes();
_ = LogReporter::write_report_failures(
&mut final_text,
failures,
total_tests,
);
let mut stderr = std::io::stderr().lock();
_ = stderr.write_all(&final_text);
_ = stderr.flush();
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/assertions.rs | tests/util/server/src/assertions.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::io::Write;
use crate::colors;
#[macro_export]
macro_rules! assert_starts_with {
($string:expr, $($test:expr),+) => {
let string = $string; // This might be a function call or something
if !($(string.starts_with($test))||+) {
panic!("{:?} does not start with {:?}", string, [$($test),+]);
}
}
}
#[macro_export]
macro_rules! assert_ends_with {
($left:expr, $right:expr $(,)?) => {
match (&$left, &$right) {
(actual, expected) => {
let actual = if expected.len() > actual.len() {
actual
} else {
&actual[actual.len() - expected.len()..]
};
pretty_assertions::assert_eq!(
actual,
*expected,
"should end with expected."
);
}
}
};
}
#[macro_export]
macro_rules! assert_contains {
($string:expr, $($test:expr),+ $(,)?) => {
let string = &$string; // This might be a function call or something
if !($(string.contains($test))||+) {
panic!("{:?} does not contain any of {:?}", string, [$($test),+]);
}
}
}
#[macro_export]
macro_rules! assert_not_contains {
($string:expr, $($test:expr),+ $(,)?) => {
let string = &$string; // This might be a function call or something
if !($(!string.contains($test))||+) {
panic!("{:?} contained {:?}", string, [$($test),+]);
}
}
}
#[track_caller]
pub fn assert_wildcard_match(actual: &str, expected: &str) {
assert_wildcard_match_with_logger(actual, expected, &mut std::io::stderr())
}
#[track_caller]
pub fn assert_wildcard_match_with_logger(
actual: &str,
expected: &str,
logger: &mut dyn Write,
) {
if !expected.contains("[WILD")
&& !expected.contains("[UNORDERED_START]")
&& !expected.contains("[#")
{
pretty_assertions::assert_eq!(actual, expected);
} else {
match crate::wildcard_match_detailed(expected, actual) {
crate::WildcardMatchResult::Success => {
// ignore
}
crate::WildcardMatchResult::Fail(debug_output) => {
writeln!(
logger,
"{}{}{}",
colors::bold("-- "),
colors::bold_red("OUTPUT"),
colors::bold(" START --"),
)
.unwrap();
writeln!(logger, "{}", actual).unwrap();
writeln!(logger, "{}", colors::bold("-- OUTPUT END --")).unwrap();
writeln!(
logger,
"{}{}{}",
colors::bold("-- "),
colors::bold_green("EXPECTED"),
colors::bold(" START --"),
)
.unwrap();
writeln!(logger, "{}", expected).unwrap();
writeln!(logger, "{}", colors::bold("-- EXPECTED END --")).unwrap();
writeln!(
logger,
"{}{}{}",
colors::bold("-- "),
colors::bold_blue("DEBUG"),
colors::bold(" START --"),
)
.unwrap();
writeln!(logger, "{debug_output}").unwrap();
writeln!(logger, "{}", colors::bold("-- DEBUG END --")).unwrap();
panic!("pattern match failed");
}
}
}
}
/// Asserts that the actual `serde_json::Value` is equal to the expected `serde_json::Value`, but
/// only for the keys present in the expected value.
///
/// # Example
///
/// ```
/// # use serde_json::json;
/// # use test_server::assertions::assert_json_subset;
/// assert_json_subset(json!({"a": 1, "b": 2}), json!({"a": 1}));
///
/// // Arrays are compared element by element
/// assert_json_subset(json!([{ "a": 1, "b": 2 }, {}]), json!([{"a": 1}, {}]));
/// ```
///
/// # Panics
///
/// Panics with a descriptive message when a key/index from `expected`
/// is missing in `actual`, or when leaf values differ.
#[track_caller]
pub fn assert_json_subset(
  actual: serde_json::Value,
  expected: serde_json::Value,
) {
  match (actual, expected) {
    (
      serde_json::Value::Object(actual),
      serde_json::Value::Object(expected),
    ) => {
      for (k, v) in expected.iter() {
        let Some(actual_v) = actual.get(k) else {
          panic!("Key {k:?} not found in actual value ({actual:#?})");
        };
        assert_json_subset(actual_v.clone(), v.clone());
      }
    }
    (serde_json::Value::Array(actual), serde_json::Value::Array(expected)) => {
      for (i, v) in expected.iter().enumerate() {
        // Use a checked lookup so a shorter `actual` array produces a
        // descriptive panic (like the Object arm) instead of a bare
        // index-out-of-bounds.
        let Some(actual_v) = actual.get(i) else {
          panic!(
            "Index {i} not found in actual array of len {} ({actual:#?})",
            actual.len()
          );
        };
        assert_json_subset(actual_v.clone(), v.clone());
      }
    }
    (actual, expected) => {
      assert_eq!(actual, expected);
    }
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/parsers.rs | tests/util/server/src/parsers.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::collections::HashMap;
use lazy_regex::Lazy;
use regex::Regex;
use serde::Serialize;
/// Metrics parsed from the output of the `wrk` HTTP benchmarking tool.
pub struct WrkOutput {
  /// 99th-percentile latency, normalized to milliseconds.
  pub latency: f64,
  /// Requests per second.
  pub requests: u64,
}
/// Parses the textual output of the `wrk` HTTP benchmarking tool,
/// extracting the requests/sec figure and the 99th-percentile latency
/// (normalized to milliseconds).
///
/// # Panics
///
/// Panics if either figure is missing from `output` or fails to parse.
pub fn parse_wrk_output(output: &str) -> WrkOutput {
  static REQUESTS_RX: Lazy<Regex> =
    lazy_regex::lazy_regex!(r"Requests/sec:\s+(\d+)");
  // NOTE: the decimal point must be escaped; an unescaped `.` lets the
  // group match a digit, e.g. treating "12345" as "1234" + "5".
  static LATENCY_RX: Lazy<Regex> =
    lazy_regex::lazy_regex!(r"\s+99%(?:\s+(\d+\.\d+)([a-z]+))");
  let mut requests = None;
  let mut latency = None;
  for line in output.lines() {
    if requests.is_none()
      && let Some(cap) = REQUESTS_RX.captures(line)
    {
      requests = Some(str::parse::<u64>(cap.get(1).unwrap().as_str()).unwrap());
    }
    if latency.is_none()
      && let Some(cap) = LATENCY_RX.captures(line)
    {
      let time = cap.get(1).unwrap();
      let unit = cap.get(2).unwrap();
      // Normalize the reported unit to milliseconds.
      latency = Some(
        str::parse::<f64>(time.as_str()).unwrap()
          * match unit.as_str() {
            "ms" => 1.0,
            "us" => 0.001,
            "s" => 1000.0,
            _ => unreachable!(),
          },
      );
    }
  }
  WrkOutput {
    requests: requests.unwrap(),
    latency: latency.unwrap(),
  }
}
/// One row of the summary table printed by `strace -c`.
#[derive(Debug, Clone, Serialize)]
pub struct StraceOutput {
  /// The `% time` column.
  pub percent_time: f64,
  /// Total seconds spent in this syscall.
  pub seconds: f64,
  /// Average microseconds per call; `None` when the column is absent
  /// (only possible for the synthetic "total" row).
  pub usecs_per_call: Option<u64>,
  /// Number of calls made.
  pub calls: u64,
  /// Number of calls that returned an error (0 when the column is absent).
  pub errors: u64,
}
/// Parses the summary table printed by `strace -c`, returning per-syscall
/// stats keyed by syscall name plus a synthetic `"total"` entry.
///
/// Returns an empty map when the (filtered) output has fewer than 4
/// lines — too short to contain headers, data, separator, and totals.
pub fn parse_strace_output(output: &str) -> HashMap<String, StraceOutput> {
  let mut summary = HashMap::new();
  // Filter out non-relevant lines. See the error log at
  // https://github.com/denoland/deno/pull/3715/checks?check_run_id=397365887
  // This is checked in testdata/strace_summary2.out
  let mut lines = output.lines().filter(|line| {
    !line.is_empty()
      && !line.contains("detached ...")
      && !line.contains("unfinished ...")
      && !line.contains("????")
  });
  let count = lines.clone().count();
  if count < 4 {
    return summary;
  }
  // The table ends with a separator row followed by the totals row.
  let total_line = lines.next_back().unwrap();
  lines.next_back(); // Drop separator
  // Skip the two header rows at the top of the table.
  let data_lines = lines.skip(2);
  for line in data_lines {
    let syscall_fields = line.split_whitespace().collect::<Vec<_>>();
    let len = syscall_fields.len();
    let syscall_name = syscall_fields.last().unwrap();
    // Data rows have 5 fields (no errors column) or 6 (with errors).
    if (5..=6).contains(&len) {
      summary.insert(
        syscall_name.to_string(),
        StraceOutput {
          percent_time: str::parse::<f64>(syscall_fields[0]).unwrap(),
          seconds: str::parse::<f64>(syscall_fields[1]).unwrap(),
          usecs_per_call: Some(str::parse::<u64>(syscall_fields[2]).unwrap()),
          calls: str::parse::<u64>(syscall_fields[3]).unwrap(),
          errors: if syscall_fields.len() < 6 {
            0
          } else {
            str::parse::<u64>(syscall_fields[4]).unwrap()
          },
        },
      );
    }
  }
  let total_fields = total_line.split_whitespace().collect::<Vec<_>>();
  // The totals row may or may not include a usecs/call column; when it
  // does, the calls/errors columns shift right by one.
  let mut usecs_call_offset = 0;
  summary.insert(
    "total".to_string(),
    StraceOutput {
      percent_time: str::parse::<f64>(total_fields[0]).unwrap(),
      seconds: str::parse::<f64>(total_fields[1]).unwrap(),
      usecs_per_call: if total_fields.len() > 5 {
        usecs_call_offset = 1;
        Some(str::parse::<u64>(total_fields[2]).unwrap())
      } else {
        None
      },
      calls: str::parse::<u64>(total_fields[2 + usecs_call_offset]).unwrap(),
      errors: str::parse::<u64>(total_fields[3 + usecs_call_offset]).unwrap(),
    },
  );
  summary
}
/// Takes the output from "time -v" as input, extracts the 'maximum
/// resident set size' (reported in kbytes) and returns it in bytes.
///
/// Returns `None` when no matching line is present.
///
/// # Panics
///
/// Panics if a matching line has no parseable integer after `": "`.
pub fn parse_max_mem(output: &str) -> Option<u64> {
  output
    .lines()
    .find(|line| {
      line
        .to_lowercase()
        .contains("maximum resident set size (kbytes)")
    })
    .map(|line| {
      let kbytes = line.split(": ").nth(1).unwrap();
      str::parse::<u64>(kbytes).unwrap() * 1024
    })
}
/// Regression tests driven by captured tool output stored in `./testdata`.
#[cfg(test)]
mod tests {
  use pretty_assertions::assert_eq;
  use super::*;
  #[test]
  fn parse_wrk_output_1() {
    const TEXT: &str = include_str!("./testdata/wrk1.txt");
    let wrk = parse_wrk_output(TEXT);
    assert_eq!(wrk.requests, 1837);
    // latency is normalized to milliseconds
    assert!((wrk.latency - 6.25).abs() < f64::EPSILON);
  }
  #[test]
  fn parse_wrk_output_2() {
    const TEXT: &str = include_str!("./testdata/wrk2.txt");
    let wrk = parse_wrk_output(TEXT);
    assert_eq!(wrk.requests, 53435);
    assert!((wrk.latency - 6.22).abs() < f64::EPSILON);
  }
  #[test]
  fn parse_wrk_output_3() {
    const TEXT: &str = include_str!("./testdata/wrk3.txt");
    let wrk = parse_wrk_output(TEXT);
    assert_eq!(wrk.requests, 96037);
    assert!((wrk.latency - 6.36).abs() < f64::EPSILON);
  }
  #[test]
  fn max_mem_parse() {
    const TEXT: &str = include_str!("./testdata/time.out");
    let size = parse_max_mem(TEXT);
    // parse_max_mem converts kbytes to bytes
    assert_eq!(size, Some(120380 * 1024));
  }
  #[test]
  fn strace_parse_1() {
    const TEXT: &str = include_str!("./testdata/strace_summary.out");
    let strace = parse_strace_output(TEXT);
    // first syscall line
    let munmap = strace.get("munmap").unwrap();
    assert_eq!(munmap.calls, 60);
    assert_eq!(munmap.errors, 0);
    // line with errors
    assert_eq!(strace.get("mkdir").unwrap().errors, 2);
    // last syscall line
    let prlimit = strace.get("prlimit64").unwrap();
    assert_eq!(prlimit.calls, 2);
    assert!((prlimit.percent_time - 0.0).abs() < f64::EPSILON);
    // summary line
    assert_eq!(strace.get("total").unwrap().calls, 704);
    assert_eq!(strace.get("total").unwrap().errors, 5);
    assert_eq!(strace.get("total").unwrap().usecs_per_call, None);
  }
  #[test]
  fn strace_parse_2() {
    const TEXT: &str = include_str!("./testdata/strace_summary2.out");
    let strace = parse_strace_output(TEXT);
    // first syscall line
    let futex = strace.get("futex").unwrap();
    assert_eq!(futex.calls, 449);
    assert_eq!(futex.errors, 94);
    // summary line
    assert_eq!(strace.get("total").unwrap().calls, 821);
    assert_eq!(strace.get("total").unwrap().errors, 107);
    assert_eq!(strace.get("total").unwrap().usecs_per_call, None);
  }
  #[test]
  fn strace_parse_3() {
    const TEXT: &str = include_str!("./testdata/strace_summary3.out");
    let strace = parse_strace_output(TEXT);
    // first syscall line
    let futex = strace.get("mprotect").unwrap();
    assert_eq!(futex.calls, 90);
    assert_eq!(futex.errors, 0);
    // summary line (this fixture's totals row includes usecs/call)
    assert_eq!(strace.get("total").unwrap().calls, 543);
    assert_eq!(strace.get("total").unwrap().errors, 36);
    assert_eq!(strace.get("total").unwrap().usecs_per_call, Some(6));
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/print.rs | tests/util/server/src/print.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::cell::RefCell;
use std::io::Write;
use std::sync::Arc;
use std::thread::JoinHandle;
use parking_lot::Mutex;
thread_local! {
  // Per-thread capture buffer: `Some(Vec)` while capture is enabled.
  // The `Arc<Mutex<..>>` handle is cloned into threads spawned via
  // `spawn_thread` so their output lands in the same buffer.
  static OUTPUT_BUFFER: RefCell<Arc<Mutex<Option<Vec<u8>>>>> = RefCell::new(Arc::new(Mutex::new(None)));
}
/// Spawns a thread maintaining the output buffer for capturing printing.
///
/// The current thread's buffer handle is cloned into the new thread so
/// output printed there is captured into the same buffer.
pub fn spawn_thread<F, T>(f: F) -> JoinHandle<T>
where
  F: FnOnce() -> T,
  F: Send + 'static,
  T: Send + 'static,
{
  // Grab this thread's buffer handle before spawning.
  let captured_buffer = OUTPUT_BUFFER.with(|buffer| buffer.borrow().clone());
  #[allow(clippy::disallowed_methods)]
  std::thread::spawn(|| {
    // Install the shared handle into the new thread's thread-local slot.
    OUTPUT_BUFFER.with(|buffer| {
      *buffer.borrow_mut() = captured_buffer;
    });
    f()
  })
}
/// Print to stdout, or to the thread-local buffer if one is set
pub fn print_stdout(data: &[u8]) {
OUTPUT_BUFFER.with(|buffer_cell| {
{
let buffer = buffer_cell.borrow();
let mut buffer = buffer.lock();
if let Some(ref mut buf) = *buffer {
buf.extend_from_slice(data);
return;
}
}
let _ = std::io::stdout().write_all(data);
});
}
/// Print to stderr, or to the thread-local buffer if one is set
pub fn print_stderr(data: &[u8]) {
OUTPUT_BUFFER.with(|buffer_cell| {
{
let buffer = buffer_cell.borrow();
let mut buffer = buffer.lock();
if let Some(ref mut buf) = *buffer {
buf.extend_from_slice(data);
return;
}
}
let _ = std::io::stderr().write_all(data);
});
}
/// Capture output from a function, returning both the output and the function's result
///
/// Capture is skipped entirely when the runner's no-capture flag is set,
/// in which case the returned buffer is empty.
pub fn with_captured_output<F, R>(f: F) -> (Vec<u8>, R)
where
  F: FnOnce() -> R,
{
  /// RAII guard that ensures the output buffer is cleaned up even on panic
  struct CaptureGuard {
    // whether capture was turned on (and therefore must be turned off)
    enabled: bool,
  }
  impl CaptureGuard {
    fn new(enabled: bool) -> Self {
      if enabled {
        set_buffer(true);
      }
      Self { enabled }
    }
  }
  impl Drop for CaptureGuard {
    fn drop(&mut self) {
      if self.enabled {
        // Ensure buffer is disabled even on panic
        set_buffer(false);
      }
    }
  }
  let should_capture = !*file_test_runner::NO_CAPTURE;
  let _guard = CaptureGuard::new(should_capture);
  let result = f();
  // Take the captured bytes before the guard drops and clears the slot.
  let output = take_buffer();
  (output, result)
}
/// Enables capture (installing a fresh empty buffer) or disables it
/// (clearing the slot) for the current thread.
fn set_buffer(enabled: bool) {
  OUTPUT_BUFFER.with(|buffer| {
    let buffer = buffer.borrow();
    *buffer.lock() = if enabled { Some(Vec::new()) } else { None };
  });
}
/// Takes the captured bytes out of this thread's buffer (leaving `None`),
/// returning an empty Vec when capture was not enabled.
fn take_buffer() -> Vec<u8> {
  OUTPUT_BUFFER.with(|buffer| buffer.borrow().lock().take().unwrap_or_default())
}
// The macros below format their arguments and route the resulting bytes
// through `print_stdout` / `print_stderr`, so output is captured by the
// thread-local buffer when one is installed.

/// Print to stdout with a newline
#[macro_export]
macro_rules! println {
  () => {
    $crate::print::print_stdout(b"\n")
  };
  ($($arg:tt)*) => {{
    let mut msg = format!($($arg)*);
    msg.push('\n');
    $crate::print::print_stdout(msg.as_bytes());
  }};
}
/// Print to stdout without a newline
#[macro_export]
macro_rules! print {
  ($($arg:tt)*) => {{
    let msg = format!($($arg)*);
    $crate::print::print_stdout(msg.as_bytes());
  }};
}
/// Print to stderr with a newline
#[macro_export]
macro_rules! eprintln {
  () => {
    $crate::print::print_stderr(b"\n")
  };
  ($($arg:tt)*) => {{
    let mut msg = format!($($arg)*);
    msg.push('\n');
    $crate::print::print_stderr(msg.as_bytes());
  }};
}
/// Print to stderr without a newline
#[macro_export]
macro_rules! eprint {
  ($($arg:tt)*) => {{
    let msg = format!($($arg)*);
    $crate::print::print_stderr(msg.as_bytes());
  }};
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/servers/grpc.rs | tests/util/server/src/servers/grpc.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use futures::StreamExt;
use h2;
use hyper::header::HeaderName;
use hyper::header::HeaderValue;
use rustls_tokio_stream::TlsStream;
use tokio::net::TcpStream;
use tokio::task::LocalSet;
use super::SupportedHttpVersions;
use super::get_tcp_listener_stream;
use super::get_tls_listener_stream;
/// Runs plaintext (`h2_grpc_port`) and TLS (`h2s_grpc_port`) HTTP/2
/// listeners used as a minimal gRPC-style test server.
///
/// Every stream is answered with a two-chunk body (`"hello "` +
/// `"world\n"`) plus trailers: fixed `abc`/`opr` values, a
/// `req_body_len` trailer reporting the request body length, and an
/// echo of any trailers the client sent.
pub async fn h2_grpc_server(h2_grpc_port: u16, h2s_grpc_port: u16) {
  let mut tcp = get_tcp_listener_stream("grpc", h2_grpc_port).await;
  let mut tls = get_tls_listener_stream(
    "grpc (tls)",
    h2s_grpc_port,
    SupportedHttpVersions::Http2Only,
  )
  .await;
  // Serve one plaintext connection: accept streams until the peer closes.
  async fn serve(socket: TcpStream) -> Result<(), anyhow::Error> {
    let mut connection = h2::server::handshake(socket).await?;
    while let Some(result) = connection.accept().await {
      let (request, respond) = result?;
      tokio::spawn(async move {
        let _ = handle_request(request, respond).await;
      });
    }
    Ok(())
  }
  // Same as `serve`, but over an already-established TLS stream.
  async fn serve_tls(
    socket: TlsStream<TcpStream>,
  ) -> Result<(), anyhow::Error> {
    let mut connection = h2::server::handshake(socket).await?;
    while let Some(result) = connection.accept().await {
      let (request, respond) = result?;
      tokio::spawn(async move {
        let _ = handle_request(request, respond).await;
      });
    }
    Ok(())
  }
  // Drains the request body (counting its bytes), then sends the canned
  // response body followed by the trailer set described above.
  async fn handle_request(
    mut request: hyper::Request<h2::RecvStream>,
    mut respond: h2::server::SendResponse<bytes::Bytes>,
  ) -> Result<(), anyhow::Error> {
    let body = request.body_mut();
    let mut len = 0;
    while let Some(data) = body.data().await {
      let data = data?;
      // Release flow-control capacity so the client can keep sending.
      let _ = body.flow_control().release_capacity(data.len());
      len += data.len();
    }
    let maybe_recv_trailers = body.trailers().await?;
    let response = hyper::Response::new(());
    let mut send = respond.send_response(response, false)?;
    send.send_data(bytes::Bytes::from_static(b"hello "), false)?;
    send.send_data(bytes::Bytes::from_static(b"world\n"), false)?;
    let mut trailers = hyper::HeaderMap::new();
    trailers.insert(
      HeaderName::from_static("abc"),
      HeaderValue::from_static("def"),
    );
    trailers.insert(
      HeaderName::from_static("opr"),
      HeaderValue::from_static("stv"),
    );
    trailers.insert(
      HeaderName::from_static("req_body_len"),
      HeaderValue::from(len),
    );
    // Echo any request trailers back to the client.
    if let Some(recv_trailers) = maybe_recv_trailers {
      for (key, value) in recv_trailers {
        trailers.insert(key.unwrap(), value);
      }
    }
    send.send_trailers(trailers)?;
    Ok(())
  }
  // Accept loops for both listeners run on a LocalSet until shutdown.
  let local_set = LocalSet::new();
  local_set.spawn_local(async move {
    while let Some(Ok(tcp)) = tcp.next().await {
      tokio::spawn(async move {
        let _ = serve(tcp).await;
      });
    }
  });
  local_set.spawn_local(async move {
    while let Some(Ok(tls)) = tls.next().await {
      tokio::spawn(async move {
        let _ = serve_tls(tls).await;
      });
    }
  });
  local_set.await;
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/servers/socket_dev.rs | tests/util/server/src/servers/socket_dev.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::convert::Infallible;
use std::future::Future;
use std::net::SocketAddr;
use bytes::Bytes;
use futures::FutureExt;
use futures::future::LocalBoxFuture;
use http_body_util::combinators::UnsyncBoxBody;
use hyper::Request;
use hyper::Response;
use hyper::StatusCode;
use percent_encoding;
use serde_json::json;
use super::ServerKind;
use super::ServerOptions;
use super::empty_body;
use super::hyper_utils::HandlerOutput;
use super::run_server;
use super::string_body;
/// Spawns the mock socket.dev API server task(s) on `port`.
pub fn api(port: u16) -> Vec<LocalBoxFuture<'static, ()>> {
  let error_msg = "socket.dev server error";
  run_socket_dev_server(port, error_msg, socket_dev_handler)
}
/// Wraps `handler` in a single localhost (127.0.0.1) server future bound
/// to `port`.
fn run_socket_dev_server<F, S>(
  port: u16,
  error_msg: &'static str,
  handler: F,
) -> Vec<LocalBoxFuture<'static, ()>>
where
  F: Fn(Request<hyper::body::Incoming>) -> S + Copy + 'static,
  S: Future<Output = HandlerOutput> + 'static,
{
  let socket_dev_addr = SocketAddr::from(([127, 0, 0, 1], port));
  vec![
    run_socket_dev_server_for_addr(socket_dev_addr, error_msg, handler)
      .boxed_local(),
  ]
}
/// Runs `handler` as a server on `addr` with `ServerKind::Auto`,
/// reporting failures with `error_msg`.
async fn run_socket_dev_server_for_addr<F, S>(
  addr: SocketAddr,
  error_msg: &'static str,
  handler: F,
) where
  F: Fn(Request<hyper::body::Incoming>) -> S + Copy + 'static,
  S: Future<Output = HandlerOutput> + 'static,
{
  run_server(
    ServerOptions {
      addr,
      kind: ServerKind::Auto,
      error_msg,
    },
    handler,
  )
  .await
}
/// Routes requests for the mock socket.dev API.
///
/// `POST` requests are delegated to the authenticated batch endpoint
/// handler; `GET /purl/{percent_encoded_purl}` answers with a canned
/// score/alert report for a single npm package. All other requests,
/// malformed purls included, get a 404.
async fn socket_dev_handler(
  req: Request<hyper::body::Incoming>,
) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
  let path = req.uri().path();
  let method = req.method();
  // Handle authenticated mode: POST /v0/purl
  if method == hyper::Method::POST {
    return handle_authenticated_request(req).await;
  }
  // Expected format: /purl/{percent_encoded_purl}
  // where purl is like: pkg:npm/package-name@version
  let Some(encoded_purl) = path.strip_prefix("/purl/") else {
    return Ok(
      Response::builder()
        .status(StatusCode::NOT_FOUND)
        .body(empty_body())?,
    );
  };
  // Decode the percent-encoded purl
  let decoded_purl =
    match percent_encoding::percent_decode_str(encoded_purl).decode_utf8() {
      Ok(s) => s.to_string(),
      Err(_) => {
        return Ok(
          Response::builder()
            .status(StatusCode::NOT_FOUND)
            .body(empty_body())?,
        );
      }
    };
  // Parse the purl format: pkg:npm/package-name@version
  let Some(package_part) = decoded_purl.strip_prefix("pkg:npm/") else {
    return Ok(
      Response::builder()
        .status(StatusCode::NOT_FOUND)
        .body(empty_body())?,
    );
  };
  // Split by @ to get name and version (split from the right to handle scoped packages like @scope/package@1.0.0)
  let parts: Vec<&str> = package_part.rsplitn(2, '@').collect();
  if parts.len() != 2 {
    return Ok(
      Response::builder()
        .status(StatusCode::NOT_FOUND)
        .body(empty_body())?,
    );
  }
  let version = parts[0];
  let name = parts[1];
  // Create the response JSON matching the FirewallResponse structure
  let response_json = json!({
    "id": "81646",
    "name": name,
    "version": version,
    "score": {
      "license": 1.0,
      "maintenance": 0.77,
      "overall": 0.77,
      "quality": 0.94,
      "supplyChain": 1.0,
      "vulnerability": 1.0
    },
    "alerts": [
      { "type": "malware", "action": "error", "severity": "critical", "category": "supplyChainRisk" }
    ]
  });
  let response_body = response_json.to_string();
  Ok(
    Response::builder()
      .status(StatusCode::OK)
      .header("Content-Type", "application/json")
      .body(string_body(&response_body))?,
  )
}
/// Handles the authenticated batch endpoint: reads a JSON body with a
/// `components` array of `{ "purl": ... }` objects and answers with
/// newline-delimited JSON, one canned score/alert report per valid
/// `pkg:npm/{name}@{version}` purl (invalid purls are skipped).
async fn handle_authenticated_request(
  req: Request<hyper::body::Incoming>,
) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
  use http_body_util::BodyExt;
  // Read the request body
  let body_bytes = req.collect().await?.to_bytes();
  let body_str = String::from_utf8(body_bytes.to_vec())?;
  // Parse the JSON body
  let body_json: serde_json::Value = serde_json::from_str(&body_str)?;
  let components = body_json["components"]
    .as_array()
    .ok_or_else(|| anyhow::anyhow!("Missing components array"))?;
  // Build newline-delimited JSON response
  let mut responses = Vec::new();
  for component in components {
    let purl = component["purl"]
      .as_str()
      .ok_or_else(|| anyhow::anyhow!("Missing purl field"))?;
    // Parse the purl format: pkg:npm/package-name@version
    let Some(package_part) = purl.strip_prefix("pkg:npm/") else {
      continue;
    };
    // Split from the right so scoped packages (@scope/pkg@1.0.0) work.
    let parts: Vec<&str> = package_part.rsplitn(2, '@').collect();
    if parts.len() != 2 {
      continue;
    }
    let version = parts[0];
    let name = parts[1];
    let response_json = json!({
      "id": "81646",
      "name": name,
      "version": version,
      "score": {
        "license": 1.0,
        "maintenance": 0.78,
        "overall": 0.78,
        "quality": 0.94,
        "supplyChain": 1.0,
        "vulnerability": 1.0
      },
      "alerts": [
        { "type": "malware", "action": "error", "severity": "critical", "category": "supplyChainRisk" }
      ]
    });
    responses.push(response_json.to_string());
  }
  // Join with newlines for newline-delimited JSON
  let response_body = responses.join("\n");
  Ok(
    Response::builder()
      .status(StatusCode::OK)
      .header("Content-Type", "application/json")
      .body(string_body(&response_body))?,
  )
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/servers/nodejs_org_mirror.rs | tests/util/server/src/servers/nodejs_org_mirror.rs | // Copyright 2018-2025 the Deno authors. MIT license.
//! Server for NodeJS header tarballs, used by `node-gyp` in tests to download headers
//!
//! Loads from `testdata/assets`, if we update our node version in `process.versions` we'll need to
//! update the header tarball there.
#![allow(clippy::print_stderr)]
use std::collections::HashMap;
use std::convert::Infallible;
use std::net::SocketAddr;
use std::sync::LazyLock;
use bytes::Bytes;
use http::Response;
use http::StatusCode;
use http_body_util::Full;
use http_body_util::combinators::UnsyncBoxBody;
use parking_lot::Mutex;
use crate::PathRef;
use crate::servers::hyper_utils::ServerKind;
use crate::servers::hyper_utils::ServerOptions;
use crate::servers::hyper_utils::run_server;
use crate::servers::string_body;
use crate::testdata_path;
/// a little helper extension trait to log errors but convert to option
trait OkWarn<T, E> {
  /// Like `Result::ok`, but prints the error to stderr first.
  fn ok_warn(self) -> Option<T>;
}
impl<T, E> OkWarn<T, E> for Result<T, E>
where
  E: std::fmt::Display,
{
  /// Converts the result to an `Option`, logging any error to stderr.
  fn ok_warn(self) -> Option<T> {
    match self {
      Ok(value) => Some(value),
      Err(err) => {
        eprintln!(
          "test_server warning: error occurred in nodejs_org_mirror.rs: {err}"
        );
        None
      }
    }
  }
}
/// Process-wide, lazily-initialized mirror instance.
pub static NODEJS_MIRROR: LazyLock<NodeJsMirror> =
  LazyLock::new(NodeJsMirror::default);
/// In-memory cache of Node.js header tarballs, extracted `node.lib`
/// files, and their SHA-256 checksums, loaded from
/// `testdata/assets/node-gyp`.
#[derive(Default)]
pub struct NodeJsMirror {
  // file name -> raw bytes (tarballs / extracted node.lib contents)
  cache: Mutex<HashMap<String, Bytes>>,
  // file name -> hex-encoded SHA-256 checksum
  checksum_cache: Mutex<HashMap<String, String>>,
}
/// Resolves `file` inside the `testdata/assets/node-gyp` directory.
fn asset_file_path(file: &str) -> PathRef {
  testdata_path().join("assets").join("node-gyp").join(file)
}
impl NodeJsMirror {
  /// Returns the raw bytes of asset `file`, caching the contents in
  /// memory; `None` (with a logged warning) if the file is missing.
  pub fn get_header_bytes(&self, file: &str) -> Option<Bytes> {
    let mut cache = self.cache.lock();
    let entry = cache.entry(file.to_owned());
    match entry {
      std::collections::hash_map::Entry::Occupied(occupied) => {
        Some(occupied.get().clone())
      }
      std::collections::hash_map::Entry::Vacant(vacant) => {
        let contents = asset_file_path(file);
        let contents = contents
          .read_to_bytes_if_exists()
          .ok_warn()
          .map(Bytes::from)?;
        vacant.insert(contents.clone());
        Some(contents)
      }
    }
  }
  /// Hex-encoded SHA-256 of `bytes`, memoized per file name.
  fn get_checksum(&self, file: &str, bytes: Bytes) -> String {
    use sha2::Digest;
    if let Some(checksum) = self.checksum_cache.lock().get(file).cloned() {
      return checksum;
    }
    let mut hasher = sha2::Sha256::new();
    hasher.update(&bytes);
    let checksum = faster_hex::hex_string(hasher.finalize().as_ref());
    self
      .checksum_cache
      .lock()
      .insert(file.to_owned(), checksum.clone());
    checksum
  }
  /// Builds the `SHASUMS256.txt` body for `version`: the headers
  /// tarball's checksum plus, on Windows, the `win-x64/node.lib`
  /// checksum. One `"{checksum} {file}"` line per entry.
  pub fn get_checksum_file(&self, version: &str) -> Option<String> {
    let mut entries = Vec::with_capacity(2);
    let header_file = header_tar_name(version);
    let header_bytes = self.get_header_bytes(&header_file)?;
    let header_checksum = self.get_checksum(&header_file, header_bytes);
    entries.push((header_file, header_checksum));
    if cfg!(windows) {
      if !cfg!(target_arch = "x86_64") {
        panic!("unsupported target arch on windows, only support x86_64");
      }
      let Some(bytes) = self.get_node_lib_bytes(version, "win-x64") else {
        eprintln!("test server failed to get node lib");
        return None;
      };
      {
        let file = format!("{version}/win-x64/node.lib");
        let checksum = self.get_checksum(&file, bytes);
        // The checksum file lists the path relative to the version dir.
        let filename_for_checksum =
          file.trim_start_matches(&format!("{version}/"));
        entries.push((filename_for_checksum.to_owned(), checksum));
      }
    }
    Some(
      entries
        .into_iter()
        .map(|(file, checksum)| format!("{checksum} {file}"))
        .collect::<Vec<_>>()
        .join("\n"),
    )
  }
  /// Returns the extracted `node.lib` bytes for `version`/`platform`,
  /// extracting `{version}__{platform}__node.lib.tar.gz` on first use
  /// and caching the result.
  pub fn get_node_lib_bytes(
    &self,
    version: &str,
    platform: &str,
  ) -> Option<Bytes> {
    let mut cache = self.cache.lock();
    let file_name = format!("{version}/{platform}/node.lib");
    let entry = cache.entry(file_name);
    match entry {
      std::collections::hash_map::Entry::Occupied(occupied) => {
        Some(occupied.get().clone())
      }
      std::collections::hash_map::Entry::Vacant(vacant) => {
        let tarball_filename =
          format!("{version}__{platform}__node.lib.tar.gz");
        let contents = asset_file_path(&tarball_filename);
        let contents = contents.read_to_bytes_if_exists().ok_warn()?;
        let extracted = Bytes::from(extract_tarball(&contents)?);
        vacant.insert(extracted.clone());
        Some(extracted)
      }
    }
  }
}
/// Builds the headers tarball file name for a Node version string,
/// e.g. `node-v20.0.0-headers.tar.gz`.
fn header_tar_name(version: &str) -> String {
  ["node-", version, "-headers.tar.gz"].concat()
}
/// Gunzips a tarball and concatenates the contents of all of its entries
/// into one byte buffer; logs and returns `None` on any error.
fn extract_tarball(compressed: &[u8]) -> Option<Vec<u8>> {
  let mut out = Vec::with_capacity(compressed.len());
  let decoder = flate2::read::GzDecoder::new(compressed);
  let mut archive = tar::Archive::new(decoder);
  for file in archive.entries().ok_warn()? {
    let mut file = file.ok_warn()?;
    std::io::copy(&mut file, &mut out).ok_warn()?;
  }
  Some(out)
}
/// Server for node JS header tarballs, used by `node-gyp` in tests
///
/// Serves `/{version}/SHASUMS256.txt`,
/// `/{version}/node-{version}-headers.tar.gz`, and
/// `/{version}/{platform}/node.lib` from `NODEJS_MIRROR`;
/// any other path gets a 404.
pub async fn nodejs_org_mirror(port: u16) {
  let addr = SocketAddr::from(([127, 0, 0, 1], port));
  run_server(
    ServerOptions {
      addr,
      error_msg: "nodejs mirror server error",
      kind: ServerKind::Auto,
    },
    |req| async move {
      let path = req.uri().path();
      if path.contains("-headers.tar.gz")
        || path.contains("SHASUMS256.txt")
        || path.contains("node.lib")
      {
        // Path shape: /{version}/{file} or /{version}/{platform}/node.lib
        let mut parts = path.split('/');
        let _ = parts.next(); // empty
        let Some(version) = parts.next() else {
          return not_found(format!("missing node version in path: {path}"));
        };
        let Some(file) = parts.next() else {
          return not_found(format!("missing file version in path: {path}"));
        };
        if file == "SHASUMS256.txt" {
          let Some(checksum_file) = NODEJS_MIRROR.get_checksum_file(version)
          else {
            return not_found(format!("failed to get header checksum: {path}"));
          };
          return Ok(Response::new(string_body(&checksum_file)));
        } else if !file.contains("headers") {
          // Not a headers tarball: expect {platform}/node.lib next.
          let platform = file;
          let Some(file) = parts.next() else {
            return not_found("expected file");
          };
          if file != "node.lib" {
            return not_found(format!(
              "unexpected file name, expected node.lib, got: {file}"
            ));
          }
          let Some(bytes) = NODEJS_MIRROR.get_node_lib_bytes(version, platform)
          else {
            return not_found("expected node lib bytes");
          };
          return Ok(Response::new(UnsyncBoxBody::new(Full::new(bytes))));
        }
        let Some(bytes) = NODEJS_MIRROR.get_header_bytes(file) else {
          return not_found(format!(
            "couldn't find headers for version {version}, missing file: {file}"
          ));
        };
        Ok(Response::new(UnsyncBoxBody::new(Full::new(bytes))))
      } else {
        not_found(format!("unexpected request path: {path}"))
      }
    },
  )
  .await
}
/// Builds a 404 response carrying `msg`, also echoing the message to
/// stderr so test failures are easier to diagnose.
fn not_found(
  msg: impl AsRef<str>,
) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
  let msg = msg.as_ref();
  eprintln!(
    "test_server warning: error likely occurred in nodejs_org_mirror.rs: {msg}"
  );
  Response::builder()
    .status(StatusCode::NOT_FOUND)
    .body(string_body(msg))
    .map_err(|e| e.into())
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/servers/npm_registry.rs | tests/util/server/src/servers/npm_registry.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::collections::HashMap;
use std::convert::Infallible;
use std::future::Future;
use std::net::Ipv6Addr;
use std::net::SocketAddr;
use std::net::SocketAddrV6;
use std::path::PathBuf;
use base64::Engine;
use base64::prelude::BASE64_STANDARD;
use bytes::Bytes;
use futures::FutureExt;
use futures::future::LocalBoxFuture;
use http::HeaderMap;
use http::HeaderValue;
use http_body_util::BodyExt;
use http_body_util::combinators::UnsyncBoxBody;
use hyper::Request;
use hyper::Response;
use hyper::StatusCode;
use hyper::body::Incoming;
use serde_json::json;
use sha2::Digest;
use super::ServerKind;
use super::ServerOptions;
use super::custom_headers;
use super::empty_body;
use super::hyper_utils::HandlerOutput;
use super::run_server;
use super::string_body;
use crate::npm;
use crate::root_path;
/// Spawns the public (unauthenticated) test npm registry on `port`.
pub fn public_npm_registry(port: u16) -> Vec<LocalBoxFuture<'static, ()>> {
  run_npm_server(port, "npm registry server error", {
    move |req| async move {
      handle_req_for_registry(req, &npm::PUBLIC_TEST_NPM_REGISTRY).await
    }
  })
}
// Bearer tokens accepted by the two authenticated private registries.
const PRIVATE_NPM_REGISTRY_AUTH_TOKEN: &str = "private-reg-token";
const PRIVATE_NPM_REGISTRY_2_AUTH_TOKEN: &str = "private-reg-token2";
// Basic-auth credentials: `deno:land` encoded using base64
const PRIVATE_NPM_REGISTRY_AUTH_BASE64: &str = "ZGVubzpsYW5k";
// Basic-auth credentials: `deno:land2` encoded using base64
const PRIVATE_NPM_REGISTRY_2_AUTH_BASE64: &str = "ZGVubzpsYW5kMg==";
/// Spawns private test registry 1 (Bearer/Basic auth required).
pub fn private_npm_registry1(port: u16) -> Vec<LocalBoxFuture<'static, ()>> {
  run_npm_server(
    port,
    "npm private registry server error",
    private_npm_registry1_handler,
  )
}
/// Spawns private test registry 2 (Bearer/Basic auth required).
pub fn private_npm_registry2(port: u16) -> Vec<LocalBoxFuture<'static, ()>> {
  run_npm_server(
    port,
    "npm private registry server error",
    private_npm_registry2_handler,
  )
}
/// Spawns private test registry 3 (no authentication).
pub fn private_npm_registry3(port: u16) -> Vec<LocalBoxFuture<'static, ()>> {
  run_npm_server(
    port,
    "npm private registry server error",
    private_npm_registry3_handler,
  )
}
/// Spawns `handler` on both the IPv4 and IPv6 loopback addresses for
/// `port`, returning one server future per address.
fn run_npm_server<F, S>(
  port: u16,
  error_msg: &'static str,
  handler: F,
) -> Vec<LocalBoxFuture<'static, ()>>
where
  F: Fn(Request<hyper::body::Incoming>) -> S + Copy + 'static,
  S: Future<Output = HandlerOutput> + 'static,
{
  let npm_registry_addr = SocketAddr::from(([127, 0, 0, 1], port));
  let ipv6_loopback = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
  let npm_registry_ipv6_addr =
    SocketAddr::V6(SocketAddrV6::new(ipv6_loopback, port, 0, 0));
  vec![
    run_npm_server_for_addr(npm_registry_addr, error_msg, handler)
      .boxed_local(),
    // necessary because the npm binary will sometimes resolve localhost to ::1
    run_npm_server_for_addr(npm_registry_ipv6_addr, error_msg, handler)
      .boxed_local(),
  ]
}
/// Ensures the esbuild prebuilt is set up, then runs `handler` as an
/// auto-protocol server bound to `addr`.
async fn run_npm_server_for_addr<F, S>(
  addr: SocketAddr,
  error_msg: &'static str,
  handler: F,
) where
  F: Fn(Request<hyper::body::Incoming>) -> S + Copy + 'static,
  S: Future<Output = HandlerOutput> + 'static,
{
  ensure_esbuild_prebuilt().await.unwrap();
  run_server(
    ServerOptions {
      addr,
      kind: ServerKind::Auto,
      error_msg,
    },
    handler,
  )
  .await
}
/// Handler for private registry 1: the `authorization` header must be
/// `Bearer {PRIVATE_NPM_REGISTRY_AUTH_TOKEN}` or
/// `Basic {PRIVATE_NPM_REGISTRY_AUTH_BASE64}`, otherwise 401.
async fn private_npm_registry1_handler(
  req: Request<hyper::body::Incoming>,
) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
  let auth = req
    .headers()
    .get("authorization")
    .and_then(|x| x.to_str().ok())
    .unwrap_or_default();
  if auth != format!("Bearer {}", PRIVATE_NPM_REGISTRY_AUTH_TOKEN)
    && auth != format!("Basic {}", PRIVATE_NPM_REGISTRY_AUTH_BASE64)
  {
    return Ok(
      Response::builder()
        .status(StatusCode::UNAUTHORIZED)
        .body(empty_body())
        .unwrap(),
    );
  }
  handle_req_for_registry(req, &npm::PRIVATE_TEST_NPM_REGISTRY_1).await
}
/// Handler for private registry 2: the `authorization` header must be
/// `Bearer {PRIVATE_NPM_REGISTRY_2_AUTH_TOKEN}` or
/// `Basic {PRIVATE_NPM_REGISTRY_2_AUTH_BASE64}`, otherwise 401.
async fn private_npm_registry2_handler(
  req: Request<hyper::body::Incoming>,
) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
  let auth = req
    .headers()
    .get("authorization")
    .and_then(|x| x.to_str().ok())
    .unwrap_or_default();
  if auth != format!("Bearer {}", PRIVATE_NPM_REGISTRY_2_AUTH_TOKEN)
    && auth != format!("Basic {}", PRIVATE_NPM_REGISTRY_2_AUTH_BASE64)
  {
    return Ok(
      Response::builder()
        .status(StatusCode::UNAUTHORIZED)
        .body(empty_body())
        .unwrap(),
    );
  }
  handle_req_for_registry(req, &npm::PRIVATE_TEST_NPM_REGISTRY_2).await
}
/// Handler for private registry 3 — no authentication is enforced.
async fn private_npm_registry3_handler(
  req: Request<hyper::body::Incoming>,
) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
  // No auth for this registry
  handle_req_for_registry(req, &npm::PRIVATE_TEST_NPM_REGISTRY_3).await
}
/// Serves one request against `test_npm_registry`.
///
/// Resolution order: the `/-/npm/v1/security/audits` endpoint, then a
/// file that already exists under the registry root directory, then
/// content produced by `try_serve_npm_registry`; 404 otherwise.
async fn handle_req_for_registry(
  req: Request<Incoming>,
  test_npm_registry: &npm::TestNpmRegistry,
) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
  let root_dir = test_npm_registry.root_dir();
  // serve the registry package files
  let uri_path = req.uri().path();
  if uri_path == "/-/npm/v1/security/audits" {
    return npm_security_audits(req).await;
  }
  let mut file_path = root_dir.to_path_buf();
  // Undo percent-encoding of `/` in scoped package names.
  file_path.push(uri_path[1..].replace("%2f", "/").replace("%2F", "/"));
  // serve if the filepath exists
  if let Ok(file) = tokio::fs::read(&file_path).await {
    let file_resp = custom_headers(uri_path, file);
    return Ok(file_resp);
  }
  // otherwise try to serve from the registry
  if let Some(resp) = try_serve_npm_registry(
    uri_path,
    file_path.clone(),
    req.headers(),
    test_npm_registry,
  )
  .await
  {
    return resp;
  }
  Response::builder()
    .status(StatusCode::NOT_FOUND)
    .body(empty_body())
    .map_err(|e| e.into())
}
/// Serves content for a test-scope package (`{scope_name}/{name}`):
/// either a tarball (`{name}/{version}.tgz`) or the package's
/// `registry.json` metadata, with ETag-based `If-None-Match`/304
/// revalidation for the latter.
///
/// Returns `Ok(None)` when the path does not map to known content.
fn handle_custom_npm_registry_path(
  scope_name: &str,
  path: &str,
  headers: &HeaderMap<HeaderValue>,
  test_npm_registry: &npm::TestNpmRegistry,
) -> Result<Option<Response<UnsyncBoxBody<Bytes, Infallible>>>, anyhow::Error> {
  let mut parts = path
    .split('/')
    .filter(|p| !p.is_empty())
    .collect::<Vec<_>>();
  // First segment is the package name; the rest selects tarball vs metadata.
  let remainder = parts.split_off(1);
  let name = parts[0];
  let package_name = format!("{}/{}", scope_name, name);
  if remainder.len() == 1 {
    // Tarball request: {name}/{version}.tgz
    if let Some(file_bytes) = test_npm_registry
      .tarball_bytes(&package_name, remainder[0].trim_end_matches(".tgz"))?
    {
      let file_resp = custom_headers("file.tgz", file_bytes);
      return Ok(Some(file_resp));
    }
  } else if remainder.is_empty()
    && let Some(registry_file) =
      test_npm_registry.registry_file(&package_name)?
  {
    // The ETag is the base64 SHA-256 of the metadata file.
    let actual_etag = format!(
      "\"{}\"",
      BASE64_STANDARD.encode(sha2::Sha256::digest(&registry_file))
    );
    if headers.get("If-None-Match").and_then(|v| v.to_str().ok())
      == Some(actual_etag.as_str())
    {
      // Client already has this version — answer 304 with an empty body.
      let mut response = Response::new(UnsyncBoxBody::new(
        http_body_util::Full::new(Bytes::from(vec![])),
      ));
      *response.status_mut() = StatusCode::NOT_MODIFIED;
      return Ok(Some(response));
    }
    let mut file_resp = custom_headers("registry.json", registry_file);
    file_resp.headers_mut().append(
      http::header::ETAG,
      http::header::HeaderValue::from_str(&actual_etag).unwrap(),
    );
    return Ok(Some(file_resp));
  }
  Ok(None)
}
/// When the `DENO_TEST_UTIL_UPDATE_NPM` env var is set to "1", missing
/// npm packages will be downloaded and saved to the
/// tests/registry/npm directory.
fn should_download_npm_packages() -> bool {
  matches!(
    std::env::var("DENO_TEST_UTIL_UPDATE_NPM"),
    Ok(value) if value == "1"
  )
}
async fn try_serve_npm_registry(
uri_path: &str,
mut testdata_file_path: PathBuf,
headers: &HeaderMap<HeaderValue>,
test_npm_registry: &npm::TestNpmRegistry,
) -> Option<Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error>> {
if let Some((scope_name, package_name_with_path)) = test_npm_registry
.get_test_scope_and_package_name_with_path_from_uri_path(uri_path)
{
// serve all requests to the `DENOTEST_SCOPE_NAME` or `DENOTEST2_SCOPE_NAME`
// using the file system at that path
match handle_custom_npm_registry_path(
scope_name,
package_name_with_path,
headers,
test_npm_registry,
) {
Ok(Some(response)) => return Some(Ok(response)),
Ok(None) => {} // ignore, not found
Err(err) => {
return Some(
Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body(string_body(&format!("{err:#}")))
.map_err(|e| e.into()),
);
}
}
} else {
// otherwise, serve based on registry.json and tgz files
let is_tarball = uri_path.ends_with(".tgz");
if !is_tarball {
testdata_file_path.push("registry.json");
}
if let Ok(file) = tokio::fs::read(&testdata_file_path).await {
let file_resp = custom_headers(uri_path, file);
return Some(Ok(file_resp));
} else if should_download_npm_packages() {
if let Err(err) = download_npm_registry_file(
test_npm_registry,
uri_path,
&testdata_file_path,
is_tarball,
)
.await
{
return Some(
Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body(string_body(&format!("{err:#}")))
.map_err(|e| e.into()),
);
};
// serve the file
if let Ok(file) = tokio::fs::read(&testdata_file_path).await {
let file_resp = custom_headers(uri_path, file);
return Some(Ok(file_resp));
}
}
}
None
}
// Replaces URL of public npm registry (`https://registry.npmjs.org/`) with
// the test registry (`http://localhost:4260`).
//
// These strings end up in `registry.json` files for each downloaded package
// that are stored in `tests/testdata/` directory.
//
// If another npm test registry wants to use them, it should replace
// these values with appropriate URL when serving.
fn replace_default_npm_registry_url_with_test_npm_registry_url(
text: String,
npm_registry: &npm::TestNpmRegistry,
package_name: &str,
) -> String {
let package_name = percent_encoding::percent_decode_str(package_name)
.decode_utf8()
.unwrap();
text.replace(
&format!("https://registry.npmjs.org/{}/-/", package_name),
&npm_registry.package_url(&package_name),
)
}
async fn download_npm_registry_file(
test_npm_registry: &npm::TestNpmRegistry,
uri_path: &str,
testdata_file_path: &PathBuf,
is_tarball: bool,
) -> Result<(), anyhow::Error> {
let uri_path = uri_path.trim_start_matches('/');
let url_parts = uri_path.split('/').collect::<Vec<_>>();
let package_name = if url_parts[0].starts_with('@') {
url_parts.into_iter().take(2).collect::<Vec<_>>().join("/")
} else {
url_parts.into_iter().take(1).collect::<Vec<_>>().join("/")
};
let url = if is_tarball {
let file_name = testdata_file_path.file_name().unwrap().to_string_lossy();
format!("https://registry.npmjs.org/{package_name}/-/{file_name}")
} else {
format!("https://registry.npmjs.org/{package_name}")
};
let client = reqwest::Client::new();
let response = client.get(url).send().await?;
let bytes = response.bytes().await?;
let bytes = if is_tarball {
bytes.to_vec()
} else {
replace_default_npm_registry_url_with_test_npm_registry_url(
String::from_utf8(bytes.to_vec()).unwrap(),
test_npm_registry,
&package_name,
)
.into_bytes()
};
std::fs::create_dir_all(testdata_file_path.parent().unwrap())?;
std::fs::write(testdata_file_path, bytes)?;
Ok(())
}
const PREBUILT_URL: &str = "https://raw.githubusercontent.com/denoland/deno_third_party/de0d517e6f703fb4735b7aa5806f69fbdbb1d907/prebuilt/";
async fn ensure_esbuild_prebuilt() -> Result<(), anyhow::Error> {
let bin_name = match (std::env::consts::ARCH, std::env::consts::OS) {
("x86_64", "linux" | "macos" | "apple") => "esbuild-x64",
("aarch64", "linux" | "macos" | "apple") => "esbuild-aarch64",
("x86_64", "windows") => "esbuild-x64.exe",
("aarch64", "windows") => "esbuild-arm64.exe",
_ => return Err(anyhow::anyhow!("unsupported platform")),
};
let folder = match std::env::consts::OS {
"linux" => "linux64",
"windows" => "win",
"macos" | "apple" => "mac",
_ => return Err(anyhow::anyhow!("unsupported platform")),
};
let esbuild_prebuilt = root_path()
.join("third_party/prebuilt")
.join(folder)
.join(bin_name);
if esbuild_prebuilt.exists() {
return Ok(());
}
let url = format!("{PREBUILT_URL}{folder}/{bin_name}");
let response = reqwest::get(url).await?;
let bytes = response.bytes().await?;
tokio::fs::create_dir_all(esbuild_prebuilt.parent()).await?;
tokio::fs::write(&esbuild_prebuilt, bytes).await?;
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let mut perms = tokio::fs::metadata(&esbuild_prebuilt).await?.permissions();
perms.set_mode(0o755); // rwxr-xr-x
tokio::fs::set_permissions(&esbuild_prebuilt, perms).await?;
}
Ok(())
}
async fn npm_security_audits(
req: Request<Incoming>,
) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
let body = req.into_body().collect().await?.to_bytes();
let json_obj: serde_json::Value = serde_json::from_slice(&body)?;
let Some(resp_body) = process_npm_security_audits_body(json_obj) else {
return Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(empty_body())
.map_err(|e| e.into());
};
Response::builder()
.body(string_body(&serde_json::to_string(&resp_body).unwrap()))
.map_err(|e| e.into())
}
fn process_npm_security_audits_body(
value: serde_json::Value,
) -> Option<serde_json::Value> {
let dependency_count = 0;
let dev_dependency_count = 0;
let optional_dependency_count = 0;
let mut actions = vec![];
let mut advisories = HashMap::new();
let vuln_info = 0;
let vuln_low = 0;
let vuln_moderate = 0;
let mut vuln_high = 0;
let mut vuln_critical = 0;
let requires_map = value.get("requires")?.as_object()?;
let requires_map_keys = requires_map.keys().cloned().collect::<Vec<_>>();
if requires_map_keys.contains(&"@denotest/with-vuln1".to_string()) {
actions.push(get_action_for_with_vuln1());
advisories.insert(101010, get_advisory_for_with_vuln1());
vuln_high += 1;
}
if requires_map_keys.contains(&"@denotest/using-vuln".to_string()) {
actions.extend_from_slice(&get_actions_for_with_vuln2());
advisories.insert(202020, get_advisory_for_with_vuln2());
vuln_critical += 1;
}
if requires_map_keys.contains(&"@denotest/with-vuln3".to_string()) {
actions.push(get_action_for_with_vuln3());
advisories.insert(303030, get_advisory_for_with_vuln3());
vuln_high += 1;
}
Some(json!({
"actions": actions,
"advisories": advisories,
"muted": [],
"metadata": {
"vulnerabilities": {
"info": vuln_info,
"low": vuln_low,
"moderate": vuln_moderate,
"high": vuln_high,
"critical":vuln_critical,
},
"dependencies": dependency_count,
"devDependencies": dev_dependency_count,
"optionalDependencies": optional_dependency_count,
"totalDependencies": dependency_count + dev_dependency_count + optional_dependency_count
}
}))
}
fn get_action_for_with_vuln1() -> serde_json::Value {
json!({
"isMajor": false,
"action": "install",
"resolves": [{
"id": 101010,
"path": "@denotest/with-vuln1",
"dev": false,
"optional": false,
"bundled": false,
}],
"module": "@denotest/with-vuln1",
"target": "1.1.0"
})
}
fn get_advisory_for_with_vuln1() -> serde_json::Value {
json!({
"findings": [
{"version": "1.0.0", "paths": ["@denotest/with-vuln1"]}
],
"id": 101010,
"overview": "Lorem ipsum dolor sit amet",
"title": "@denotest/with-vuln1 is susceptible to prototype pollution",
"severity": "high",
"module_name": "@edenotest/with-vuln1",
"vulnerable_versions": "<1.1.0",
"recommendations": "Upgrade to version 1.1.0 or later",
"patched_versions": ">=1.1.0",
"url": "https://example.com/vuln/101010"
})
}
fn get_actions_for_with_vuln2() -> Vec<serde_json::Value> {
vec![
json!({
"isMajor": true,
"action": "install",
"resolves": [{
"id": 202020,
"path": "@denotest/using-vuln>@denotest/with-vuln2",
"dev": false,
"optional": false,
"bundled": false,
}],
"module": "@denotest/with-vuln2",
"target": "2.0.0"
}),
json!({
"action": "review",
"resolves": [{
"id": 202020,
"path": "@denotest/using-vuln>@denotest/with-vuln2",
"dev": false,
"optional": false,
"bundled": false,
}],
"module": "@denotest/with-vuln2"
}),
]
}
fn get_advisory_for_with_vuln2() -> serde_json::Value {
json!({
"findings": [
{"version": "1.5.0", "paths": ["@denotest/using-vuln>@denotest/with-vuln2"]}
],
"id": 202020,
"overview": "Lorem ipsum dolor sit amet",
"title": "@denotest/with-vuln2 can steal crypto keys",
"severity": "critical",
"module_name": "@edenotest/with-vuln2",
"vulnerable_versions": "<2.0.0",
"recommendations": "Upgrade to version 2.0.0 or later",
"patched_versions": ">=2.0.0",
"url": "https://example.com/vuln/202020"
})
}
fn get_action_for_with_vuln3() -> serde_json::Value {
json!({
"isMajor": false,
"action": "install",
"resolves": [{
"id": 303030,
"path": "@denotest/with-vuln3",
"dev": false,
"optional": false,
"bundled": false,
}],
// Note: "module" field is intentionally omitted to test fallback logic
"target": "1.1.0"
})
}
fn get_advisory_for_with_vuln3() -> serde_json::Value {
json!({
"findings": [
{"version": "1.0.0", "paths": ["@denotest/with-vuln3"]}
],
"id": 303030,
"overview": "Lorem ipsum dolor sit amet",
"title": "@denotest/with-vuln3 has security vulnerability",
"severity": "high",
"module_name": "@edenotest/with-vuln3",
"vulnerable_versions": "<1.1.0",
"recommendations": "Upgrade to version 1.1.0 or later",
"patched_versions": ">=1.1.0",
"url": "https://example.com/vuln/303030"
})
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/servers/jsr_registry.rs | tests/util/server/src/servers/jsr_registry.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::convert::Infallible;
use std::net::SocketAddr;
use std::path::Path;
use std::sync::Mutex;
use base64::Engine as _;
use base64::engine::general_purpose::STANDARD_NO_PAD;
use bytes::Bytes;
use http_body_util::Empty;
use http_body_util::Full;
use http_body_util::combinators::UnsyncBoxBody;
use hyper::Request;
use hyper::Response;
use hyper::StatusCode;
use hyper::body::Incoming;
use once_cell::sync::Lazy;
use serde_json::json;
use super::ServerKind;
use super::ServerOptions;
use super::run_server;
use crate::tests_path;
pub async fn registry_server(port: u16) {
let registry_server_addr = SocketAddr::from(([127, 0, 0, 1], port));
run_server(
ServerOptions {
addr: registry_server_addr,
error_msg: "Registry server error",
kind: ServerKind::Auto,
},
registry_server_handler,
)
.await
}
pub async fn provenance_mock_server(port: u16) {
let addr = SocketAddr::from(([127, 0, 0, 1], port));
run_server(
ServerOptions {
addr,
error_msg: "Provenance mock server error",
kind: ServerKind::Auto,
},
provenance_mock_server_handler,
)
.await
}
async fn provenance_mock_server_handler(
req: Request<Incoming>,
) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
let path = req.uri().path();
// OIDC request
if path.starts_with("/gha_oidc") {
let jwt_claim = json!({
"sub": "divy",
"email": "divy@deno.com",
"iss": "https://github.com",
});
let token = format!(
"AAA.{}.",
STANDARD_NO_PAD.encode(serde_json::to_string(&jwt_claim).unwrap())
);
let body = serde_json::to_string_pretty(&json!({
"value": token,
}));
let res = Response::new(UnsyncBoxBody::new(Full::from(body.unwrap())));
return Ok(res);
}
// Fulcio
if path.starts_with("/api/v2/signingCert") {
let body = serde_json::to_string_pretty(&json!({
"signedCertificateEmbeddedSct": {
"chain": {
"certificates": [
"fake_certificate"
]
}
}
}));
let res = Response::new(UnsyncBoxBody::new(Full::from(body.unwrap())));
return Ok(res);
}
// Rekor
if path.starts_with("/api/v1/log/entries") {
let body = serde_json::to_string_pretty(&json!({
"transparency_log_1": {
"logID": "test_log_id",
"logIndex": 42069,
}
}));
let res = Response::new(UnsyncBoxBody::new(Full::from(body.unwrap())));
return Ok(res);
}
let empty_body = UnsyncBoxBody::new(Empty::new());
let res = Response::builder()
.status(StatusCode::NOT_FOUND)
.body(empty_body)?;
Ok(res)
}
async fn registry_server_handler(
req: Request<Incoming>,
) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
let path = req.uri().path();
// TODO(bartlomieju): add a proper router here
if path.starts_with("/api/scope/") {
let body = serde_json::to_string_pretty(&json!({})).unwrap();
let res = Response::new(UnsyncBoxBody::new(Full::from(body)));
return Ok(res);
} else if path.starts_with("/api/scopes/") {
let body = serde_json::to_string_pretty(&json!({
"id": "sdfwqer-sffg-qwerasdf",
"status": "success",
"error": null
}))
.unwrap();
let res = Response::new(UnsyncBoxBody::new(Full::from(body)));
return Ok(res);
} else if path.starts_with("/api/publish_status/") {
let body = serde_json::to_string_pretty(&json!({
"id": "sdfwqer-qwer-qwerasdf",
"status": "success",
"error": null
}))
.unwrap();
let res = Response::new(UnsyncBoxBody::new(Full::from(body)));
return Ok(res);
}
let accept_header = req
.headers()
.get("accept")
.and_then(|s| s.to_str().ok())
.unwrap_or_default();
if accept_header != "*/*" {
let res = Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(UnsyncBoxBody::new(Empty::new()))?;
return Ok(res);
}
// serve the registry package files
let mut file_path = tests_path().join("registry").join("jsr").to_path_buf();
file_path.push(
req.uri().path()[1..]
.replace("%2f", "/")
.replace("%2F", "/"),
);
if let Ok(body) = tokio::fs::read(&file_path).await {
let body = if let Some(version) = file_path
.file_name()
.unwrap()
.to_string_lossy()
.strip_suffix("_meta.json")
{
// fill the manifest with checksums found in the directory so that
// we don't need to maintain them manually in the testdata directory
let mut meta: serde_json::Value = serde_json::from_slice(&body)?;
let mut manifest =
manifest_sorted(meta.get("manifest").cloned().unwrap_or(json!({})));
let version_dir = file_path.parent().unwrap().join(version);
fill_manifest_at_dir(&mut manifest, &version_dir);
meta
.as_object_mut()
.unwrap()
.insert("manifest".to_string(), json!(manifest));
serde_json::to_string(&meta).unwrap().into_bytes()
} else {
body
};
return Ok(Response::new(UnsyncBoxBody::new(
http_body_util::Full::new(Bytes::from(body)),
)));
}
let empty_body = UnsyncBoxBody::new(Empty::new());
let res = Response::builder()
.status(StatusCode::NOT_FOUND)
.body(empty_body)?;
Ok(res)
}
fn manifest_sorted(
meta: serde_json::Value,
) -> BTreeMap<String, serde_json::Value> {
let mut manifest = BTreeMap::new();
if let serde_json::Value::Object(files) = meta {
for (file, checksum) in files {
manifest.insert(file.clone(), checksum.clone());
}
}
manifest
}
fn fill_manifest_at_dir(
manifest: &mut BTreeMap<String, serde_json::Value>,
dir: &Path,
) {
let file_system_manifest = get_manifest_entries_for_dir(dir);
for (file_path, value) in file_system_manifest {
manifest.entry(file_path).or_insert(value);
}
}
static DIR_MANIFEST_CACHE: Lazy<
Mutex<HashMap<String, BTreeMap<String, serde_json::Value>>>,
> = Lazy::new(Default::default);
fn get_manifest_entries_for_dir(
dir: &Path,
) -> BTreeMap<String, serde_json::Value> {
fn inner_fill(
root_dir: &Path,
dir: &Path,
manifest: &mut BTreeMap<String, serde_json::Value>,
) {
for entry in std::fs::read_dir(dir).unwrap() {
let entry = entry.unwrap();
let path = entry.path();
if path.is_file() {
let file_bytes = std::fs::read(&path).unwrap();
let checksum = format!("sha256-{}", get_checksum(&file_bytes));
let relative_path = path
.to_string_lossy()
.strip_prefix(&root_dir.to_string_lossy().into_owned())
.unwrap()
.replace('\\', "/");
manifest.insert(
relative_path,
json!({
"size": file_bytes.len(),
"checksum": checksum,
}),
);
} else if path.is_dir() {
inner_fill(root_dir, &path, manifest);
}
}
}
DIR_MANIFEST_CACHE
.lock()
.unwrap()
.entry(dir.to_string_lossy().into_owned())
.or_insert_with(|| {
let mut manifest = BTreeMap::new();
inner_fill(dir, dir, &mut manifest);
manifest
})
.clone()
}
fn get_checksum(bytes: &[u8]) -> String {
use sha2::Digest;
let mut hasher = sha2::Sha256::new();
hasher.update(bytes);
format!("{:x}", hasher.finalize())
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/servers/mod.rs | tests/util/server/src/servers/mod.rs | // Copyright 2018-2025 the Deno authors. MIT license.
// Usage: provide a port as argument to run hyper_hello benchmark server
// otherwise this starts multiple servers on many ports for test endpoints.
use std::collections::HashMap;
use std::convert::Infallible;
use std::env;
use std::net::SocketAddr;
use std::result::Result;
use std::time::Duration;
use base64::Engine;
use base64::prelude::BASE64_STANDARD;
use bytes::Bytes;
use denokv_proto::datapath::AtomicWrite;
use denokv_proto::datapath::AtomicWriteOutput;
use denokv_proto::datapath::AtomicWriteStatus;
use denokv_proto::datapath::ReadRangeOutput;
use denokv_proto::datapath::SnapshotRead;
use denokv_proto::datapath::SnapshotReadOutput;
use denokv_proto::datapath::SnapshotReadStatus;
use futures::FutureExt;
use futures::Stream;
use futures::StreamExt;
use http;
use http::HeaderValue;
use http::Method;
use http::Request;
use http::Response;
use http::StatusCode;
use http_body_util::BodyExt;
use http_body_util::Empty;
use http_body_util::Full;
use http_body_util::combinators::UnsyncBoxBody;
use hyper_utils::run_server_with_remote_addr;
use pretty_assertions::assert_eq;
use prost::Message;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;
mod grpc;
mod hyper_utils;
mod jsr_registry;
mod nodejs_org_mirror;
mod npm_registry;
mod socket_dev;
mod ws;
use hyper_utils::ServerKind;
use hyper_utils::ServerOptions;
use hyper_utils::run_server;
use hyper_utils::run_server_with_acceptor;
use super::https::SupportedHttpVersions;
use super::https::get_tls_listener_stream;
use super::testdata_path;
use crate::PathRef;
use crate::TEST_SERVERS_COUNT;
use crate::eprintln;
use crate::prebuilt_path;
use crate::println;
pub(crate) const PORT: u16 = 4545;
const TEST_AUTH_TOKEN: &str = "abcdef123456789";
const TEST_BASIC_AUTH_USERNAME: &str = "testuser123";
const TEST_BASIC_AUTH_PASSWORD: &str = "testpassabc";
const KV_DATABASE_ID: &str = "11111111-1111-1111-1111-111111111111";
const KV_ACCESS_TOKEN: &str = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
const KV_DATABASE_TOKEN: &str = "MOCKMOCKMOCKMOCKMOCKMOCKMOCK";
const REDIRECT_PORT: u16 = 4546;
const ANOTHER_REDIRECT_PORT: u16 = 4547;
const DOUBLE_REDIRECTS_PORT: u16 = 4548;
const INF_REDIRECTS_PORT: u16 = 4549;
const REDIRECT_ABSOLUTE_PORT: u16 = 4550;
const AUTH_REDIRECT_PORT: u16 = 4551;
const TLS_CLIENT_AUTH_PORT: u16 = 4552;
const BASIC_AUTH_REDIRECT_PORT: u16 = 4554;
// 4555 is used by the proxy server
// 4556 is used by net_listen_allow_localhost_4555_fail
const TLS_PORT: u16 = 4557;
// 4558 is used by net_listen_allow_localhost_4555
const HTTPS_PORT: u16 = 5545;
const H1_ONLY_TLS_PORT: u16 = 5546;
const H2_ONLY_TLS_PORT: u16 = 5547;
const H1_ONLY_PORT: u16 = 5548;
const H2_ONLY_PORT: u16 = 5549;
const HTTPS_CLIENT_AUTH_PORT: u16 = 5552;
const WS_PORT: u16 = 4242;
const WSS_PORT: u16 = 4243;
const WSS2_PORT: u16 = 4249;
const WS_CLOSE_PORT: u16 = 4244;
const WS_HANG_PORT: u16 = 4264;
const WS_PING_PORT: u16 = 4245;
const H2_GRPC_PORT: u16 = 4246;
const H2S_GRPC_PORT: u16 = 4247;
pub(crate) const JSR_REGISTRY_SERVER_PORT: u16 = 4250;
pub(crate) const PROVENANCE_MOCK_SERVER_PORT: u16 = 4251;
pub(crate) const NODEJS_ORG_MIRROR_SERVER_PORT: u16 = 4252;
pub(crate) const PUBLIC_NPM_REGISTRY_PORT: u16 = 4260;
pub(crate) const PRIVATE_NPM_REGISTRY_1_PORT: u16 = 4261;
pub(crate) const PRIVATE_NPM_REGISTRY_2_PORT: u16 = 4262;
pub(crate) const PRIVATE_NPM_REGISTRY_3_PORT: u16 = 4263;
pub(crate) const SOCKET_DEV_API_PORT: u16 = 4268;
// Use the single-threaded scheduler. The hyper server is used as a point of
// comparison for the (single-threaded!) benchmarks in cli/bench. We're not
// comparing apples to apples if we use the default multi-threaded scheduler.
#[tokio::main(flavor = "current_thread")]
pub async fn run_all_servers() {
if let Some(port) = env::args().nth(1) {
return hyper_hello(port.parse::<u16>().unwrap()).await;
}
let redirect_server_fut = wrap_redirect_server(REDIRECT_PORT);
let double_redirects_server_fut =
wrap_double_redirect_server(DOUBLE_REDIRECTS_PORT);
let inf_redirects_server_fut = wrap_inf_redirect_server(INF_REDIRECTS_PORT);
let another_redirect_server_fut =
wrap_another_redirect_server(ANOTHER_REDIRECT_PORT);
let auth_redirect_server_fut = wrap_auth_redirect_server(AUTH_REDIRECT_PORT);
let basic_auth_redirect_server_fut =
wrap_basic_auth_redirect_server(BASIC_AUTH_REDIRECT_PORT);
let abs_redirect_server_fut =
wrap_abs_redirect_server(REDIRECT_ABSOLUTE_PORT);
let ws_server_fut = ws::run_ws_server(WS_PORT);
let ws_ping_server_fut = ws::run_ws_ping_server(WS_PING_PORT);
let wss_server_fut = ws::run_wss_server(WSS_PORT);
let ws_close_server_fut = ws::run_ws_close_server(WS_CLOSE_PORT);
let ws_hang_server_fut = ws::run_ws_hang_handshake(WS_HANG_PORT);
let wss2_server_fut = ws::run_wss2_server(WSS2_PORT);
let tls_server_fut = run_tls_server(TLS_PORT);
let tls_client_auth_server_fut =
run_tls_client_auth_server(TLS_CLIENT_AUTH_PORT);
let client_auth_server_https_fut =
wrap_client_auth_https_server(HTTPS_CLIENT_AUTH_PORT);
let main_server_fut = wrap_main_server(PORT);
let main_server_https_fut = wrap_main_https_server(HTTPS_PORT);
let h1_only_server_tls_fut = wrap_https_h1_only_tls_server(H1_ONLY_TLS_PORT);
let h2_only_server_tls_fut = wrap_https_h2_only_tls_server(H2_ONLY_TLS_PORT);
let h1_only_server_fut = wrap_http_h1_only_server(H1_ONLY_PORT);
let h2_only_server_fut = wrap_http_h2_only_server(H2_ONLY_PORT);
let h2_grpc_server_fut = grpc::h2_grpc_server(H2_GRPC_PORT, H2S_GRPC_PORT);
let registry_server_fut =
jsr_registry::registry_server(JSR_REGISTRY_SERVER_PORT);
let provenance_mock_server_fut =
jsr_registry::provenance_mock_server(PROVENANCE_MOCK_SERVER_PORT);
let npm_registry_server_futs =
npm_registry::public_npm_registry(PUBLIC_NPM_REGISTRY_PORT);
let private_npm_registry_1_server_futs =
npm_registry::private_npm_registry1(PRIVATE_NPM_REGISTRY_1_PORT);
let private_npm_registry_2_server_futs =
npm_registry::private_npm_registry2(PRIVATE_NPM_REGISTRY_2_PORT);
let private_npm_registry_3_server_futs =
npm_registry::private_npm_registry3(PRIVATE_NPM_REGISTRY_3_PORT);
let socket_dev_api_futs = socket_dev::api(SOCKET_DEV_API_PORT);
// for serving node header files to node-gyp in tests
let node_js_mirror_server_fut =
nodejs_org_mirror::nodejs_org_mirror(NODEJS_ORG_MIRROR_SERVER_PORT);
if let Err(e) = ensure_tsgo_prebuilt().await {
eprintln!("failed to ensure tsgo prebuilt: {e}");
}
let mut futures = vec![
redirect_server_fut.boxed_local(),
ws_server_fut.boxed_local(),
ws_ping_server_fut.boxed_local(),
wss_server_fut.boxed_local(),
wss2_server_fut.boxed_local(),
tls_server_fut.boxed_local(),
tls_client_auth_server_fut.boxed_local(),
ws_close_server_fut.boxed_local(),
ws_hang_server_fut.boxed_local(),
another_redirect_server_fut.boxed_local(),
auth_redirect_server_fut.boxed_local(),
basic_auth_redirect_server_fut.boxed_local(),
inf_redirects_server_fut.boxed_local(),
double_redirects_server_fut.boxed_local(),
abs_redirect_server_fut.boxed_local(),
main_server_fut.boxed_local(),
main_server_https_fut.boxed_local(),
client_auth_server_https_fut.boxed_local(),
h1_only_server_tls_fut.boxed_local(),
h2_only_server_tls_fut.boxed_local(),
h1_only_server_fut.boxed_local(),
h2_only_server_fut.boxed_local(),
h2_grpc_server_fut.boxed_local(),
registry_server_fut.boxed_local(),
provenance_mock_server_fut.boxed_local(),
node_js_mirror_server_fut.boxed_local(),
];
futures.extend(npm_registry_server_futs);
futures.extend(private_npm_registry_1_server_futs);
futures.extend(private_npm_registry_2_server_futs);
futures.extend(private_npm_registry_3_server_futs);
futures.extend(socket_dev_api_futs);
assert_eq!(futures.len(), TEST_SERVERS_COUNT);
futures::future::join_all(futures).await;
}
fn empty_body() -> UnsyncBoxBody<Bytes, Infallible> {
UnsyncBoxBody::new(Empty::new())
}
fn string_body(str_: &str) -> UnsyncBoxBody<Bytes, Infallible> {
UnsyncBoxBody::new(Full::new(Bytes::from(str_.to_string())))
}
fn json_body(value: serde_json::Value) -> UnsyncBoxBody<Bytes, Infallible> {
let str_ = value.to_string();
string_body(&str_)
}
/// Benchmark server that just serves "hello world" responses.
async fn hyper_hello(port: u16) {
let addr = SocketAddr::from(([127, 0, 0, 1], port));
let handler = move |_: Request<hyper::body::Incoming>| async move {
Ok::<_, anyhow::Error>(Response::new(UnsyncBoxBody::new(
http_body_util::Full::new(Bytes::from("Hello World!")),
)))
};
run_server(
ServerOptions {
addr,
error_msg: "server error",
kind: ServerKind::Auto,
},
handler,
)
.await;
}
fn redirect_resp(url: &str) -> Response<UnsyncBoxBody<Bytes, Infallible>> {
let mut redirect_resp = Response::new(UnsyncBoxBody::new(Empty::new()));
*redirect_resp.status_mut() = StatusCode::MOVED_PERMANENTLY;
redirect_resp
.headers_mut()
.insert(http::header::LOCATION, HeaderValue::from_str(url).unwrap());
redirect_resp
}
async fn redirect(
req: Request<hyper::body::Incoming>,
) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
let p = req.uri().path();
assert_eq!(&p[0..1], "/");
let url = format!("http://localhost:{PORT}{p}");
Ok(redirect_resp(&url))
}
async fn double_redirects(
req: Request<hyper::body::Incoming>,
) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
let p = req.uri().path();
assert_eq!(&p[0..1], "/");
let url = format!("http://localhost:{REDIRECT_PORT}{p}");
Ok(redirect_resp(&url))
}
async fn inf_redirects(
req: Request<hyper::body::Incoming>,
) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
let p = req.uri().path();
assert_eq!(&p[0..1], "/");
let url = format!("http://localhost:{INF_REDIRECTS_PORT}{p}");
Ok(redirect_resp(&url))
}
async fn another_redirect(
req: Request<hyper::body::Incoming>,
) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
let p = req.uri().path();
assert_eq!(&p[0..1], "/");
let url = format!("http://localhost:{PORT}/subdir{p}");
Ok(redirect_resp(&url))
}
async fn auth_redirect(
req: Request<hyper::body::Incoming>,
) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
if let Some(auth) = req
.headers()
.get("authorization")
.map(|v| v.to_str().unwrap())
&& auth.to_lowercase() == format!("bearer {TEST_AUTH_TOKEN}")
{
let p = req.uri().path();
assert_eq!(&p[0..1], "/");
let url = format!("http://localhost:{PORT}{p}");
return Ok(redirect_resp(&url));
}
let mut resp = Response::new(UnsyncBoxBody::new(Empty::new()));
*resp.status_mut() = StatusCode::NOT_FOUND;
Ok(resp)
}
async fn basic_auth_redirect(
req: Request<hyper::body::Incoming>,
) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
if let Some(auth) = req
.headers()
.get("authorization")
.map(|v| v.to_str().unwrap())
{
let credentials =
format!("{TEST_BASIC_AUTH_USERNAME}:{TEST_BASIC_AUTH_PASSWORD}");
if auth == format!("Basic {}", BASE64_STANDARD.encode(credentials)) {
let p = req.uri().path();
assert_eq!(&p[0..1], "/");
let url = format!("http://localhost:{PORT}{p}");
return Ok(redirect_resp(&url));
}
}
let mut resp = Response::new(UnsyncBoxBody::new(Empty::new()));
*resp.status_mut() = StatusCode::NOT_FOUND;
Ok(resp)
}
/// Returns a [`Stream`] of [`TcpStream`]s accepted from the given port.
async fn get_tcp_listener_stream(
name: &'static str,
port: u16,
) -> impl Stream<Item = Result<TcpStream, std::io::Error>> + Unpin + Send {
let host_and_port = &format!("localhost:{port}");
// Listen on ALL addresses that localhost can resolves to.
let accept = |listener: tokio::net::TcpListener| {
async {
let result = listener.accept().await;
Some((result.map(|r| r.0), listener))
}
.boxed()
};
let mut addresses = vec![];
let listeners = tokio::net::lookup_host(host_and_port)
.await
.expect(host_and_port)
.inspect(|address| addresses.push(*address))
.map(tokio::net::TcpListener::bind)
.collect::<futures::stream::FuturesUnordered<_>>()
.collect::<Vec<_>>()
.await
.into_iter()
.map(|s| s.unwrap())
.map(|listener| futures::stream::unfold(listener, accept))
.collect::<Vec<_>>();
// Eye catcher for HttpServerCount
println!("ready: {name} on {:?}", addresses);
futures::stream::select_all(listeners)
}
/// This server responds with 'PASS' if client authentication was successful. Try it by running
/// test_server and
/// curl --key tests/testdata/tls/localhost.key \
/// --cert cli/tests/testsdata/tls/localhost.crt \
/// --cacert tests/testdata/tls/RootCA.crt https://localhost:4552/
async fn run_tls_client_auth_server(port: u16) {
let mut tls =
get_tls_listener_stream("tls client auth", port, Default::default()).await;
while let Some(Ok(mut tls_stream)) = tls.next().await {
tokio::spawn(async move {
let Ok(handshake) = tls_stream.handshake().await else {
eprintln!("Failed to handshake");
return;
};
// We only need to check for the presence of client certificates
// here. Rusttls ensures that they are valid and signed by the CA.
let response = match handshake.has_peer_certificates {
true => b"PASS",
false => b"FAIL",
};
tls_stream.write_all(response).await.unwrap();
});
}
}
/// This server responds with 'PASS' if client authentication was successful. Try it by running
/// test_server and
/// curl --cacert tests/testdata/tls/RootCA.crt https://localhost:4553/
async fn run_tls_server(port: u16) {
let mut tls = get_tls_listener_stream("tls", port, Default::default()).await;
while let Some(Ok(mut tls_stream)) = tls.next().await {
tokio::spawn(async move {
tls_stream.write_all(b"PASS").await.unwrap();
});
}
}
async fn absolute_redirect(
req: Request<hyper::body::Incoming>,
) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
let path = req.uri().path();
if path == "/" {
// We have to manually extract query params here,
// as `req.uri()` returns `PathAndQuery` only,
// and we cannot use `Url::parse(req.uri()).query_pairs()`,
// as it requires url to have a proper base.
let query_params: HashMap<_, _> = req
.uri()
.query()
.unwrap_or_default()
.split('&')
.filter_map(|s| {
s.split_once('=').map(|t| (t.0.to_owned(), t.1.to_owned()))
})
.collect();
if let Some(url) = query_params.get("redirect_to") {
let redirect = redirect_resp(url);
return Ok(redirect);
}
}
if path.starts_with("/REDIRECT") {
let url = &req.uri().path()[9..];
let redirect = redirect_resp(url);
return Ok(redirect);
}
if path.starts_with("/a/b/c")
&& let Some(x_loc) = req.headers().get("x-location")
{
let loc = x_loc.to_str().unwrap();
return Ok(redirect_resp(loc));
}
let file_path = testdata_path().join(&req.uri().path()[1..]);
if file_path.is_dir() || !file_path.exists() {
let mut not_found_resp = Response::new(UnsyncBoxBody::new(Empty::new()));
*not_found_resp.status_mut() = StatusCode::NOT_FOUND;
return Ok(not_found_resp);
}
let file = tokio::fs::read(file_path).await.unwrap();
let file_resp = custom_headers(req.uri().path(), file);
Ok(file_resp)
}
async fn main_server(
req: Request<hyper::body::Incoming>,
remote_addr: SocketAddr,
) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
match (req.method(), req.uri().path()) {
(_, "/echo_server") => {
let (parts, body) = req.into_parts();
let mut response = Response::new(UnsyncBoxBody::new(Full::new(
body.collect().await?.to_bytes(),
)));
if let Some(status) = parts.headers.get("x-status") {
*response.status_mut() =
StatusCode::from_bytes(status.as_bytes()).unwrap();
}
response.headers_mut().extend(parts.headers);
Ok(response)
}
(_, "/local_addr") => {
let addr = remote_addr.ip().to_string();
let response = Response::new(string_body(&addr));
Ok(response)
}
(&Method::POST, "/echo_multipart_file") => {
let body = req.into_body();
let bytes = &body.collect().await.unwrap().to_bytes()[0..];
let start = b"--boundary\t \r\n\
Content-Disposition: form-data; name=\"field_1\"\r\n\
\r\n\
value_1 \r\n\
\r\n--boundary\r\n\
Content-Disposition: form-data; name=\"file\"; \
filename=\"file.bin\"\r\n\
Content-Type: application/octet-stream\r\n\
\r\n";
let end = b"\r\n--boundary--\r\n";
let b = [start as &[u8], bytes, end].concat();
let mut response =
Response::new(UnsyncBoxBody::new(Full::new(Bytes::from(b))));
response.headers_mut().insert(
"content-type",
HeaderValue::from_static("multipart/form-data;boundary=boundary"),
);
Ok(response)
}
(&Method::GET, "/ghost_ws_client") => {
use tokio::io::AsyncReadExt;
let mut tcp_stream = TcpStream::connect("localhost:4248").await.unwrap();
#[cfg(unix)]
// SAFETY: set socket keep alive.
unsafe {
use std::os::fd::AsRawFd;
let fd = tcp_stream.as_raw_fd();
let mut val: libc::c_int = 1;
let r = libc::setsockopt(
fd,
libc::SOL_SOCKET,
libc::SO_KEEPALIVE,
&mut val as *mut _ as *mut libc::c_void,
std::mem::size_of_val(&val) as libc::socklen_t,
);
assert_eq!(r, 0);
}
// Typical websocket handshake request.
let headers = [
"GET / HTTP/1.1",
"Host: localhost",
"Upgrade: websocket",
"Connection: Upgrade",
"Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==",
"Sec-WebSocket-Version: 13",
"\r\n",
]
.join("\r\n");
tcp_stream.write_all(headers.as_bytes()).await.unwrap();
let mut buf = [0u8; 200];
let n = tcp_stream.read(&mut buf).await.unwrap();
assert!(n > 0);
// Ghost the server:
// - Close the read half of the connection.
// - forget the TcpStream.
let tcp_stream = tcp_stream.into_std().unwrap();
let _ = tcp_stream.shutdown(std::net::Shutdown::Read);
std::mem::forget(tcp_stream);
let res = Response::new(empty_body());
Ok(res)
}
(_, "/multipart_form_data.txt") => {
let b = "Preamble\r\n\
--boundary\t \r\n\
Content-Disposition: form-data; name=\"field_1\"\r\n\
\r\n\
value_1 \r\n\
\r\n--boundary\r\n\
Content-Disposition: form-data; name=\"field_2\";\
filename=\"file.js\"\r\n\
Content-Type: text/javascript\r\n\
\r\n\
console.log(\"Hi\")\
\r\n--boundary--\r\n\
Epilogue";
let mut res = Response::new(string_body(b));
res.headers_mut().insert(
"content-type",
HeaderValue::from_static("multipart/form-data;boundary=boundary"),
);
Ok(res)
}
(_, "/multipart_form_bad_content_type") => {
let b = "Preamble\r\n\
--boundary\t \r\n\
Content-Disposition: form-data; name=\"field_1\"\r\n\
\r\n\
value_1 \r\n\
\r\n--boundary\r\n\
Content-Disposition: form-data; name=\"field_2\";\
filename=\"file.js\"\r\n\
Content-Type: text/javascript\r\n\
\r\n\
console.log(\"Hi\")\
\r\n--boundary--\r\n\
Epilogue";
let mut res = Response::new(string_body(b));
res.headers_mut().insert(
"content-type",
HeaderValue::from_static("multipart/form-datatststs;boundary=boundary"),
);
Ok(res)
}
(_, "/server_error") => {
let mut res = Response::new(empty_body());
*res.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
Ok(res)
}
(_, "/x_deno_warning.js") => {
let mut res = Response::new(empty_body());
*res.status_mut() = StatusCode::MOVED_PERMANENTLY;
res
.headers_mut()
.insert("X-Deno-Warning", HeaderValue::from_static("foobar"));
res.headers_mut().insert(
"location",
HeaderValue::from_bytes(b"/lsp/x_deno_warning_redirect.js").unwrap(),
);
Ok(res)
}
(_, "/non_ascii_redirect") => {
let mut res = Response::new(empty_body());
*res.status_mut() = StatusCode::MOVED_PERMANENTLY;
res.headers_mut().insert(
"location",
HeaderValue::from_bytes(b"/redirect\xae").unwrap(),
);
Ok(res)
}
(_, "/etag_script.ts") => {
let if_none_match = req.headers().get("if-none-match");
if if_none_match == Some(&HeaderValue::from_static("33a64df551425fcc55e"))
{
let mut resp = Response::new(empty_body());
*resp.status_mut() = StatusCode::NOT_MODIFIED;
resp.headers_mut().insert(
"Content-type",
HeaderValue::from_static("application/typescript"),
);
resp
.headers_mut()
.insert("ETag", HeaderValue::from_static("33a64df551425fcc55e"));
Ok(resp)
} else {
let mut resp = Response::new(string_body("console.log('etag')"));
resp.headers_mut().insert(
"Content-type",
HeaderValue::from_static("application/typescript"),
);
resp
.headers_mut()
.insert("ETag", HeaderValue::from_static("33a64df551425fcc55e"));
Ok(resp)
}
}
(_, "/xTypeScriptTypes.js") => {
let mut res = Response::new(string_body("export const foo = 'foo';"));
res.headers_mut().insert(
"Content-type",
HeaderValue::from_static("application/javascript"),
);
res.headers_mut().insert(
"X-TypeScript-Types",
HeaderValue::from_static("./xTypeScriptTypes.d.ts"),
);
Ok(res)
}
(_, "/xTypeScriptTypes.jsx") => {
let mut res = Response::new(string_body("export const foo = 'foo';"));
res
.headers_mut()
.insert("Content-type", HeaderValue::from_static("text/jsx"));
res.headers_mut().insert(
"X-TypeScript-Types",
HeaderValue::from_static("./xTypeScriptTypes.d.ts"),
);
Ok(res)
}
(_, "/xTypeScriptTypes.ts") => {
let mut res =
Response::new(string_body("export const foo: string = 'foo';"));
res.headers_mut().insert(
"Content-type",
HeaderValue::from_static("application/typescript"),
);
res.headers_mut().insert(
"X-TypeScript-Types",
HeaderValue::from_static("./xTypeScriptTypes.d.ts"),
);
Ok(res)
}
(_, "/xTypeScriptTypes.d.ts") => {
let mut res = Response::new(string_body("export const foo: 'foo';"));
res.headers_mut().insert(
"Content-type",
HeaderValue::from_static("application/typescript"),
);
Ok(res)
}
(_, "/run/type_directives_redirect.js") => {
let mut res = Response::new(string_body("export const foo = 'foo';"));
res.headers_mut().insert(
"Content-type",
HeaderValue::from_static("application/javascript"),
);
res.headers_mut().insert(
"X-TypeScript-Types",
HeaderValue::from_static(
"http://localhost:4547/xTypeScriptTypesRedirect.d.ts",
),
);
Ok(res)
}
(_, "/run/type_headers_deno_types.foo.js") => {
let mut res = Response::new(string_body(
"export function foo(text) { console.log(text); }",
));
res.headers_mut().insert(
"Content-type",
HeaderValue::from_static("application/javascript"),
);
res.headers_mut().insert(
"X-TypeScript-Types",
HeaderValue::from_static(
"http://localhost:4545/run/type_headers_deno_types.d.ts",
),
);
Ok(res)
}
(_, "/run/type_headers_deno_types.d.ts") => {
let mut res =
Response::new(string_body("export function foo(text: number): void;"));
res.headers_mut().insert(
"Content-type",
HeaderValue::from_static("application/typescript"),
);
Ok(res)
}
(_, "/run/type_headers_deno_types.foo.d.ts") => {
let mut res =
Response::new(string_body("export function foo(text: string): void;"));
res.headers_mut().insert(
"Content-type",
HeaderValue::from_static("application/typescript"),
);
Ok(res)
}
(_, "/subdir/xTypeScriptTypesRedirect.d.ts") => {
let mut res = Response::new(string_body(
"import './xTypeScriptTypesRedirected.d.ts';",
));
res.headers_mut().insert(
"Content-type",
HeaderValue::from_static("application/typescript"),
);
Ok(res)
}
(_, "/subdir/xTypeScriptTypesRedirected.d.ts") => {
let mut res = Response::new(string_body("export const foo: 'foo';"));
res.headers_mut().insert(
"Content-type",
HeaderValue::from_static("application/typescript"),
);
Ok(res)
}
(_, "/referenceTypes.js") => {
let mut res = Response::new(string_body(
"/// <reference types=\"./xTypeScriptTypes.d.ts\" />\r\nexport const foo = \"foo\";\r\n",
));
res.headers_mut().insert(
"Content-type",
HeaderValue::from_static("application/javascript"),
);
Ok(res)
}
(_, "/subdir/file_with_:_in_name.ts") => {
let mut res = Response::new(string_body(
"console.log('Hello from file_with_:_in_name.ts');",
));
res.headers_mut().insert(
"Content-type",
HeaderValue::from_static("application/typescript"),
);
Ok(res)
}
(_, "/v1/extensionless") => {
let mut res =
Response::new(string_body(r#"export * from "/subdir/mod1.ts";"#));
res.headers_mut().insert(
"content-type",
HeaderValue::from_static("application/typescript"),
);
Ok(res)
}
(_, "/subdir/no_js_ext@1.0.0") => {
let mut res = Response::new(string_body(
r#"import { printHello } from "./mod2.ts";
printHello();
"#,
));
res.headers_mut().insert(
"Content-type",
HeaderValue::from_static("application/javascript"),
);
Ok(res)
}
(_, "/.well-known/deno-import-intellisense.json") => {
let file_path =
testdata_path().join("lsp/registries/deno-import-intellisense.json");
if let Ok(body) = tokio::fs::read(file_path).await {
Ok(custom_headers(
"/.well-known/deno-import-intellisense.json",
body,
))
} else {
Ok(Response::new(empty_body()))
}
}
(_, "/http_version") => {
let version = format!("{:?}", req.version());
Ok(Response::new(string_body(&version)))
}
(_, "/content_length") => {
let content_length = format!("{:?}", req.headers().get("content-length"));
Ok(Response::new(string_body(&content_length)))
}
(_, "/jsx/jsx-runtime") | (_, "/jsx/jsx-dev-runtime") => {
let mut res = Response::new(string_body(
r#"export function jsx(
_type,
_props,
_key,
_source,
_self,
) {}
export const jsxs = jsx;
export const jsxDEV = jsx;
export const Fragment = Symbol("Fragment");
console.log("imported", import.meta.url);
"#,
));
res.headers_mut().insert(
"Content-type",
HeaderValue::from_static("application/javascript"),
);
Ok(res)
}
(_, "/jsx-types/jsx-runtime") | (_, "/jsx-types/jsx-dev-runtime") => {
let mut res = Response::new(string_body(
r#"
/// <reference types="./jsx-runtime.d.ts" />
"#,
));
res.headers_mut().insert(
"Content-type",
HeaderValue::from_static("application/javascript"),
);
Ok(res)
}
(_, "/jsx-types/jsx-runtime.d.ts") => {
let mut res = Response::new(string_body(
r#"export function jsx(
_type: "a" | "b",
_props: any,
_key: any,
_source: any,
_self: any,
): any;
export const jsxs: typeof jsx;
export const jsxDEV: typeof jsx;
export const Fragment: unique symbol;
declare global {
namespace JSX {
interface IntrinsicElements {
[tagName: string]: Record<string, any>;
}
}
}
"#,
));
res.headers_mut().insert(
"Content-type",
HeaderValue::from_static("application/typescript"),
);
Ok(res)
}
(_, "/dynamic") => {
let mut res = Response::new(string_body(
&serde_json::to_string_pretty(&std::time::SystemTime::now()).unwrap(),
));
res
.headers_mut()
.insert("cache-control", HeaderValue::from_static("no-cache"));
Ok(res)
}
(_, "/dynamic_cache") => {
let mut res = Response::new(string_body(
&serde_json::to_string_pretty(&std::time::SystemTime::now()).unwrap(),
));
res.headers_mut().insert(
"cache-control",
HeaderValue::from_static("public, max-age=604800, immutable"),
);
Ok(res)
}
(_, "/dynamic_module.ts") => {
let mut res = Response::new(string_body(&format!(
r#"export const time = {};"#,
std::time::SystemTime::now().elapsed().unwrap().as_nanos()
)));
res.headers_mut().insert(
"Content-type",
HeaderValue::from_static("application/typescript"),
);
Ok(res)
}
(_, "/echo_accept") => {
let accept = req.headers().get("accept").map(|v| v.to_str().unwrap());
let res =
Response::new(json_body(serde_json::json!({ "accept": accept })));
Ok(res)
}
(_, "/search_params") => {
let query = req.uri().query().map(|s| s.to_string());
let res = Response::new(string_body(&query.unwrap_or_default()));
Ok(res)
}
(&Method::POST, "/kv_remote_authorize") => {
if req
.headers()
.get("authorization")
.and_then(|x| x.to_str().ok())
.unwrap_or_default()
!= format!("Bearer {}", KV_ACCESS_TOKEN)
{
return Ok(
Response::builder()
.status(StatusCode::UNAUTHORIZED)
.body(empty_body())
.unwrap(),
);
}
Ok(
Response::builder()
.header("content-type", "application/json")
.body(json_body(serde_json::json!({
"version": 1,
"databaseId": KV_DATABASE_ID,
"endpoints": [
{
"url": format!("http://localhost:{}/kv_blackhole", PORT),
"consistency": "strong",
}
],
"token": KV_DATABASE_TOKEN,
"expiresAt": "2099-01-01T00:00:00Z",
})))
.unwrap(),
)
}
(&Method::POST, "/kv_remote_authorize_invalid_format") => {
if req
.headers()
.get("authorization")
.and_then(|x| x.to_str().ok())
.unwrap_or_default()
!= format!("Bearer {}", KV_ACCESS_TOKEN)
{
return Ok(
Response::builder()
.status(StatusCode::UNAUTHORIZED)
.body(empty_body())
.unwrap(),
);
}
Ok(
Response::builder()
.header("content-type", "application/json")
.body(json_body(serde_json::json!({
"version": 1,
"databaseId": KV_DATABASE_ID,
})))
.unwrap(),
)
}
(&Method::POST, "/kv_remote_authorize_invalid_version") => {
if req
.headers()
.get("authorization")
.and_then(|x| x.to_str().ok())
.unwrap_or_default()
!= format!("Bearer {}", KV_ACCESS_TOKEN)
{
return Ok(
Response::builder()
.status(StatusCode::UNAUTHORIZED)
.body(empty_body())
.unwrap(),
);
}
Ok(
Response::builder()
.header("content-type", "application/json")
.body(json_body(serde_json::json!({
"version": 1000,
"databaseId": KV_DATABASE_ID,
"endpoints": [
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/servers/ws.rs | tests/util/server/src/servers/ws.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::future::Future;
use std::future::poll_fn;
use std::pin::Pin;
use std::result::Result;
use anyhow::anyhow;
use bytes::Bytes;
use fastwebsockets::FragmentCollector;
use fastwebsockets::Frame;
use fastwebsockets::OpCode;
use fastwebsockets::Role;
use fastwebsockets::WebSocket;
use futures::StreamExt;
use futures::future::join3;
use h2::Reason;
use h2::RecvStream;
use h2::server::Handshake;
use h2::server::SendResponse;
use http_body_util::Empty;
use hyper::Method;
use hyper::Request;
use hyper::Response;
use hyper::StatusCode;
use hyper::upgrade::Upgraded;
use hyper_util::rt::TokioIo;
use pretty_assertions::assert_eq;
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;
use super::SupportedHttpVersions;
use super::get_tcp_listener_stream;
use super::get_tls_listener_stream;
use crate::eprintln;
use crate::println;
pub async fn run_ws_server(port: u16) {
let mut tcp = get_tcp_listener_stream("ws", port).await;
while let Some(Ok(stream)) = tcp.next().await {
spawn_ws_server(stream, |ws| Box::pin(echo_websocket_handler(ws)));
}
}
pub async fn run_ws_ping_server(port: u16) {
let mut tcp = get_tcp_listener_stream("ws (ping)", port).await;
while let Some(Ok(stream)) = tcp.next().await {
spawn_ws_server(stream, |ws| Box::pin(ping_websocket_handler(ws)));
}
}
pub async fn run_wss_server(port: u16) {
let mut tls = get_tls_listener_stream("wss", port, Default::default()).await;
while let Some(Ok(tls_stream)) = tls.next().await {
tokio::spawn(async move {
spawn_ws_server(tls_stream, |ws| Box::pin(echo_websocket_handler(ws)));
});
}
}
pub async fn run_ws_close_server(port: u16) {
let mut tcp = get_tcp_listener_stream("ws (close)", port).await;
while let Some(Ok(stream)) = tcp.next().await {
spawn_ws_server(stream, |ws| Box::pin(close_websocket_handler(ws)));
}
}
pub async fn run_ws_hang_handshake(port: u16) {
let mut tcp = get_tcp_listener_stream("ws (hang handshake)", port).await;
while let Some(Ok(mut stream)) = tcp.next().await {
loop {
let mut buf = [0; 1024];
let n = stream.read(&mut buf).await;
if n.is_err() {
break;
}
if n.unwrap() == 0 {
break;
}
}
}
}
pub async fn run_wss2_server(port: u16) {
let mut tls = get_tls_listener_stream(
"wss2 (tls)",
port,
SupportedHttpVersions::Http2Only,
)
.await;
while let Some(Ok(tls)) = tls.next().await {
tokio::spawn(async move {
let mut h2 = h2::server::Builder::new();
h2.enable_connect_protocol();
// Using Bytes is pretty alloc-heavy but this is a test server
let server: Handshake<_, Bytes> = h2.handshake(tls);
let mut server = match server.await {
Ok(server) => server,
Err(e) => {
println!("Failed to handshake h2: {e:?}");
return;
}
};
loop {
let Some(conn) = server.accept().await else {
break;
};
let (recv, send) = match conn {
Ok(conn) => conn,
Err(e) => {
println!("Failed to accept a connection: {e:?}");
break;
}
};
tokio::spawn(handle_wss_stream(recv, send));
}
});
}
}
async fn echo_websocket_handler(
ws: fastwebsockets::WebSocket<TokioIo<Upgraded>>,
) -> Result<(), anyhow::Error> {
let mut ws = FragmentCollector::new(ws);
loop {
let frame = ws.read_frame().await.unwrap();
match frame.opcode {
OpCode::Close => break,
OpCode::Text | OpCode::Binary => {
ws.write_frame(frame).await.unwrap();
}
_ => {}
}
}
Ok(())
}
type WsHandler =
fn(
fastwebsockets::WebSocket<TokioIo<Upgraded>>,
) -> Pin<Box<dyn Future<Output = Result<(), anyhow::Error>> + Send>>;
fn spawn_ws_server<S>(stream: S, handler: WsHandler)
where
S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + Send + 'static,
{
let service = hyper::service::service_fn(
move |mut req: http::Request<hyper::body::Incoming>| async move {
if req.headers().get("user-agent").is_none() {
return Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(Empty::new())
.map_err(|e| anyhow!("Error creating response: {}", e));
}
let (response, upgrade_fut) = fastwebsockets::upgrade::upgrade(&mut req)
.map_err(|e| anyhow!("Error upgrading websocket connection: {}", e))?;
tokio::spawn(async move {
let ws = upgrade_fut
.await
.map_err(|e| anyhow!("Error upgrading websocket connection: {}", e))
.unwrap();
if let Err(e) = handler(ws).await {
eprintln!("Error in websocket connection: {}", e);
}
});
Ok::<_, anyhow::Error>(response)
},
);
let io = TokioIo::new(stream);
tokio::spawn(async move {
let conn = hyper::server::conn::http1::Builder::new()
.serve_connection(io, service)
.with_upgrades();
if let Err(e) = conn.await {
eprintln!("websocket server error: {e:?}");
}
});
}
async fn handle_wss_stream(
recv: Request<RecvStream>,
mut send: SendResponse<Bytes>,
) -> Result<(), h2::Error> {
if recv.method() != Method::CONNECT {
eprintln!("wss2: refusing non-CONNECT stream");
send.send_reset(Reason::REFUSED_STREAM);
return Ok(());
}
let Some(protocol) = recv.extensions().get::<h2::ext::Protocol>() else {
eprintln!("wss2: refusing no-:protocol stream");
send.send_reset(Reason::REFUSED_STREAM);
return Ok(());
};
if protocol.as_str() != "websocket" && protocol.as_str() != "WebSocket" {
eprintln!("wss2: refusing non-websocket stream");
send.send_reset(Reason::REFUSED_STREAM);
return Ok(());
}
let mut body = recv.into_body();
let mut response = Response::new(());
*response.status_mut() = StatusCode::OK;
let mut resp = send.send_response(response, false)?;
// Use a duplex stream to talk to fastwebsockets because it's just faster to implement
let (a, b) = tokio::io::duplex(65536);
let f1 = tokio::spawn(tokio::task::unconstrained(async move {
let ws = WebSocket::after_handshake(a, Role::Server);
let mut ws = FragmentCollector::new(ws);
loop {
let frame = ws.read_frame().await.unwrap();
if frame.opcode == OpCode::Close {
break;
}
ws.write_frame(frame).await.unwrap();
}
}));
let (mut br, mut bw) = tokio::io::split(b);
let f2 = tokio::spawn(tokio::task::unconstrained(async move {
loop {
let Some(Ok(data)) = poll_fn(|cx| body.poll_data(cx)).await else {
return;
};
body.flow_control().release_capacity(data.len()).unwrap();
let Ok(_) = bw.write_all(&data).await else {
break;
};
}
}));
let f3 = tokio::spawn(tokio::task::unconstrained(async move {
loop {
let mut buf = [0; 65536];
let n = br.read(&mut buf).await.unwrap();
if n == 0 {
break;
}
resp.reserve_capacity(n);
poll_fn(|cx| resp.poll_capacity(cx)).await;
resp
.send_data(Bytes::copy_from_slice(&buf[0..n]), false)
.unwrap();
}
resp.send_data(Bytes::new(), true).unwrap();
}));
_ = join3(f1, f2, f3).await;
Ok(())
}
async fn close_websocket_handler(
ws: fastwebsockets::WebSocket<TokioIo<Upgraded>>,
) -> Result<(), anyhow::Error> {
let mut ws = FragmentCollector::new(ws);
ws.write_frame(Frame::close_raw(vec![].into()))
.await
.unwrap();
Ok(())
}
async fn ping_websocket_handler(
ws: fastwebsockets::WebSocket<TokioIo<Upgraded>>,
) -> Result<(), anyhow::Error> {
let mut ws = FragmentCollector::new(ws);
for i in 0..9 {
ws.write_frame(Frame::new(true, OpCode::Ping, None, vec![].into()))
.await
.unwrap();
let frame = ws.read_frame().await.unwrap();
assert_eq!(frame.opcode, OpCode::Pong);
assert!(frame.payload.is_empty());
ws.write_frame(Frame::text(
format!("hello {}", i).as_bytes().to_vec().into(),
))
.await
.unwrap();
let frame = ws.read_frame().await.unwrap();
assert_eq!(frame.opcode, OpCode::Text);
assert_eq!(frame.payload, format!("hello {}", i).as_bytes());
}
ws.write_frame(Frame::close(1000, b"")).await.unwrap();
Ok(())
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/util/server/src/servers/hyper_utils.rs | tests/util/server/src/servers/hyper_utils.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::convert::Infallible;
use std::future::Future;
use std::io;
use std::net::SocketAddr;
use std::pin::Pin;
use std::result::Result;
use bytes::Bytes;
use futures::FutureExt;
use futures::Stream;
use futures::StreamExt;
use http;
use http::Request;
use http::Response;
use http_body_util::combinators::UnsyncBoxBody;
use hyper_util::rt::TokioIo;
use tokio::net::TcpListener;
use tokio::net::TcpStream;
use crate::eprintln;
use crate::println;
#[derive(Debug, Clone, Copy)]
pub enum ServerKind {
Auto,
OnlyHttp1,
OnlyHttp2,
}
#[derive(Debug, Clone, Copy)]
pub struct ServerOptions {
pub error_msg: &'static str,
pub addr: SocketAddr,
pub kind: ServerKind,
}
pub type HandlerOutput =
Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error>;
pub async fn run_server<F, S>(options: ServerOptions, handler: F)
where
F: Fn(Request<hyper::body::Incoming>) -> S + Copy + 'static,
S: Future<Output = HandlerOutput> + 'static,
{
let fut: Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>> =
async move {
let listener = TcpListener::bind(options.addr).await?;
println!("ready: {}", options.addr);
loop {
let (stream, _) = listener.accept().await?;
let io = TokioIo::new(stream);
deno_unsync::spawn(hyper_serve_connection(
io,
handler,
options.error_msg,
options.kind,
));
}
}
.boxed_local();
if let Err(e) = fut.await {
let err_str = e.to_string();
if !err_str.contains("early eof") {
eprintln!("{}: {:?}", options.error_msg, e);
}
}
}
pub async fn run_server_with_acceptor<A, F, S>(
mut acceptor: Pin<Box<A>>,
handler: F,
error_msg: &'static str,
kind: ServerKind,
) where
A: Stream<Item = io::Result<rustls_tokio_stream::TlsStream<TcpStream>>>
+ ?Sized,
F: Fn(Request<hyper::body::Incoming>) -> S + Copy + 'static,
S: Future<Output = HandlerOutput> + 'static,
{
let fut: Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>> =
async move {
while let Some(result) = acceptor.next().await {
let stream = result?;
let io = TokioIo::new(stream);
deno_unsync::spawn(hyper_serve_connection(
io, handler, error_msg, kind,
));
}
Ok(())
}
.boxed_local();
if let Err(e) = fut.await {
let err_str = e.to_string();
if !err_str.contains("early eof") {
eprintln!("{}: {:?}", error_msg, e);
}
}
}
pub async fn run_server_with_remote_addr<F, S>(
options: ServerOptions,
handler: F,
) where
F: Fn(Request<hyper::body::Incoming>, SocketAddr) -> S + Copy + 'static,
S: Future<Output = HandlerOutput> + 'static,
{
let fut: Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>> =
async move {
let listener = TcpListener::bind(options.addr).await?;
println!("ready: {}", options.addr);
loop {
let (stream, addr) = listener.accept().await?;
let io = TokioIo::new(stream);
deno_unsync::spawn(hyper_serve_connection(
io,
move |req| handler(req, addr),
options.error_msg,
options.kind,
));
}
}
.boxed_local();
if let Err(e) = fut.await {
let err_str = e.to_string();
if !err_str.contains("early eof") {
eprintln!("{}: {:?}", options.error_msg, e);
}
}
}
async fn hyper_serve_connection<I, F, S>(
io: I,
handler: F,
error_msg: &'static str,
kind: ServerKind,
) where
I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static,
F: Fn(Request<hyper::body::Incoming>) -> S + Copy + 'static,
S: Future<Output = HandlerOutput> + 'static,
{
let service = hyper::service::service_fn(handler);
let result: Result<(), anyhow::Error> = match kind {
ServerKind::Auto => {
let builder =
hyper_util::server::conn::auto::Builder::new(DenoUnsyncExecutor);
builder
.serve_connection(io, service)
.await
.map_err(|e| anyhow::anyhow!("{:?}", e))
}
ServerKind::OnlyHttp1 => {
let builder = hyper::server::conn::http1::Builder::new();
builder
.serve_connection(io, service)
.await
.map_err(|e| e.into())
}
ServerKind::OnlyHttp2 => {
let builder =
hyper::server::conn::http2::Builder::new(DenoUnsyncExecutor);
builder
.serve_connection(io, service)
.await
.map_err(|e| e.into())
}
};
if let Err(e) = result {
let err_str = e.to_string();
if !err_str.contains("early eof") {
eprintln!("{}: {:?}", error_msg, e);
}
}
}
#[derive(Clone)]
struct DenoUnsyncExecutor;
impl<Fut> hyper::rt::Executor<Fut> for DenoUnsyncExecutor
where
Fut: Future + 'static,
Fut::Output: 'static,
{
fn execute(&self, fut: Fut) {
deno_unsync::spawn(fut);
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/integration/pm_tests.rs | tests/integration/pm_tests.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use serde_json::json;
use test_util::TestContextBuilder;
use test_util::assert_contains;
use test_util::env_vars_for_jsr_npm_tests;
use test_util::pty::Pty;
use test_util::test;
#[test]
fn add_basic() {
let starting_deno_json = json!({
"name": "@foo/bar",
"version": "1.0.0",
"exports": "./mod.ts",
});
let context = pm_context_builder().build();
let temp_dir = context.temp_dir().path();
temp_dir.join("deno.json").write_json(&starting_deno_json);
let output = context.new_command().args("add jsr:@denotest/add").run();
output.assert_exit_code(0);
let output = output.combined_output();
assert_contains!(output, "Add jsr:@denotest/add");
temp_dir.join("deno.json").assert_matches_json(json!({
"name": "@foo/bar",
"version": "1.0.0",
"exports": "./mod.ts",
"imports": {
"@denotest/add": "jsr:@denotest/add@^1.0.0"
}
}));
}
#[test]
fn add_basic_no_deno_json() {
let context = pm_context_builder().build();
let temp_dir = context.temp_dir().path();
let output = context.new_command().args("add jsr:@denotest/add").run();
output.assert_exit_code(0);
let output = output.combined_output();
assert_contains!(output, "Add jsr:@denotest/add");
// Don't use `assert_matches_json` to ensure the file is properly formatted.
let expected = r#"{
"imports": {
"@denotest/add": "jsr:@denotest/add@^1.0.0"
}
}
"#;
temp_dir.join("deno.json").assert_matches_text(expected);
}
#[test]
fn add_basic_with_empty_deno_json() {
let context = pm_context_builder().build();
let temp_dir = context.temp_dir();
temp_dir.write("deno.json", "");
let output = context.new_command().args("add jsr:@denotest/add").run();
output.assert_exit_code(0);
let output = output.combined_output();
assert_contains!(output, "Add jsr:@denotest/add");
temp_dir
.path()
.join("deno.json")
.assert_matches_json(json!({
"imports": {
"@denotest/add": "jsr:@denotest/add@^1.0.0"
}
}));
}
#[test]
fn add_version_contraint() {
let context = pm_context_builder().build();
let temp_dir = context.temp_dir().path();
let output = context.new_command().args("add jsr:@denotest/add@1").run();
output.assert_exit_code(0);
let output = output.combined_output();
assert_contains!(output, "Add jsr:@denotest/add");
temp_dir.join("deno.json").assert_matches_json(json!({
"imports": {
"@denotest/add": "jsr:@denotest/add@^1.0.0"
}
}));
}
#[test]
fn add_tilde() {
let context = pm_context_builder().build();
let temp_dir = context.temp_dir().path();
let output = context.new_command().args("add jsr:@denotest/add@~1").run();
output.assert_exit_code(0);
let output = output.combined_output();
assert_contains!(output, "Add jsr:@denotest/add");
temp_dir.join("deno.json").assert_matches_json(json!({
"imports": {
"@denotest/add": "jsr:@denotest/add@~1.0.0"
}
}));
}
#[test]
fn add_multiple() {
let starting_deno_json = json!({
"name": "@foo/bar",
"version": "1.0.0",
"exports": "./mod.ts",
});
let context = pm_context_builder().build();
let temp_dir = context.temp_dir().path();
temp_dir.join("deno.json").write_json(&starting_deno_json);
let output = context
.new_command()
.args("add jsr:@denotest/add jsr:@denotest/subset-type-graph")
.run();
output.assert_exit_code(0);
let output = output.combined_output();
assert_contains!(output, "Add jsr:@denotest/add");
temp_dir.join("deno.json").assert_matches_json(json!({
"name": "@foo/bar",
"version": "1.0.0",
"exports": "./mod.ts",
"imports": {
"@denotest/add": "jsr:@denotest/add@^1.0.0",
"@denotest/subset-type-graph": "jsr:@denotest/subset-type-graph@^0.1.0"
}
}));
}
#[test]
fn add_npm() {
let context = pm_context_builder().build();
let temp_dir = context.temp_dir().path();
let output = context.new_command().args("add npm:chalk@4.1").run();
output.assert_exit_code(0);
let output = output.combined_output();
assert_contains!(output, "Add npm:chalk");
temp_dir.join("deno.json").assert_matches_json(json!({
"imports": {
"chalk": "npm:chalk@^4.1.2"
}
}));
}
fn pm_context_builder() -> TestContextBuilder {
TestContextBuilder::new()
.use_http_server()
.envs(env_vars_for_jsr_npm_tests())
.use_temp_cwd()
}
#[test(flaky)]
fn approve_scripts_basic() {
if !Pty::is_supported() {
return;
}
let context = pm_context_builder().build();
context
.temp_dir()
.write("deno.json", r#"{"nodeModulesDir": "manual"}"#);
context
.new_command()
.args("install npm:@denotest/node-lifecycle-scripts@1.0.0")
.run()
.skip_output_check();
context
.new_command()
.args("approve-scripts")
.with_pty(|mut pty| {
pty.expect("Select which packages to approve lifecycle scripts for");
pty.expect("@denotest/node-lifecycle-scripts@1.0.0");
pty.write_line(" ");
pty.write_line("\r\n");
pty.expect("Approved npm:@denotest/node-lifecycle-scripts@1.0.0");
pty.expect("@denotest/node-lifecycle-scripts@1.0.0: running");
pty.expect("Ran build script npm:@denotest/node-lifecycle-scripts@1.0.0");
});
context
.temp_dir()
.path()
.join("deno.json")
.assert_matches_json(json!({
"nodeModulesDir": "manual",
"imports": {
"@denotest/node-lifecycle-scripts": "npm:@denotest/node-lifecycle-scripts@1.0.0"
},
"allowScripts": ["npm:@denotest/node-lifecycle-scripts@1.0.0"],
}));
}
#[test(flaky)]
fn approve_scripts_deny_some() {
if !Pty::is_supported() {
return;
}
let context = pm_context_builder().build();
context
.temp_dir()
.write("deno.json", r#"{"nodeModulesDir": "manual"}"#);
context
.new_command()
.args("install npm:@denotest/node-lifecycle-scripts@1.0.0 npm:@denotest/print-npm-user-agent@1.0.0")
.run()
.skip_output_check();
context
.new_command()
.args("approve-scripts")
.with_pty(|mut pty| {
pty.expect("Select which packages to approve lifecycle scripts for");
pty.expect("@denotest/node-lifecycle-scripts@1.0.0");
pty.expect("@denotest/print-npm-user-agent@1.0.0");
pty.write_line(" ");
pty.write_line("\r\n");
pty.expect("Denied npm:@denotest/print-npm-user-agent@1.0.0");
pty.expect("Approved npm:@denotest/node-lifecycle-scripts@1.0.0");
pty.expect("@denotest/node-lifecycle-scripts@1.0.0: running");
pty.expect("Ran build script npm:@denotest/node-lifecycle-scripts@1.0.0");
});
context.temp_dir().path().join("deno.json").assert_matches_json(json!({
"nodeModulesDir": "manual",
"imports": {
"@denotest/node-lifecycle-scripts": "npm:@denotest/node-lifecycle-scripts@1.0.0",
"@denotest/print-npm-user-agent": "npm:@denotest/print-npm-user-agent@1.0.0"
},
"allowScripts": {
"allow": ["npm:@denotest/node-lifecycle-scripts@1.0.0"],
"deny": ["npm:@denotest/print-npm-user-agent@1.0.0"]
},
}));
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/integration/check_tests.rs | tests/integration/check_tests.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_lockfile::NewLockfileOptions;
use deno_lockfile::NpmPackageInfoProvider;
use deno_semver::jsr::JsrDepPackageReq;
use test_util as util;
use util::TestContext;
use util::TestContextBuilder;
use util::println;
use util::test;
#[test]
fn cache_switching_config_then_no_config() {
  let context = TestContext::default();
  // The first run with each config setting performs type checking.
  assert!(ran_type_check(&context, true));
  assert!(ran_type_check(&context, false));
  // Once both variants are checked, flipping the config back and forth
  // must hit the cache and skip checking entirely.
  assert!(!ran_type_check(&context, true));
  assert!(!ran_type_check(&context, false));
  // Runs `deno check` and reports whether "Check" appeared on stderr,
  // which indicates type checking actually ran.
  fn ran_type_check(context: &TestContext, with_config: bool) -> bool {
    let mut args = vec![
      "check".to_string(),
      "check/cache_config_on_off/main.ts".to_string(),
    ];
    if with_config {
      args.push("--config".to_string());
      args.push("check/cache_config_on_off/deno.json".to_string());
    }
    let output = context.new_command().args_vec(args).split_output().run();
    output.assert_exit_code(0);
    output.stderr().contains("Check")
  }
}
#[test]
fn reload_flag() {
  // `--reload` must force type checking even when results are cached.
  let context = TestContext::default();
  assert!(ran_type_check(&context, false));
  assert!(!ran_type_check(&context, false));
  assert!(ran_type_check(&context, true));
  assert!(ran_type_check(&context, true));
  assert!(!ran_type_check(&context, false));
  // Runs `deno check` and reports whether "Check" appeared on stderr,
  // which indicates type checking actually ran.
  fn ran_type_check(context: &TestContext, reload: bool) -> bool {
    let mut args = vec![
      "check".to_string(),
      "check/cache_config_on_off/main.ts".to_string(),
    ];
    if reload {
      args.push("--reload".to_string());
    }
    let output = context.new_command().args_vec(args).split_output().run();
    output.assert_exit_code(0);
    output.stderr().contains("Check")
  }
}
#[test]
fn typecheck_declarations_ns() {
  // The bundled `lib.deno.ns.d.ts` declaration file must type check on its own.
  let context = TestContextBuilder::for_jsr().build();
  let dts_path = util::root_path()
    .join("cli/tsc/dts/lib.deno.ns.d.ts")
    .to_string_lossy()
    .into_owned();
  let output = context
    .new_command()
    .args_vec(["check".to_string(), "--doc-only".to_string(), dts_path])
    .envs(util::env_vars_for_jsr_tests())
    .split_output()
    .run();
  println!("stdout: {}", output.stdout());
  println!("stderr: {}", output.stderr());
  output.assert_exit_code(0);
}
#[test]
fn typecheck_declarations_unstable() {
  // The bundled unstable declaration file must type check on its own.
  let context = TestContext::default();
  let dts_path = util::root_path()
    .join("cli/tsc/dts/lib.deno.unstable.d.ts")
    .to_string_lossy()
    .into_owned();
  let output = context
    .new_command()
    .args_vec(["check".to_string(), "--doc-only".to_string(), dts_path])
    .split_output()
    .run();
  println!("stdout: {}", output.stdout());
  println!("stderr: {}", output.stderr());
  output.assert_exit_code(0);
}
#[test]
fn ts_no_recheck_on_redirect() {
  // A module reached through a redirect should be checked once, then cached.
  let test_context = TestContext::default();
  let check_command = test_context.new_command().args_vec([
    "run",
    "--allow-import",
    "--check",
    "run/017_import_redirect.ts",
  ]);
  // first invocation performs the type check
  check_command
    .run()
    .assert_matches_text("[WILDCARD]Check [WILDCARD]");
  // second invocation runs straight from cache (no "Check" line)
  check_command.run().assert_matches_text("Hello, World!\n");
}
#[test]
fn check_error_in_dep_then_fix() {
  // Breaking, fixing, and re-breaking a dependency must each be reflected
  // by subsequent `deno check` runs (no stale cache results).
  let test_context = TestContextBuilder::new().use_temp_cwd().build();
  let temp_dir = test_context.temp_dir();
  let correct_code =
    "export function greet(name: string) {\n return `Hello ${name}`;\n}\n";
  let incorrect_code =
    "export function greet(name: number) {\n return `Hello ${name}`;\n}\n";
  temp_dir.write(
    "main.ts",
    "import { greet } from './greet.ts';\n\nconsole.log(greet('world'));\n",
  );
  let check_command = test_context.new_command().args_vec(["check", "main.ts"]);
  // broken dependency -> type error
  temp_dir.write("greet.ts", incorrect_code);
  let output = check_command.run();
  output.assert_matches_text("Check [WILDCARD]main.ts\nTS234[WILDCARD]");
  output.assert_exit_code(1);
  // fixed dependency -> clean check
  temp_dir.write("greet.ts", correct_code);
  check_command
    .run()
    .assert_matches_text("Check [WILDCARD]main.ts\n");
  // broken again -> error again
  temp_dir.write("greet.ts", incorrect_code);
  let output = check_command.run();
  output.assert_matches_text("Check [WILDCARD]main.ts\nTS234[WILDCARD]");
  output.assert_exit_code(1);
}
#[test]
fn json_module_check_then_error() {
  // Editing an imported JSON file must invalidate the cached check result.
  let test_context = TestContextBuilder::new().use_temp_cwd().build();
  let temp_dir = test_context.temp_dir();
  temp_dir.write(
    "main.ts",
    "import test from './test.json' with { type: 'json' }; console.log(test.foo);\n",
  );
  temp_dir.write("test.json", "{ \"foo\": \"bar\" }");
  let check_command = test_context.new_command().args_vec(["check", "main.ts"]);
  check_command.run().assert_exit_code(0).skip_output_check();
  // rename the key so `test.foo` no longer exists
  temp_dir.write("test.json", "{ \"foo2\": \"bar\" }");
  check_command
    .run()
    .assert_matches_text("Check [WILDCARD]main.ts\nTS2551[WILDCARD]")
    .assert_exit_code(1);
}
/// Stub [`NpmPackageInfoProvider`] used when parsing lockfiles in these
/// tests; it returns default (empty) npm info for every requested package.
struct TestNpmPackageInfoProvider;
#[async_trait::async_trait(?Send)]
impl NpmPackageInfoProvider for TestNpmPackageInfoProvider {
  async fn get_npm_package_info(
    &self,
    values: &[deno_semver::package::PackageNv],
  ) -> Result<
    Vec<deno_lockfile::Lockfile5NpmInfo>,
    Box<dyn std::error::Error + Send + Sync>,
  > {
    // One default entry per requested package; the tests here never need
    // real registry metadata.
    Ok(values.iter().map(|_| Default::default()).collect())
  }
}
// Verifies that switching an npm package's resolved version via the lockfile
// invalidates previous `deno check` results. Note: uses the `util::test`
// attribute macro imported above, which (unlike the built-in `#[test]`)
// supports async test functions.
#[test]
async fn npm_module_check_then_error() {
  let test_context = TestContextBuilder::new()
    .use_temp_cwd()
    .add_npm_env_vars()
    .use_http_server()
    .build();
  let temp_dir = test_context.temp_dir();
  temp_dir.write("deno.json", "{}"); // so the lockfile gets loaded
  // get the lockfiles values first (this is necessary because the test
  // server generates different tarballs based on the operating system)
  test_context
    .new_command()
    .args_vec([
      "cache",
      "npm:@denotest/breaking-change-between-versions@1.0.0",
      "npm:@denotest/breaking-change-between-versions@2.0.0",
    ])
    .run()
    .skip_output_check();
  let lockfile_path = temp_dir.path().join("deno.lock");
  let mut lockfile = deno_lockfile::Lockfile::new(
    NewLockfileOptions {
      file_path: lockfile_path.to_path_buf(),
      content: &lockfile_path.read_to_string(),
      overwrite: false,
    },
    &TestNpmPackageInfoProvider,
  )
  .await
  .unwrap();
  // make the specifier resolve to version 1
  lockfile.content.packages.specifiers.insert(
    JsrDepPackageReq::from_str(
      "npm:@denotest/breaking-change-between-versions",
    )
    .unwrap(),
    "1.0.0".into(),
  );
  lockfile_path.write(lockfile.as_json_string());
  temp_dir.write(
    "main.ts",
    "import { oldName } from 'npm:@denotest/breaking-change-between-versions'; console.log(oldName());\n",
  );
  // version 1 exports `oldName`, so the check passes
  let check_command = test_context.new_command().args_vec(["check", "main.ts"]);
  check_command.run().assert_exit_code(0).skip_output_check();
  // now update the lockfile to use version 2 instead, which should cause a
  // type checking error because the oldName no longer exists
  lockfile.content.packages.specifiers.insert(
    JsrDepPackageReq::from_str(
      "npm:@denotest/breaking-change-between-versions",
    )
    .unwrap(),
    "2.0.0".into(),
  );
  lockfile_path.write(lockfile.as_json_string());
  check_command
    .run()
    .assert_matches_text("Check [WILDCARD]main.ts\nTS2305[WILDCARD]has no exported member 'oldName'[WILDCARD]")
    .assert_exit_code(1);
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/integration/compile_tests.rs | tests/integration/compile_tests.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use test_util as util;
use test_util::eprintln;
use test_util::test;
use util::TestContext;
use util::TestContextBuilder;
use util::assert_not_contains;
use util::testdata_path;
#[test]
fn compile_basic() {
  // Compiles a simple script and runs the produced binary; also verifies
  // compilation works with a warm cache and that the binary still runs
  // when DENO_DIR points into a read-only location.
  let context = TestContextBuilder::new().build();
  let dir = context.temp_dir();
  let exe = if cfg!(windows) {
    dir.path().join("welcome.exe")
  } else {
    dir.path().join("welcome")
  };
  // try this twice to ensure it works with the cache
  for _ in 0..2 {
    let output = context
      .new_command()
      .args_vec([
        "compile",
        "--output",
        &exe.to_string_lossy(),
        "../../tests/testdata/welcome.ts",
      ])
      .run();
    output.assert_exit_code(0);
    output.skip_output_check();
    let output = context.new_command().name(&exe).run();
    output.assert_matches_text("Welcome to Deno!\n");
  }
  // On arm64 macOS, check if `codesign -v` passes
  #[cfg(all(target_os = "macos", target_arch = "aarch64"))]
  {
    let output = std::process::Command::new("codesign")
      .arg("-v")
      .arg(&exe)
      .output()
      .unwrap();
    assert!(output.status.success());
  }
  // now ensure this works when the deno_dir is readonly
  let readonly_dir = dir.path().join("readonly");
  readonly_dir.make_dir_readonly();
  let readonly_sub_dir = readonly_dir.join("sub");
  let output = context
    .new_command()
    // it should fail creating this, but still work
    .env("DENO_DIR", readonly_sub_dir)
    .name(exe)
    .run();
  output.assert_matches_text("Welcome to Deno!\n");
}
#[test]
fn standalone_args() {
  // Arguments baked in at compile time come first, then the runtime args.
  let context = TestContextBuilder::new().build();
  let bin_name = if cfg!(windows) { "args.exe" } else { "args" };
  let exe = context.temp_dir().path().join(bin_name);
  let compile_out = context
    .new_command()
    .args_vec([
      "compile",
      "--output",
      &exe.to_string_lossy(),
      "./compile/args.ts",
      "a",
      "b",
    ])
    .run();
  compile_out.skip_output_check();
  compile_out.assert_exit_code(0);
  let run_out = context
    .new_command()
    .name(&exe)
    .args("foo --bar --unstable")
    .run();
  run_out.assert_exit_code(0);
  run_out.assert_matches_text("a\nb\nfoo\n--bar\n--unstable\n");
}
#[test]
fn standalone_load_datauri() {
  // Data URL imports must be embedded into the compiled binary.
  let context = TestContextBuilder::new().build();
  let bin_name = if cfg!(windows) {
    "load_datauri.exe"
  } else {
    "load_datauri"
  };
  let exe = context.temp_dir().path().join(bin_name);
  let compile_out = context
    .new_command()
    .args_vec([
      "compile",
      "--output",
      &exe.to_string_lossy(),
      "./compile/standalone_import_datauri.ts",
    ])
    .run();
  compile_out.skip_output_check();
  compile_out.assert_exit_code(0);
  let run_out = context.new_command().name(&exe).run();
  run_out.assert_exit_code(0);
  run_out.assert_matches_text("Hello Deno!\n");
}
// https://github.com/denoland/deno/issues/13704
#[test]
fn standalone_follow_redirects() {
  // Modules reached through HTTP redirects must compile and run.
  let context = TestContextBuilder::new().build();
  let bin_name = if cfg!(windows) {
    "follow_redirects.exe"
  } else {
    "follow_redirects"
  };
  let exe = context.temp_dir().path().join(bin_name);
  let compile_out = context
    .new_command()
    .args_vec([
      "compile",
      "--output",
      &exe.to_string_lossy(),
      "--config",
      "../config/deno.json",
      "./compile/standalone_follow_redirects.ts",
    ])
    .run();
  compile_out.skip_output_check();
  compile_out.assert_exit_code(0);
  let run_out = context.new_command().name(&exe).run();
  run_out.assert_exit_code(0);
  run_out.assert_matches_text("Hello\n");
}
#[test]
fn compile_with_file_exists_error() {
  // Compiling into a path whose parent is an existing file must error out.
  let context = TestContextBuilder::new().build();
  let dir = context.temp_dir();
  // trailing separator makes "args" the parent directory of the output
  let output_path = if cfg!(windows) {
    dir.path().join(r"args\")
  } else {
    dir.path().join("args/")
  };
  let file_path = dir.path().join("args");
  file_path.write("");
  let output = context
    .new_command()
    .args_vec([
      "compile",
      "--output",
      &output_path.to_string_lossy(),
      "./compile/args.ts",
    ])
    .run();
  output.assert_exit_code(1);
  output.assert_matches_text(format!(
    concat!(
      "[WILDCARD]error: Could not compile to file '{}' because its parent directory ",
      "is an existing file. You can use the `--output <file-path>` flag to ",
      "provide an alternative name.\n",
    ),
    file_path,
  ));
}
#[test]
fn compile_with_directory_exists_error() {
  // A directory already occupying the output path must produce an error.
  let context = TestContextBuilder::new().build();
  let bin_name = if cfg!(windows) { "args.exe" } else { "args" };
  let exe = context.temp_dir().path().join(bin_name);
  std::fs::create_dir(&exe).unwrap();
  let output = context
    .new_command()
    .args_vec([
      "compile",
      "--output",
      &exe.to_string_lossy(),
      "./compile/args.ts",
    ])
    .run();
  output.assert_exit_code(1);
  output.assert_matches_text(format!(
    concat!(
      "[WILDCARD]error: Could not compile to file '{}' because a directory exists with ",
      "the same name. You can use the `--output <file-path>` flag to ",
      "provide an alternative name.\n"
    ),
    exe
  ));
}
#[test]
fn compile_with_conflict_file_exists_error() {
  // An unrelated pre-existing file at the output path must not be clobbered.
  let context = TestContextBuilder::new().build();
  let bin_name = if cfg!(windows) { "args.exe" } else { "args" };
  let exe = context.temp_dir().path().join(bin_name);
  std::fs::write(&exe, b"SHOULD NOT BE OVERWRITTEN").unwrap();
  let output = context
    .new_command()
    .args_vec([
      "compile",
      "--output",
      &exe.to_string_lossy(),
      "./compile/args.ts",
    ])
    .run();
  output.assert_exit_code(1);
  output.assert_matches_text(format!(
    concat!(
      "[WILDCARD]error: Could not compile to file '{}' because the file already exists ",
      "and cannot be overwritten. Please delete the existing file or ",
      "use the `--output <file-path>` flag to provide an alternative name.\n"
    ),
    exe
  ));
  // the original contents must survive the failed compile
  exe.assert_matches_text("SHOULD NOT BE OVERWRITTEN");
}
#[test]
fn compile_and_overwrite_file() {
  // Re-compiling over a binary previously produced by `deno compile`
  // must succeed (a second pass overwrites the first).
  let context = TestContextBuilder::new().build();
  let bin_name = if cfg!(windows) { "args.exe" } else { "args" };
  let exe = context.temp_dir().path().join(bin_name);
  for _ in 0..2 {
    let output = context
      .new_command()
      .args_vec([
        "compile",
        "--output",
        &exe.to_string_lossy(),
        "./compile/args.ts",
      ])
      .run();
    output.skip_output_check();
    output.assert_exit_code(0);
    assert!(&exe.exists());
  }
}
#[test]
fn standalone_runtime_flags() {
  // Runtime flags (permissions, --seed) given at compile time are baked in.
  let context = TestContextBuilder::new().build();
  let bin_name = if cfg!(windows) { "flags.exe" } else { "flags" };
  let exe = context.temp_dir().path().join(bin_name);
  let compile_out = context
    .new_command()
    .args_vec([
      "compile",
      "--allow-read",
      "--seed",
      "1",
      "--output",
      &exe.to_string_lossy(),
      "./compile/standalone_runtime_flags.ts",
    ])
    .run();
  compile_out.skip_output_check();
  compile_out.assert_exit_code(0);
  // seeded RNG output is deterministic; write access was not granted
  let run_out = context
    .new_command()
    .env("NO_COLOR", "1")
    .name(&exe)
    .split_output()
    .run();
  run_out.assert_exit_code(1);
  run_out.assert_stdout_matches_text("0.1472050634010581\n");
  run_out.assert_stderr_matches_text(
    "[WILDCARD]NotCapable: Requires write access to[WILDCARD]",
  );
}
#[test]
fn standalone_ext_flag_ts() {
  // `--ext ts` lets an extension-less TypeScript file be compiled.
  let context = TestContextBuilder::new().build();
  let bin_name = if cfg!(windows) {
    "ext_flag_ts.exe"
  } else {
    "ext_flag_ts"
  };
  let exe = context.temp_dir().path().join(bin_name);
  let compile_out = context
    .new_command()
    .args_vec([
      "compile",
      "--ext",
      "ts",
      "--output",
      &exe.to_string_lossy(),
      "./file_extensions/ts_without_extension",
    ])
    .run();
  compile_out.skip_output_check();
  compile_out.assert_exit_code(0);
  let run_out = context.new_command().env("NO_COLOR", "1").name(&exe).run();
  run_out.assert_exit_code(0);
  run_out.assert_matches_text("executing typescript with no extension\n");
}
#[test]
fn standalone_ext_flag_js() {
  // `--ext js` lets an extension-less JavaScript file be compiled.
  let context = TestContextBuilder::new().build();
  let bin_name = if cfg!(windows) {
    "ext_flag_js.exe"
  } else {
    "ext_flag_js"
  };
  let exe = context.temp_dir().path().join(bin_name);
  let compile_out = context
    .new_command()
    .args_vec([
      "compile",
      "--ext",
      "js",
      "--output",
      &exe.to_string_lossy(),
      "./file_extensions/js_without_extension",
    ])
    .run();
  compile_out.skip_output_check();
  compile_out.assert_exit_code(0);
  context
    .new_command()
    .env("NO_COLOR", "1")
    .name(&exe)
    .run()
    .assert_matches_text("executing javascript with no extension\n");
}
#[test]
fn standalone_import_map() {
  // An import map supplied via `--import-map` must be honored after compile.
  let context = TestContextBuilder::new().build();
  let bin_name = if cfg!(windows) {
    "import_map.exe"
  } else {
    "import_map"
  };
  let exe = context.temp_dir().path().join(bin_name);
  let compile_out = context
    .new_command()
    .args_vec([
      "compile",
      "--allow-read",
      "--import-map",
      "compile/standalone_import_map.json",
      "--output",
      &exe.to_string_lossy(),
      "./compile/standalone_import_map.ts",
    ])
    .run();
  compile_out.skip_output_check();
  compile_out.assert_exit_code(0);
  let run_out = context.new_command().name(&exe).run();
  run_out.skip_output_check();
  run_out.assert_exit_code(0);
}
#[test]
fn standalone_import_map_config_file() {
  // An import map embedded in a config file must be honored after compile.
  let context = TestContextBuilder::new().build();
  let bin_name = if cfg!(windows) {
    "import_map.exe"
  } else {
    "import_map"
  };
  let exe = context.temp_dir().path().join(bin_name);
  let compile_out = context
    .new_command()
    .args_vec([
      "compile",
      "--allow-read",
      "--config",
      "compile/standalone_import_map_config.json",
      "--output",
      &exe.to_string_lossy(),
      "./compile/standalone_import_map.ts",
    ])
    .run();
  compile_out.skip_output_check();
  compile_out.assert_exit_code(0);
  let run_out = context.new_command().name(&exe).run();
  run_out.skip_output_check();
  run_out.assert_exit_code(0);
}
#[test]
// https://github.com/denoland/deno/issues/12670
fn skip_rebundle() {
  // The compile step must not print a "Bundle" phase.
  let context = TestContextBuilder::new().build();
  let bin_name = if cfg!(windows) {
    "hello_world.exe"
  } else {
    "hello_world"
  };
  let exe = context.temp_dir().path().join(bin_name);
  let output = context
    .new_command()
    .args_vec([
      "compile",
      "--output",
      &exe.to_string_lossy(),
      "./run/001_hello.js",
    ])
    .run();
  //no "Bundle testdata_path/run/001_hello.js" in output
  assert_not_contains!(output.combined_output(), "Bundle");
  let run_out = context.new_command().name(&exe).run();
  run_out.assert_exit_code(0);
  run_out.assert_matches_text("Hello World\n");
}
#[test]
fn check_local_by_default() {
  // This fixture compiles cleanly when only local code is type-checked.
  let context = TestContext::with_http_server();
  let bin_name = if cfg!(windows) { "welcome.exe" } else { "welcome" };
  let exe = context.temp_dir().path().join(bin_name);
  let output = context
    .new_command()
    .args_vec([
      "compile",
      "--allow-import",
      "--output",
      &exe.to_string_lossy(),
      "./compile/check_local_by_default.ts",
    ])
    .run();
  output.skip_output_check();
  output.assert_exit_code(0);
}
#[test]
fn check_local_by_default2() {
  // Errors in local code are still reported during `deno compile`.
  let context = TestContext::with_http_server();
  let bin_name = if cfg!(windows) { "welcome.exe" } else { "welcome" };
  let exe = context.temp_dir().path().join(bin_name);
  let output = context
    .new_command()
    .args_vec([
      "compile",
      "--allow-import",
      "--output",
      &exe.to_string_lossy(),
      "./compile/check_local_by_default2.ts",
    ])
    .run();
  output.assert_exit_code(1);
  output.assert_matches_text(
    r#"[WILDCARD]TS2322 [ERROR]: Type '12' is not assignable to type '"b"'.[WILDCARD]"#,
  );
}
#[test]
fn workers_basic() {
  // Worker modules referenced statically must be bundled into the binary.
  let context = TestContext::with_http_server();
  let bin_name = if cfg!(windows) { "basic.exe" } else { "basic" };
  let exe = context.temp_dir().path().join(bin_name);
  let compile_out = context
    .new_command()
    .args_vec([
      "compile",
      "--no-check",
      "--output",
      &exe.to_string_lossy(),
      "./compile/workers/basic.ts",
    ])
    .run();
  compile_out.skip_output_check();
  compile_out.assert_exit_code(0);
  let run_out = context.new_command().name(&exe).run();
  run_out.assert_exit_code(0);
  run_out.assert_matches_file("./compile/workers/basic.out");
}
#[test]
fn workers_not_in_module_map() {
  // A worker module that was not embedded must fail to load at runtime.
  let context = TestContext::with_http_server();
  let bin_name = if cfg!(windows) {
    "not_in_module_map.exe"
  } else {
    "not_in_module_map"
  };
  let exe = context.temp_dir().path().join(bin_name);
  context
    .new_command()
    .args_vec([
      "compile",
      "--output",
      &exe.to_string_lossy(),
      "./compile/workers/not_in_module_map.ts",
    ])
    .run()
    .assert_exit_code(0)
    .skip_output_check();
  let output = context.new_command().name(exe).env("NO_COLOR", "").run();
  output.assert_exit_code(1);
  output.assert_matches_text(concat!(
    "error: Uncaught (in worker \"\") Module not found: [WILDCARD]",
    "error: Uncaught (in promise) Error: Unhandled error in child worker.\n[WILDCARD]"
  ));
}
#[test]
fn workers_with_include_flag() {
  // `--include` makes an otherwise-unreferenced worker module available.
  let context = TestContext::with_http_server();
  let bin_name = if cfg!(windows) {
    "workers_with_include_flag.exe"
  } else {
    "workers_with_include_flag"
  };
  let exe = context.temp_dir().path().join(bin_name);
  let compile_out = context
    .new_command()
    .args_vec([
      "compile",
      "--output",
      &exe.to_string_lossy(),
      "--include",
      "./compile/workers/worker.ts",
      "./compile/workers/not_in_module_map.ts",
    ])
    .run();
  compile_out.skip_output_check();
  compile_out.assert_exit_code(0);
  context
    .new_command()
    .name(&exe)
    .env("NO_COLOR", "")
    .run()
    .assert_matches_text("Hello from worker!\nReceived 42\nClosing\n");
}
#[test]
fn dynamic_import() {
  // Statically analyzable dynamic imports are included automatically.
  let context = TestContext::with_http_server();
  let bin_name = if cfg!(windows) {
    "dynamic_import.exe"
  } else {
    "dynamic_import"
  };
  let exe = context.temp_dir().path().join(bin_name);
  let compile_out = context
    .new_command()
    .args_vec([
      "compile",
      "--output",
      &exe.to_string_lossy(),
      "./compile/dynamic_imports/main.ts",
    ])
    .run();
  compile_out.skip_output_check();
  compile_out.assert_exit_code(0);
  let run_out = context.new_command().name(&exe).env("NO_COLOR", "").run();
  run_out.assert_matches_file("./compile/dynamic_imports/main.out");
  run_out.assert_exit_code(0);
}
#[test]
fn dynamic_import_unanalyzable() {
  // Unanalyzable dynamic imports need `--include` plus read permission.
  let context = TestContext::with_http_server();
  let bin_name = if cfg!(windows) {
    "dynamic_import_unanalyzable.exe"
  } else {
    "dynamic_import_unanalyzable"
  };
  let exe = context.temp_dir().path().join(bin_name);
  let compile_out = context
    .new_command()
    .args_vec([
      "compile",
      "--allow-read",
      "--include",
      "./compile/dynamic_imports/import1.ts",
      "--output",
      &exe.to_string_lossy(),
      "./compile/dynamic_imports/main_unanalyzable.ts",
    ])
    .run();
  compile_out.skip_output_check();
  compile_out.assert_exit_code(0);
  // run from the repo root so the dynamically imported file resolves
  let run_out = context
    .new_command()
    .current_dir(util::root_path())
    .name(&exe)
    .env("NO_COLOR", "")
    .run();
  run_out.assert_matches_file("./compile/dynamic_imports/main.out");
  run_out.assert_exit_code(0);
}
// TODO(2.0): this test should first run `deno install`?
#[test]
#[ignore]
fn compile_npm_specifiers() {
  // Compiles a program importing npm specifiers several ways: with and
  // without --node-modules-dir, via a package.json, and via byonm.
  let context = TestContextBuilder::for_npm().use_temp_cwd().build();
  let temp_dir = context.temp_dir();
  temp_dir.write(
    "main.ts",
    concat!(
      "import path from 'node:path';\n",
      "import { getValue, setValue } from 'npm:@denotest/esm-basic';\n",
      "import getValueDefault from 'npm:@denotest/esm-import-cjs-default';\n",
      "setValue(2);\n",
      "console.log(path.join('testing', 'this'));",
      "console.log(getValue());",
      "console.log(getValueDefault());",
    ),
  );
  let binary_path = if cfg!(windows) {
    temp_dir.path().join("binary.exe")
  } else {
    temp_dir.path().join("binary")
  };
  // try with and without --node-modules-dir
  let compile_commands = &[
    "compile --output binary main.ts",
    "compile --node-modules-dir --output binary main.ts",
  ];
  for compile_command in compile_commands {
    let output = context.new_command().args(compile_command).run();
    output.assert_exit_code(0);
    output.skip_output_check();
    let output = context.new_command().name(&binary_path).run();
    output.assert_matches_text(
      r#"Node esm importing node cjs
===========================
{
default: [Function (anonymous)],
named: [Function (anonymous)],
MyClass: [class MyClass]
}
{ default: [Function (anonymous)], named: [Function (anonymous)] }
[Module: null prototype] {
MyClass: [class MyClass],
__esModule: true,
default: {
default: [Function (anonymous)],
named: [Function (anonymous)],
MyClass: [class MyClass]
},
named: [Function (anonymous)]
}
[Module: null prototype] {
__esModule: true,
default: { default: [Function (anonymous)], named: [Function (anonymous)] },
named: [Function (anonymous)]
}
===========================
static method
testing[WILDCARD]this
2
5
"#,
    );
  }
  // try with a package.json
  temp_dir.remove_dir_all("node_modules");
  temp_dir.write(
    "main.ts",
    concat!(
      "import { getValue, setValue } from '@denotest/esm-basic';\n",
      "setValue(2);\n",
      "console.log(getValue());",
    ),
  );
  temp_dir.write(
    "package.json",
    r#"{ "dependencies": { "@denotest/esm-basic": "1" } }"#,
  );
  context
    .new_command()
    .args("compile --output binary main.ts")
    .run()
    .assert_exit_code(0)
    .skip_output_check();
  context
    .new_command()
    .name(&binary_path)
    .run()
    .assert_matches_text("2\n");
  // now try with byonm
  temp_dir.remove_dir_all("node_modules");
  temp_dir.write("deno.json", r#"{"unstable":["byonm"]}"#);
  context.run_npm("install");
  context
    .new_command()
    .args("compile --output binary main.ts")
    .run()
    .assert_exit_code(0)
    .assert_matches_text(
      "Check main.ts\nCompile main.ts to binary[WILDLINE]\n",
    );
  context
    .new_command()
    .name(&binary_path)
    .run()
    .assert_matches_text("2\n");
}
#[test]
fn compile_npm_bin_esm() {
  // An ESM bin entrypoint from an npm package compiles and forwards args.
  run_npm_bin_compile_test(RunNpmBinCompileOptions {
    input_specifier: "npm:@denotest/bin/cli-esm",
    input_name: None,
    expected_name: "cli-esm",
    copy_temp_dir: None,
    node_modules_local: false,
    compile_args: vec![],
    run_args: vec!["this", "is", "a", "test"],
    output_file: "npm/deno_run_esm.out",
    exit_code: 0,
  });
}
#[test]
fn compile_npm_bin_cjs() {
  // A CJS bin entrypoint from an npm package compiles and forwards args.
  run_npm_bin_compile_test(RunNpmBinCompileOptions {
    input_specifier: "npm:@denotest/bin/cli-cjs",
    input_name: None,
    expected_name: "cli-cjs",
    copy_temp_dir: None,
    node_modules_local: false,
    compile_args: vec![],
    run_args: vec!["this", "is", "a", "test"],
    output_file: "npm/deno_run_cjs.out",
    exit_code: 0,
  });
}
#[test]
fn compile_npm_cowsay_main() {
  // Compiling the package's default bin via the bare specifier.
  run_npm_bin_compile_test(RunNpmBinCompileOptions {
    input_specifier: "npm:cowsay@1.5.0",
    input_name: None,
    expected_name: "cowsay",
    copy_temp_dir: None,
    node_modules_local: false,
    compile_args: vec!["--allow-read", "--allow-env"],
    run_args: vec!["Hello"],
    output_file: "npm/deno_run_cowsay.out",
    exit_code: 0,
  });
}
#[test]
fn compile_npm_no_permissions() {
  // Without permission flags the compiled binary must exit non-zero.
  run_npm_bin_compile_test(RunNpmBinCompileOptions {
    input_specifier: "npm:@denotest/cli-with-permissions@1.0.0",
    input_name: None,
    expected_name: "denotest",
    copy_temp_dir: None,
    node_modules_local: false,
    compile_args: vec!["-o", "denotest"],
    run_args: vec!["Hello"],
    output_file: "npm/compile_npm_no_permissions.out",
    exit_code: 1,
  });
}
#[test]
fn compile_npm_cowsay_explicit() {
  // Compiling the bin selected explicitly via the subpath.
  run_npm_bin_compile_test(RunNpmBinCompileOptions {
    input_specifier: "npm:cowsay@1.5.0/cowsay",
    input_name: None,
    expected_name: "cowsay",
    copy_temp_dir: None,
    node_modules_local: false,
    compile_args: vec!["--allow-read", "--allow-env"],
    run_args: vec!["Hello"],
    output_file: "npm/deno_run_cowsay.out",
    exit_code: 0,
  });
}
#[test]
fn compile_npm_cowthink() {
  // A secondary bin of the same package selected via the subpath.
  run_npm_bin_compile_test(RunNpmBinCompileOptions {
    input_specifier: "npm:cowsay@1.5.0/cowthink",
    input_name: None,
    expected_name: "cowthink",
    copy_temp_dir: None,
    node_modules_local: false,
    compile_args: vec!["--allow-read", "--allow-env"],
    run_args: vec!["Hello"],
    output_file: "npm/deno_run_cowthink.out",
    exit_code: 0,
  });
}
/// Options for `run_npm_bin_compile_test`.
struct RunNpmBinCompileOptions<'a> {
  // npm specifier passed to `deno compile` (e.g. "npm:cowsay@1.5.0").
  input_specifier: &'a str,
  // testdata subdirectory to copy into the temp cwd; `None` uses a bare temp cwd.
  copy_temp_dir: Option<&'a str>,
  // when true, `--node-modules-dir=auto` is added to the compile args.
  node_modules_local: bool,
  // expected-output file the run output is matched against.
  output_file: &'a str,
  // explicit `--output` name; when `None`, no `--output` flag is passed.
  input_name: Option<&'a str>,
  // name of the produced binary (without the Windows `.exe` suffix).
  expected_name: &'a str,
  // arguments passed to the compiled binary when run.
  run_args: Vec<&'a str>,
  // extra flags passed to `deno compile`.
  compile_args: Vec<&'a str>,
  // expected exit code of the compiled binary.
  exit_code: i32,
}
// Compiles the given npm bin specifier, then runs the resulting binary with
// `opts.run_args` and checks its exit code and output against `opts`.
fn run_npm_bin_compile_test(opts: RunNpmBinCompileOptions) {
  // Build the test context, optionally copying a fixture dir into the temp cwd.
  let builder = TestContextBuilder::for_npm();
  let context = if let Some(copy_temp_dir) = opts.copy_temp_dir {
    builder.use_copy_temp_dir(copy_temp_dir).build()
  } else {
    builder.use_temp_cwd().build()
  };
  let temp_dir = context.temp_dir();
  // Assemble the `deno compile` invocation.
  let mut args = vec!["compile".to_string()];
  for arg in &opts.compile_args {
    args.push(arg.to_string());
  }
  if opts.node_modules_local {
    args.push("--node-modules-dir=auto".to_string());
  }
  if let Some(bin_name) = opts.input_name {
    args.push("--output".to_string());
    args.push(bin_name.to_string());
  }
  args.push(opts.input_specifier.to_string());
  // compile
  let output = context.new_command().args_vec(args).run();
  output.assert_exit_code(0);
  eprintln!("{}", output.combined_output());
  output.skip_output_check();
  // delete the npm folder in the DENO_DIR to ensure it's not using it
  context.deno_dir().remove_dir_all("./npm");
  // run the produced binary and verify its behavior
  let binary_name = if cfg!(windows) {
    format!("{}.exe", opts.expected_name)
  } else {
    opts.expected_name.to_string()
  };
  let binary_path = temp_dir.path().join(binary_name);
  let output = context
    .new_command()
    .name(binary_path)
    .args_vec(opts.run_args)
    .run();
  output.assert_exit_code(opts.exit_code);
  output.assert_matches_file(opts.output_file);
}
#[test]
fn compile_node_modules_symlink_outside() {
  // Verifies symlinks inside node_modules that point outside the project
  // are surfaced as warnings during compile but don't break the binary.
  // this code is using a canonicalized temp dir because otherwise
  // it fails on the Windows CI because Deno makes the root directory
  // a common ancestor of the symlinked temp dir and the canonicalized
  // temp dir, which causes the warnings to not be surfaced
  #[allow(deprecated)]
  let context = TestContextBuilder::for_npm()
    .use_canonicalized_temp_dir()
    .use_copy_temp_dir("compile/node_modules_symlink_outside")
    .cwd("compile/node_modules_symlink_outside")
    .build();
  let temp_dir = context.temp_dir();
  let project_dir = temp_dir
    .path()
    .join("compile")
    .join("node_modules_symlink_outside");
  let symlink_target_dir = temp_dir.path().join("some_folder");
  project_dir.join("node_modules").create_dir_all();
  symlink_target_dir.create_dir_all();
  symlink_target_dir.join("file.txt").write("5");
  let symlink_target_file = temp_dir.path().join("target.txt");
  symlink_target_file.write("5");
  let symlink_dir = project_dir.join("node_modules").join("symlink_dir");
  // create a symlink in the node_modules directory that points to a folder outside the project
  temp_dir.symlink_dir(&symlink_target_dir, &symlink_dir);
  // compile folder
  let output = context
    .new_command()
    .args("compile --allow-read --node-modules-dir=auto --output bin main.ts")
    .run();
  output.assert_exit_code(0);
  output.assert_matches_file(
    "compile/node_modules_symlink_outside/main_compile_folder.out",
  );
  assert!(symlink_dir.exists());
  // Cleanup and remove the folder. The folder test is done separately from
  // the file symlink test because different systems would traverse
  // the directory items in different order.
  symlink_dir.remove_dir_all();
  // create a symlink in the node_modules directory that points to a file in the cwd
  temp_dir.symlink_file(
    &symlink_target_file,
    project_dir.join("node_modules").join("test.txt"),
  );
  assert!(project_dir.join("node_modules/test.txt").exists());
  // compile
  let output = context
    .new_command()
    .args("compile --allow-read --node-modules-dir=auto --output bin main.ts")
    .run();
  output.assert_exit_code(0);
  output.assert_matches_file(
    "compile/node_modules_symlink_outside/main_compile_file.out",
  );
  // run
  let binary_path =
    project_dir.join(if cfg!(windows) { "bin.exe" } else { "bin" });
  let output = context.new_command().name(binary_path).run();
  output.assert_matches_file("compile/node_modules_symlink_outside/main.out");
}
#[test]
fn compile_node_modules_symlink_non_existent() {
  // A dangling symlink inside node_modules should produce a warning during
  // compile, but the resulting binary must still work.
  let context = TestContextBuilder::for_npm().use_temp_cwd().build();
  let temp_dir = context.temp_dir().path();
  temp_dir.join("main.ts").write(
    r#"import { getValue, setValue } from "npm:@denotest/esm-basic";
setValue(4);
console.log(getValue());"#,
  );
  let node_modules_dir = temp_dir.join("node_modules");
  node_modules_dir.create_dir_all();
  // create a symlink that points to a non_existent file
  node_modules_dir.symlink_dir("non_existent", "folder");
  // compile folder
  let output = context
    .new_command()
    .args("compile --allow-read --node-modules-dir=auto --output bin main.ts")
    .run();
  output.assert_exit_code(0);
  output.assert_matches_text(
    r#"Download http://localhost:4260/@denotest%2fesm-basic
Download http://localhost:4260/@denotest/esm-basic/1.0.0.tgz
Initialize @denotest/esm-basic@1.0.0
Check main.ts
Compile main.ts to [WILDCARD]
Warning Failed resolving symlink. Ignoring.
Path: [WILDCARD]
Message: [WILDCARD])
Embedded Files
[WILDCARD]
"#,
  );
  // run
  let binary_path =
    temp_dir.join(if cfg!(windows) { "bin.exe" } else { "bin" });
  let output = context.new_command().name(binary_path).run();
  output.assert_matches_text("4\n");
}
#[test]
fn dynamic_imports_tmp_lit() {
  // Template-literal dynamic imports in the fixture must still be embedded.
  let context = TestContextBuilder::new().build();
  let bin_name = if cfg!(windows) { "app.exe" } else { "app" };
  let exe = context.temp_dir().path().join(bin_name);
  let compile_out = context
    .new_command()
    .args_vec([
      "compile",
      "--output",
      &exe.to_string_lossy(),
      "./compile/dynamic_imports_tmp_lit/main.js",
    ])
    .run();
  compile_out.assert_exit_code(0);
  compile_out.skip_output_check();
  let run_out = context.new_command().name(&exe).run();
  run_out.assert_matches_text("a\nb\n{ data: 5 }\n{ data: 1 }\n");
}
#[test]
fn granular_unstable_features() {
  // Granular `--unstable-*` flags must be preserved in the compiled binary.
  let context = TestContextBuilder::new().build();
  let bin_name = if cfg!(windows) { "app.exe" } else { "app" };
  let exe = context.temp_dir().path().join(bin_name);
  let compile_out = context
    .new_command()
    .args_vec([
      "compile",
      "--output",
      &exe.to_string_lossy(),
      "--unstable-kv",
      "--unstable-temporal",
      "./compile/unstable_features.ts",
    ])
    .run();
  compile_out.assert_exit_code(0);
  compile_out.skip_output_check();
  let run_out = context.new_command().name(&exe).run();
  run_out.assert_exit_code(0);
  run_out.assert_matches_text("Kv {}\nObject [Temporal] {}\n");
}
#[test]
fn granular_unstable_features_config_file() {
  // Unstable features enabled via deno.json must carry over to the binary.
  let context = TestContextBuilder::new().use_temp_cwd().build();
  let dir = context.temp_dir();
  testdata_path()
    .join("compile/unstable_features.ts")
    .copy(&dir.path().join("unstable_features.ts"));
  let bin_name = if cfg!(windows) { "app.exe" } else { "app" };
  let exe = dir.path().join(bin_name);
  dir.write(
    "deno.json",
    serde_json::to_string_pretty(&serde_json::json!({
      "unstable": ["kv", "temporal"]
    }))
    .unwrap(),
  );
  let compile_out = context
    .new_command()
    .args_vec([
      "compile",
      "--config",
      &dir.path().join("deno.json").to_string(),
      "--output",
      &exe.to_string_lossy(),
      "./unstable_features.ts",
    ])
    .run();
  compile_out.assert_exit_code(0);
  compile_out.skip_output_check();
  let run_out = context.new_command().name(&exe).run();
  run_out.assert_exit_code(0);
  run_out.assert_matches_text("Kv {}\nObject [Temporal] {}\n");
}
#[test]
fn dynamic_import_bad_data_uri() {
  // A malformed data URL import compiles, but fails when awaited at runtime.
  let context = TestContextBuilder::new().build();
  let dir = context.temp_dir();
  let bin_name = if cfg!(windows) { "app.exe" } else { "app" };
  let exe = dir.path().join(bin_name);
  let file = dir.path().join("bad_data_uri.ts");
  file.write("await import('data:application/')");
  let compile_out = context
    .new_command()
    .args_vec([
      "compile",
      "--output",
      &exe.to_string_lossy(),
      &file.to_string_lossy(),
    ])
    .run();
  compile_out.assert_exit_code(0);
  compile_out.skip_output_check();
  let run_out = context.new_command().name(&exe).run();
  run_out.assert_exit_code(1);
  run_out.assert_matches_text(
    "[WILDCARD]TypeError: Unable to decode data url.[WILDCARD]",
  );
}
#[test]
fn standalone_config_file_respects_compiler_options() {
  let context = TestContextBuilder::new().build();
  let dir = context.temp_dir();
  // Windows binaries require an `.exe` extension.
  let exe_name = if cfg!(windows) {
    "compiler_options.exe"
  } else {
    "compiler_options"
  };
  let exe = dir.path().join(exe_name);
  // Compile with a config file whose compilerOptions must be honored
  // by the code embedded into the standalone binary.
  let compile_out = context
    .new_command()
    .args_vec([
      "compile",
      "--allow-read",
      "--config",
      "compile/compiler_options/deno.json",
      "--output",
      &exe.to_string_lossy(),
      "./compile/compiler_options/main.ts",
    ])
    .run();
  compile_out.skip_output_check();
  compile_out.assert_exit_code(0);
  let run_out = context.new_command().name(&exe).run();
  run_out.assert_exit_code(0);
  run_out.assert_matches_text("[WILDCARD]C.test() called[WILDCARD]");
}
#[test]
fn standalone_jsr_dynamic_import() {
  let context = TestContextBuilder::for_jsr().build();
  let dir = context.temp_dir();
  // Windows binaries require an `.exe` extension.
  let exe = dir.path().join(if cfg!(windows) {
    "jsr_dynamic_import.exe"
  } else {
    "jsr_dynamic_import"
  });
  // jsr: packages pulled in via dynamic import must be embedded in the
  // compiled binary as well.
  let compile_out = context
    .new_command()
    .args_vec([
      "compile",
      "--output",
      &exe.to_string_lossy(),
      "./compile/jsr_dynamic_import/main.ts",
    ])
    .run();
  compile_out.skip_output_check();
  compile_out.assert_exit_code(0);
  let run_out = context.new_command().name(&exe).run();
  run_out.assert_exit_code(0);
  run_out.assert_matches_text("Hello world\n");
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
// Copyright 2018-2025 the Deno authors. MIT license.
use test_util as util;
use test_util::test;
use util::assert_contains;
#[test]
fn help_output() {
  // `deno --help` must mention every top-level subcommand's description.
  let output = util::deno_cmd()
    .current_dir(util::testdata_path())
    .arg("--help")
    .run();
  let help_text = output.combined_output();
  // One entry per subcommand, in the order they appear in the help text.
  let expected_descriptions = vec![
    "Run a JavaScript or TypeScript program, or a task",
    "Run a server",
    "Run a task defined in the configuration file",
    "Start an interactive Read-Eval-Print Loop (REPL) for Deno",
    "Evaluate a script from the command line",
    "Add dependencies",
    "Installs dependencies either in the local project or globally to a bin directory",
    "Uninstalls a dependency or an executable script in the installation root's bin directory",
    "Run benchmarks",
    "Type-check the dependencies",
    "Compile the script into a self contained executable",
    "Print coverage reports",
    "Generate and show documentation for a module or built-ins",
    "Format source files",
    "Show info about cache or info related to source file",
    "Deno kernel for Jupyter notebooks",
    "Lint source files",
    "Initialize a new project",
    "Run tests",
    "Publish the current working directory's package or workspace",
    // the upgrade subcommand is compiled in only with this feature
    #[cfg(feature = "upgrade")]
    "Upgrade deno executable to given version",
  ];
  for expected in expected_descriptions {
    assert_contains!(help_text, expected);
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
// Copyright 2018-2025 the Deno authors. MIT license.
use pretty_assertions::assert_eq;
use serde_json::Value;
use serde_json::json;
use test_util as util;
use test_util::eprintln;
use test_util::itest;
use test_util::test;
use url::Url;
use util::TestContextBuilder;
use util::assert_contains;
use util::env_vars_for_npm_tests;
use util::http_server;
// NOTE: See how to make test npm packages at ./testdata/npm/README.md
// FIXME(bartlomieju): npm: specifiers are not handled in dynamic imports
// at the moment
// itest!(dynamic_import {
// args: "run --allow-read --allow-env npm/dynamic_import/main.ts",
// output: "npm/dynamic_import/main.out",
// envs: env_vars_for_npm_tests(),
// http_server: true,
// });
// Runs an npm package binary that is already vendored into the project's
// node_modules directory (`--node-modules-dir=auto`).
itest!(run_existing_npm_package {
  args: "run --allow-read --node-modules-dir=auto npm:@denotest/bin",
  output: "npm/run_existing_npm_package/main.out",
  envs: env_vars_for_npm_tests(),
  http_server: true,
  temp_cwd: true,
  cwd: Some("npm/run_existing_npm_package/"),
  copy_temp_dir: Some("npm/run_existing_npm_package/"),
});
// Verifies `require.resolve()` works with file URL style paths when a
// local node_modules directory is in use.
itest!(require_resolve_url_paths {
  args: "run -A --quiet --node-modules-dir=auto url_paths.ts",
  output: "npm/require_resolve_url/url_paths.out",
  envs: env_vars_for_npm_tests(),
  http_server: true,
  exit_code: 0,
  cwd: Some("npm/require_resolve_url/"),
  copy_temp_dir: Some("npm/require_resolve_url/"),
});
#[test]
fn parallel_downloading() {
  // Runs a program with npm dependencies while the "sync download" test
  // env var is NOT set, so registry downloads happen in parallel; the
  // program must still load correctly.
  let (out, _err) = util::run_and_collect_output_with_args(
    true,
    vec![
      "run",
      "--allow-read",
      "--allow-env",
      "npm/cjs_with_deps/main.js",
    ],
    None,
    // don't use the sync env var
    Some(env_vars_for_npm_tests()),
    true,
  );
  assert!(out.contains("chalk cjs loads"));
}
#[test]
fn cached_only_after_first_run() {
  // Exercises `--cached-only` for npm packages:
  //   1) a first run downloads and caches the dependencies,
  //   2) `--cached-only` fails for a program needing an uncached package,
  //   3) `--cached-only` succeeds for the already-cached program.
  let _server = http_server();
  let deno_dir = util::new_deno_dir();
  let deno = util::deno_cmd_with_deno_dir(&deno_dir)
    .current_dir(util::testdata_path())
    .arg("run")
    .arg("--allow-read")
    .arg("--allow-env")
    .arg("npm/cached_only_after_first_run/main1.ts")
    .env("NO_COLOR", "1")
    .envs(env_vars_for_npm_tests())
    .piped_output()
    .spawn()
    .unwrap();
  let output = deno.wait_with_output().unwrap();
  let stderr = String::from_utf8_lossy(&output.stderr);
  let stdout = String::from_utf8_lossy(&output.stdout);
  // First run must hit the registry ("Download" in stderr) and succeed.
  assert_contains!(stderr, "Download");
  assert_contains!(stdout, "[Function: chalk] createChalk");
  assert!(output.status.success());
  let deno = util::deno_cmd_with_deno_dir(&deno_dir)
    .current_dir(util::testdata_path())
    .arg("run")
    .arg("--allow-read")
    .arg("--allow-env")
    .arg("--cached-only")
    .arg("npm/cached_only_after_first_run/main2.ts")
    .env("NO_COLOR", "1")
    .envs(env_vars_for_npm_tests())
    .piped_output()
    .spawn()
    .unwrap();
  let output = deno.wait_with_output().unwrap();
  let stderr = String::from_utf8_lossy(&output.stderr);
  let stdout = String::from_utf8_lossy(&output.stdout);
  // main2.ts needs a package main1.ts didn't cache, so --cached-only must
  // fail with a clear error message.
  assert_contains!(
    stderr,
    "npm package not found in cache: \"ansi-styles\", --cached-only is specified."
  );
  assert!(stdout.is_empty());
  assert!(!output.status.success());
  let deno = util::deno_cmd_with_deno_dir(&deno_dir)
    .current_dir(util::testdata_path())
    .arg("run")
    .arg("--allow-read")
    .arg("--allow-env")
    .arg("--cached-only")
    .arg("npm/cached_only_after_first_run/main1.ts")
    .env("NO_COLOR", "1")
    .envs(env_vars_for_npm_tests())
    .piped_output()
    .spawn()
    .unwrap();
  let output = deno.wait_with_output().unwrap();
  let stderr = String::from_utf8_lossy(&output.stderr);
  let stdout = String::from_utf8_lossy(&output.stdout);
  // main1.ts is fully cached now, so --cached-only works without network.
  assert!(output.status.success());
  assert!(stderr.is_empty());
  assert_contains!(stdout, "[Function: chalk] createChalk");
}
#[test]
fn reload_flag() {
  // Exercises the `--reload` flag with npm packages: a plain run caches,
  // then various --reload forms should (or should not) re-download.
  let _server = http_server();
  let deno_dir = util::new_deno_dir();
  // Initial run: downloads and caches the dependencies.
  let deno = util::deno_cmd_with_deno_dir(&deno_dir)
    .current_dir(util::testdata_path())
    .arg("run")
    .arg("--allow-read")
    .arg("--allow-env")
    .arg("npm/reload/main.ts")
    .env("NO_COLOR", "1")
    .envs(env_vars_for_npm_tests())
    .piped_output()
    .spawn()
    .unwrap();
  let output = deno.wait_with_output().unwrap();
  let stderr = String::from_utf8_lossy(&output.stderr);
  let stdout = String::from_utf8_lossy(&output.stdout);
  assert_contains!(stderr, "Download");
  assert_contains!(stdout, "[Function: chalk] createChalk");
  assert!(output.status.success());
  // `--reload` (everything) must re-download despite the cache.
  let deno = util::deno_cmd_with_deno_dir(&deno_dir)
    .current_dir(util::testdata_path())
    .arg("run")
    .arg("--allow-read")
    .arg("--allow-env")
    .arg("--reload")
    .arg("npm/reload/main.ts")
    .env("NO_COLOR", "1")
    .envs(env_vars_for_npm_tests())
    .piped_output()
    .spawn()
    .unwrap();
  let output = deno.wait_with_output().unwrap();
  let stderr = String::from_utf8_lossy(&output.stderr);
  let stdout = String::from_utf8_lossy(&output.stdout);
  assert_contains!(stderr, "Download");
  assert_contains!(stdout, "[Function: chalk] createChalk");
  assert!(output.status.success());
  // `--reload=npm:` (all npm specifiers) must also re-download.
  let deno = util::deno_cmd_with_deno_dir(&deno_dir)
    .current_dir(util::testdata_path())
    .arg("run")
    .arg("--allow-read")
    .arg("--allow-env")
    .arg("--reload=npm:")
    .arg("npm/reload/main.ts")
    .env("NO_COLOR", "1")
    .envs(env_vars_for_npm_tests())
    .piped_output()
    .spawn()
    .unwrap();
  let output = deno.wait_with_output().unwrap();
  let stderr = String::from_utf8_lossy(&output.stderr);
  let stdout = String::from_utf8_lossy(&output.stdout);
  assert_contains!(stderr, "Download");
  assert_contains!(stdout, "[Function: chalk] createChalk");
  assert!(output.status.success());
  // `--reload=npm:chalk` (the used package) must re-download.
  let deno = util::deno_cmd_with_deno_dir(&deno_dir)
    .current_dir(util::testdata_path())
    .arg("run")
    .arg("--allow-read")
    .arg("--allow-env")
    .arg("--reload=npm:chalk")
    .arg("npm/reload/main.ts")
    .env("NO_COLOR", "1")
    .envs(env_vars_for_npm_tests())
    .piped_output()
    .spawn()
    .unwrap();
  let output = deno.wait_with_output().unwrap();
  let stderr = String::from_utf8_lossy(&output.stderr);
  let stdout = String::from_utf8_lossy(&output.stdout);
  assert_contains!(stderr, "Download");
  assert_contains!(stdout, "[Function: chalk] createChalk");
  assert!(output.status.success());
  // `--reload=npm:foobar` (an unrelated package) must NOT re-download,
  // so stderr stays empty.
  let deno = util::deno_cmd_with_deno_dir(&deno_dir)
    .current_dir(util::testdata_path())
    .arg("run")
    .arg("--allow-read")
    .arg("--allow-env")
    .arg("--reload=npm:foobar")
    .arg("npm/reload/main.ts")
    .env("NO_COLOR", "1")
    .envs(env_vars_for_npm_tests())
    .piped_output()
    .spawn()
    .unwrap();
  let output = deno.wait_with_output().unwrap();
  let stderr = String::from_utf8_lossy(&output.stderr);
  let stdout = String::from_utf8_lossy(&output.stdout);
  assert!(stderr.is_empty());
  assert_contains!(stdout, "[Function: chalk] createChalk");
  assert!(output.status.success());
}
#[test]
fn no_npm_after_first_run() {
  // `--no-npm` must reject npm specifiers both before AND after the
  // packages have been cached by an ordinary run.
  let _server = http_server();
  let deno_dir = util::new_deno_dir();
  // With --no-npm and an empty cache: hard error.
  let deno = util::deno_cmd_with_deno_dir(&deno_dir)
    .current_dir(util::testdata_path())
    .arg("run")
    .arg("--allow-read")
    .arg("--allow-env")
    .arg("--no-npm")
    .arg("npm/no_npm_after_first_run/main1.ts")
    .env("NO_COLOR", "1")
    .envs(env_vars_for_npm_tests())
    .piped_output()
    .spawn()
    .unwrap();
  let output = deno.wait_with_output().unwrap();
  let stderr = String::from_utf8_lossy(&output.stderr);
  let stdout = String::from_utf8_lossy(&output.stdout);
  assert_contains!(
    stderr,
    "error: npm specifiers were requested; but --no-npm is specified\n    at file:///"
  );
  assert!(stdout.is_empty());
  assert!(!output.status.success());
  // Ordinary run: downloads and caches the packages.
  let deno = util::deno_cmd_with_deno_dir(&deno_dir)
    .current_dir(util::testdata_path())
    .arg("run")
    .arg("--allow-read")
    .arg("--allow-env")
    .arg("npm/no_npm_after_first_run/main1.ts")
    .env("NO_COLOR", "1")
    .envs(env_vars_for_npm_tests())
    .piped_output()
    .spawn()
    .unwrap();
  let output = deno.wait_with_output().unwrap();
  let stderr = String::from_utf8_lossy(&output.stderr);
  let stdout = String::from_utf8_lossy(&output.stdout);
  assert_contains!(stderr, "Download");
  assert_contains!(stdout, "[Function: chalk] createChalk");
  assert!(output.status.success());
  // --no-npm must still fail even though everything is now cached.
  let deno = util::deno_cmd_with_deno_dir(&deno_dir)
    .current_dir(util::testdata_path())
    .arg("run")
    .arg("--allow-read")
    .arg("--allow-env")
    .arg("--no-npm")
    .arg("npm/no_npm_after_first_run/main1.ts")
    .env("NO_COLOR", "1")
    .envs(env_vars_for_npm_tests())
    .piped_output()
    .spawn()
    .unwrap();
  let output = deno.wait_with_output().unwrap();
  let stderr = String::from_utf8_lossy(&output.stderr);
  let stdout = String::from_utf8_lossy(&output.stdout);
  assert_contains!(
    stderr,
    "error: npm specifiers were requested; but --no-npm is specified\n    at file:///"
  );
  assert!(stdout.is_empty());
  assert!(!output.status.success());
}
#[test]
fn deno_run_cjs_module() {
  // Running `deno run npm:mkdirp` executes the package's CommonJS bin
  // entry point directly.
  let _server = http_server();
  let deno_dir = util::new_deno_dir();
  let deno = util::deno_cmd_with_deno_dir(&deno_dir)
    .current_dir(deno_dir.path())
    .arg("run")
    .arg("--allow-read")
    .arg("--allow-env")
    .arg("--allow-write")
    .arg("npm:mkdirp@1.0.4")
    .arg("test_dir")
    .env("NO_COLOR", "1")
    .envs(env_vars_for_npm_tests())
    .spawn()
    .unwrap();
  let output = deno.wait_with_output().unwrap();
  assert!(output.status.success());
  // mkdirp must have actually created the requested directory.
  assert!(deno_dir.path().join("test_dir").exists());
}
#[test]
fn deno_run_bin_lockfile() {
  // Running an npm bin entry in a project with a config file should
  // create a lockfile automatically.
  let context = TestContextBuilder::for_npm().use_temp_cwd().build();
  let temp_dir = context.temp_dir();
  // An empty deno.json is enough to enable lockfile creation.
  temp_dir.write("deno.json", "{}");
  let output = context
    .new_command()
    .args("run -A --quiet npm:@denotest/bin/cli-esm this is a test")
    .run();
  output.assert_matches_file("npm/deno_run_esm.out");
  assert!(temp_dir.path().join("deno.lock").exists());
}
#[test]
fn node_modules_dir_cache() {
  // With `--node-modules-dir=auto`, a cached package that exists in the
  // local node_modules folder should be usable even after its copy in the
  // global npm cache has been deleted.
  let _server = http_server();
  let deno_dir = util::new_deno_dir();
  let deno = util::deno_cmd_with_deno_dir(&deno_dir)
    .current_dir(deno_dir.path())
    .arg("cache")
    .arg("--node-modules-dir=auto")
    .arg("--quiet")
    .arg(util::testdata_path().join("npm/dual_cjs_esm/main.ts"))
    .envs(env_vars_for_npm_tests())
    .spawn()
    .unwrap();
  let output = deno.wait_with_output().unwrap();
  assert!(output.status.success());
  // Both the pnpm-style store entry and the top-level symlinked folder
  // should have been created.
  let node_modules = deno_dir.path().join("node_modules");
  assert!(
    node_modules
      .join(
        ".deno/@denotest+dual-cjs-esm@1.0.0/node_modules/@denotest/dual-cjs-esm"
      )
      .exists()
  );
  assert!(node_modules.join("@denotest/dual-cjs-esm").exists());
  // now try deleting the folder with the package source in the npm cache dir
  let package_global_cache_dir = deno_dir
    .path()
    .join("npm")
    .join("localhost_4260")
    .join("@denotest")
    .join("dual-cjs-esm")
    .join("1.0.0");
  assert!(package_global_cache_dir.exists());
  std::fs::remove_dir_all(&package_global_cache_dir).unwrap();
  // run the output, and it shouldn't bother recreating the directory
  // because it already has everything cached locally in the node_modules folder
  let deno = util::deno_cmd_with_deno_dir(&deno_dir)
    .current_dir(deno_dir.path())
    .arg("run")
    .arg("--node-modules-dir=auto")
    .arg("--quiet")
    .arg("-A")
    .arg(util::testdata_path().join("npm/dual_cjs_esm/main.ts"))
    .envs(env_vars_for_npm_tests())
    .spawn()
    .unwrap();
  let output = deno.wait_with_output().unwrap();
  assert!(output.status.success());
  // this won't exist, but actually the parent directory
  // will because it still re-downloads the registry information
  assert!(!package_global_cache_dir.exists());
}
#[test]
fn ensure_registry_files_local() {
  // Every registry.json in the test registry must reference only local
  // tarballs; a stray registry.npmjs.org URL would make tests hit the
  // real network.
  let registry_dir_path = util::tests_path().join("registry").join("npm");
  for entry in walkdir::WalkDir::new(&registry_dir_path).max_depth(2) {
    let entry = entry.unwrap();
    // Only package directories can contain a registry.json.
    if !entry.metadata().unwrap().is_dir() {
      continue;
    }
    let registry_json_path = entry.path().join("registry.json");
    if !registry_json_path.exists() {
      continue;
    }
    let file_text = std::fs::read_to_string(&registry_json_path).unwrap();
    // The directory name relative to the registry root is the package name.
    let package_name = entry
      .path()
      .strip_prefix(&registry_dir_path)
      .unwrap()
      .to_string_lossy();
    let remote_prefix = format!("https://registry.npmjs.org/{}/-/", package_name);
    assert!(
      !file_text.contains(&remote_prefix),
      "file {} contained a reference to the npm registry",
      registry_json_path.display()
    );
  }
}
#[test]
fn lock_file_missing_top_level_package() {
  // Regression test: when the auto-discovered lockfile is corrupt (here the
  // top-level "cowsay" package entry has been removed), the npm resolver
  // must fail with a descriptive error rather than panicking while
  // hydrating the snapshot.
  let _server = http_server();
  let deno_dir = util::new_deno_dir();
  let temp_dir = util::TempDir::new();
  // write empty config file
  temp_dir.write("deno.json", "{}");
  // Lock file that is automatically picked up has been intentionally broken,
  // by removing "cowsay" package from it. This test ensures that npm resolver
  // snapshot can be successfully hydrated in such situation
  let lock_file_content = r#"{
"version": "2",
"remote": {},
"npm": {
"specifiers": { "cowsay": "cowsay@1.5.0" },
"packages": {
"ansi-regex@3.0.1": {
"integrity": "sha512-+O9Jct8wf++lXxxFc4hc8LsjaSq0HFzzL7cVsw8pRDIPdjKD2mT4ytDZlLuSBZ4cLKZFXIrMGO7DbQCtMJJMKw==",
"dependencies": {}
},
"ansi-regex@5.0.1": {
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"dependencies": {}
},
"ansi-styles@4.3.0": {
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dependencies": { "color-convert": "color-convert@2.0.1" }
},
"camelcase@5.3.1": {
"integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
"dependencies": {}
},
"cliui@6.0.0": {
"integrity": "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==",
"dependencies": {
"string-width": "string-width@4.2.3",
"strip-ansi": "strip-ansi@6.0.1",
"wrap-ansi": "wrap-ansi@6.2.0"
}
},
"color-convert@2.0.1": {
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dependencies": { "color-name": "color-name@1.1.4" }
},
"color-name@1.1.4": {
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dependencies": {}
},
"decamelize@1.2.0": {
"integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==",
"dependencies": {}
},
"emoji-regex@8.0.0": {
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"dependencies": {}
},
"find-up@4.1.0": {
"integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
"dependencies": {
"locate-path": "locate-path@5.0.0",
"path-exists": "path-exists@4.0.0"
}
},
"get-caller-file@2.0.5": {
"integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
"dependencies": {}
},
"get-stdin@8.0.0": {
"integrity": "sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg==",
"dependencies": {}
},
"is-fullwidth-code-point@2.0.0": {
"integrity": "sha512-VHskAKYM8RfSFXwee5t5cbN5PZeq1Wrh6qd5bkyiXIf6UQcN6w/A0eXM9r6t8d+GYOh+o6ZhiEnb88LN/Y8m2w==",
"dependencies": {}
},
"is-fullwidth-code-point@3.0.0": {
"integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
"dependencies": {}
},
"locate-path@5.0.0": {
"integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
"dependencies": { "p-locate": "p-locate@4.1.0" }
},
"p-limit@2.3.0": {
"integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
"dependencies": { "p-try": "p-try@2.2.0" }
},
"p-locate@4.1.0": {
"integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
"dependencies": { "p-limit": "p-limit@2.3.0" }
},
"p-try@2.2.0": {
"integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==",
"dependencies": {}
},
"path-exists@4.0.0": {
"integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
"dependencies": {}
},
"require-directory@2.1.1": {
"integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==",
"dependencies": {}
},
"require-main-filename@2.0.0": {
"integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==",
"dependencies": {}
},
"set-blocking@2.0.0": {
"integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==",
"dependencies": {}
},
"string-width@2.1.1": {
"integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==",
"dependencies": {
"is-fullwidth-code-point": "is-fullwidth-code-point@2.0.0",
"strip-ansi": "strip-ansi@4.0.0"
}
},
"string-width@4.2.3": {
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dependencies": {
"emoji-regex": "emoji-regex@8.0.0",
"is-fullwidth-code-point": "is-fullwidth-code-point@3.0.0",
"strip-ansi": "strip-ansi@6.0.1"
}
},
"strip-ansi@4.0.0": {
"integrity": "sha512-4XaJ2zQdCzROZDivEVIDPkcQn8LMFSa8kj8Gxb/Lnwzv9A8VctNZ+lfivC/sV3ivW8ElJTERXZoPBRrZKkNKow==",
"dependencies": { "ansi-regex": "ansi-regex@3.0.1" }
},
"strip-ansi@6.0.1": {
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"dependencies": { "ansi-regex": "ansi-regex@5.0.1" }
},
"strip-final-newline@2.0.0": {
"integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==",
"dependencies": {}
},
"which-module@2.0.0": {
"integrity": "sha512-B+enWhmw6cjfVC7kS8Pj9pCrKSc5txArRyaYGe088shv/FGWH+0Rjx/xPgtsWfsUtS27FkP697E4DDhgrgoc0Q==",
"dependencies": {}
},
"wrap-ansi@6.2.0": {
"integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==",
"dependencies": {
"ansi-styles": "ansi-styles@4.3.0",
"string-width": "string-width@4.2.3",
"strip-ansi": "strip-ansi@6.0.1"
}
},
"y18n@4.0.3": {
"integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==",
"dependencies": {}
},
"yargs-parser@18.1.3": {
"integrity": "sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==",
"dependencies": {
"camelcase": "camelcase@5.3.1",
"decamelize": "decamelize@1.2.0"
}
},
"yargs@15.4.1": {
"integrity": "sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==",
"dependencies": {
"cliui": "cliui@6.0.0",
"decamelize": "decamelize@1.2.0",
"find-up": "find-up@4.1.0",
"get-caller-file": "get-caller-file@2.0.5",
"require-directory": "require-directory@2.1.1",
"require-main-filename": "require-main-filename@2.0.0",
"set-blocking": "set-blocking@2.0.0",
"string-width": "string-width@4.2.3",
"which-module": "which-module@2.0.0",
"y18n": "y18n@4.0.3",
"yargs-parser": "yargs-parser@18.1.3"
}
}
}
}
}
"#;
  temp_dir.write("deno.lock", lock_file_content);
  // A program whose top-level dependency is the package missing from the
  // lockfile above.
  let main_contents = r#"
import cowsay from "npm:cowsay";
console.log(cowsay);
"#;
  temp_dir.write("main.ts", main_contents);
  let deno = util::deno_cmd_with_deno_dir(&deno_dir)
    .current_dir(temp_dir.path())
    .arg("run")
    .arg("--quiet")
    .arg("--lock")
    .arg("deno.lock")
    .arg("-A")
    .arg("main.ts")
    .envs(env_vars_for_npm_tests())
    .piped_output()
    .spawn()
    .unwrap();
  let output = deno.wait_with_output().unwrap();
  // The run must fail with a clear "corrupt lockfile" diagnostic.
  assert!(!output.status.success());
  let stderr = String::from_utf8(output.stderr).unwrap();
  test_util::assertions::assert_wildcard_match(
    &stderr,
    concat!(
      "error: failed reading lockfile '[WILDLINE]deno.lock'\n",
      "\n",
      "Caused by:\n",
      " 0: The lockfile is corrupt. Remove the lockfile to regenerate it.\n",
      " 1: Could not find 'cowsay@1.5.0' in the list of packages.\n"
    ),
  );
}
#[test]
fn lock_file_lock_write() {
  // https://github.com/denoland/deno/issues/16666
  // Caching a package with a complete, valid lockfile must not rewrite
  // (and therefore must not reorder or reformat) the lockfile on disk.
  let _server = http_server();
  let deno_dir = util::new_deno_dir();
  let temp_dir = util::TempDir::new();
  temp_dir.write("deno.json", "{}");
  let lock_file_content = r#"{
"version": "5",
"specifiers": {
"npm:cowsay@1.5.0": "1.5.0"
},
"npm": {
"ansi-regex@3.0.1": {
"integrity": "sha512-+O9Jct8wf++lXxxFc4hc8LsjaSq0HFzzL7cVsw8pRDIPdjKD2mT4ytDZlLuSBZ4cLKZFXIrMGO7DbQCtMJJMKw=="
},
"ansi-regex@5.0.1": {
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="
},
"ansi-styles@4.3.0": {
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dependencies": [
"color-convert"
]
},
"camelcase@5.3.1": {
"integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg=="
},
"cliui@6.0.0": {
"integrity": "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==",
"dependencies": [
"string-width@4.2.3",
"strip-ansi@6.0.1",
"wrap-ansi"
]
},
"color-convert@2.0.1": {
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dependencies": [
"color-name"
]
},
"color-name@1.1.4": {
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
},
"cowsay@1.5.0": {
"integrity": "sha512-8Ipzr54Z8zROr/62C8f0PdhQcDusS05gKTS87xxdji8VbWefWly0k8BwGK7+VqamOrkv3eGsCkPtvlHzrhWsCA==",
"dependencies": [
"get-stdin",
"string-width@2.1.1",
"strip-final-newline",
"yargs"
]
},
"decamelize@1.2.0": {
"integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA=="
},
"emoji-regex@8.0.0": {
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
},
"find-up@4.1.0": {
"integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
"dependencies": [
"locate-path",
"path-exists"
]
},
"get-caller-file@2.0.5": {
"integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg=="
},
"get-stdin@8.0.0": {
"integrity": "sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg=="
},
"is-fullwidth-code-point@2.0.0": {
"integrity": "sha512-VHskAKYM8RfSFXwee5t5cbN5PZeq1Wrh6qd5bkyiXIf6UQcN6w/A0eXM9r6t8d+GYOh+o6ZhiEnb88LN/Y8m2w=="
},
"is-fullwidth-code-point@3.0.0": {
"integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="
},
"locate-path@5.0.0": {
"integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
"dependencies": [
"p-locate"
]
},
"p-limit@2.3.0": {
"integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
"dependencies": [
"p-try"
]
},
"p-locate@4.1.0": {
"integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
"dependencies": [
"p-limit"
]
},
"p-try@2.2.0": {
"integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ=="
},
"path-exists@4.0.0": {
"integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w=="
},
"require-directory@2.1.1": {
"integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q=="
},
"require-main-filename@2.0.0": {
"integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg=="
},
"set-blocking@2.0.0": {
"integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw=="
},
"string-width@2.1.1": {
"integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==",
"dependencies": [
"is-fullwidth-code-point@2.0.0",
"strip-ansi@4.0.0"
]
},
"string-width@4.2.3": {
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dependencies": [
"emoji-regex",
"is-fullwidth-code-point@3.0.0",
"strip-ansi@6.0.1"
]
},
"strip-ansi@4.0.0": {
"integrity": "sha512-4XaJ2zQdCzROZDivEVIDPkcQn8LMFSa8kj8Gxb/Lnwzv9A8VctNZ+lfivC/sV3ivW8ElJTERXZoPBRrZKkNKow==",
"dependencies": [
"ansi-regex@3.0.1"
]
},
"strip-ansi@6.0.1": {
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"dependencies": [
"ansi-regex@5.0.1"
]
},
"strip-final-newline@2.0.0": {
"integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA=="
},
"which-module@2.0.0": {
"integrity": "sha512-B+enWhmw6cjfVC7kS8Pj9pCrKSc5txArRyaYGe088shv/FGWH+0Rjx/xPgtsWfsUtS27FkP697E4DDhgrgoc0Q=="
},
"wrap-ansi@6.2.0": {
"integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==",
"dependencies": [
"ansi-styles",
"string-width@4.2.3",
"strip-ansi@6.0.1"
]
},
"y18n@4.0.3": {
"integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ=="
},
"yargs-parser@18.1.3": {
"integrity": "sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==",
"dependencies": [
"camelcase",
"decamelize"
]
},
"yargs@15.4.1": {
"integrity": "sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==",
"dependencies": [
"cliui",
"decamelize",
"find-up",
"get-caller-file",
"require-directory",
"require-main-filename",
"set-blocking",
"string-width@4.2.3",
"which-module",
"y18n",
"yargs-parser"
]
}
}
}
"#;
  temp_dir.write("deno.lock", lock_file_content);
  let deno = util::deno_cmd_with_deno_dir(&deno_dir)
    .current_dir(temp_dir.path())
    .arg("cache")
    .arg("--quiet")
    .arg("npm:cowsay@1.5.0")
    .envs(env_vars_for_npm_tests())
    .piped_output()
    .spawn()
    .unwrap();
  let output = deno.wait_with_output().unwrap();
  eprintln!(
    "output: {}",
    String::from_utf8(output.stderr.clone()).unwrap()
  );
  assert!(output.status.success());
  assert_eq!(output.status.code(), Some(0));
  let stdout = String::from_utf8(output.stdout).unwrap();
  assert!(stdout.is_empty());
  let stderr = String::from_utf8(output.stderr).unwrap();
  assert!(stderr.is_empty());
  // The lockfile on disk must be byte-for-byte unchanged.
  assert_eq!(
    std::fs::read_to_string(temp_dir.path().join("deno.lock")).unwrap(),
    lock_file_content,
  );
}
#[test]
fn auto_discover_lock_file() {
  // A lockfile sitting next to the config file is picked up automatically;
  // a wrong tarball integrity in it must abort the npm package caching.
  let context = TestContextBuilder::for_npm().use_temp_cwd().build();
  let temp_dir = context.temp_dir();
  // write empty config file
  temp_dir.write("deno.json", "{}");
  // write a lock file with borked integrity
  let lock_file_content = r#"{
"version": "5",
"specifiers": {
"npm:@denotest/bin": "1.0.0"
},
"npm": {
"@denotest/bin@1.0.0": {
"integrity": "sha512-foobar",
"tarball": "http://localhost:4260/@denotest/bin/1.0.0.tgz"
}
}
}"#;
  temp_dir.write("deno.lock", lock_file_content);
  let output = context
    .new_command()
    .args("run -A npm:@denotest/bin/cli-esm test")
    .run();
  // The checksum mismatch must be reported with expected vs. actual.
  output
    .assert_matches_text(
      r#"Download http://localhost:4260/@denotest/bin/1.0.0.tgz
error: Failed caching npm package '@denotest/bin@1.0.0'
Caused by:
Tarball checksum did not match what was provided by npm registry for @denotest/bin@1.0.0.
Expected: foobar
Actual: [WILDCARD]
"#)
    .assert_exit_code(1);
}
// TODO(2.0): this should be rewritten to a spec test and first run `deno install`
// itest!(node_modules_import_run {
// args: "run --quiet main.ts",
// output: "npm/node_modules_import/main.out",
// http_server: true,
// copy_temp_dir: Some("npm/node_modules_import/"),
// cwd: Some("npm/node_modules_import/"),
// envs: env_vars_for_npm_tests(),
// exit_code: 0,
// });
// TODO(2.0): this should be rewritten to a spec test and first run `deno install`
// itest!(node_modules_import_check {
// args: "check --quiet main.ts",
// output: "npm/node_modules_import/main_check.out",
// envs: env_vars_for_npm_tests(),
// http_server: true,
// cwd: Some("npm/node_modules_import/"),
// copy_temp_dir: Some("npm/node_modules_import/"),
// exit_code: 1,
// });
// TODO(2.0): this should be rewritten to a spec test and first run `deno install`
#[test]
#[ignore]
fn reload_info_not_found_cache_but_exists_remote() {
fn remove_version(registry_json: &mut Value, version: &str) {
registry_json
.as_object_mut()
.unwrap()
.get_mut("versions")
.unwrap()
.as_object_mut()
.unwrap()
.remove(version);
}
fn remove_version_for_package(
deno_dir: &util::TempDir,
package: &str,
version: &str,
) {
let registry_json_path =
format!("npm/localhost_4260/{}/registry.json", package);
let mut registry_json: Value =
serde_json::from_str(&deno_dir.read_to_string(®istry_json_path))
.unwrap();
remove_version(&mut registry_json, version);
// for the purpose of this test, just remove the dist-tag as it might contain this version
registry_json
.as_object_mut()
.unwrap()
.get_mut("dist-tags")
.unwrap()
.as_object_mut()
.unwrap()
.remove("latest");
deno_dir.write(
®istry_json_path,
serde_json::to_string(®istry_json).unwrap(),
);
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
// Copyright 2018-2025 the Deno authors. MIT license.
use std::process::Command;
use std::process::Stdio;
use std::time::Instant;
use test_util as util;
use util::TestContext;
use util::TestContextBuilder;
use util::assert_starts_with;
use util::test;
#[test(flaky)]
fn upgrade_invalid_lockfile() {
let context = upgrade_context();
let temp_dir = context.temp_dir();
temp_dir.write("deno.deno", r#"{ \"lock\": true }"#);
temp_dir.write(
"deno.lock",
r#"{
"version": "invalid",
}"#,
);
let exe_path = temp_dir.path().join("deno");
util::deno_exe_path().copy(&exe_path);
assert!(exe_path.exists());
exe_path.mark_executable();
let output = Command::new(&exe_path)
.arg("upgrade")
.arg("--version")
.arg("foobar")
.arg("--dry-run")
.stderr(Stdio::piped())
.spawn()
.unwrap()
.wait_with_output()
.unwrap();
assert!(!output.status.success());
// should make it here instead of erroring on an invalid lockfile
assert_starts_with!(
&util::strip_ansi_codes(&String::from_utf8(output.stderr.clone()).unwrap())
.to_string(),
"error: Invalid version passed (foobar)"
);
}
#[test(flaky)]
fn upgrade_prompt() {
let context = upgrade_context();
let temp_dir = context.temp_dir();
// start a task that goes indefinitely in order to allow
// the upgrade check to occur
temp_dir.write("main.js", "setInterval(() => {}, 1_000)");
let cmd = context
.new_command()
.args("run --log-level=debug main.js")
.env_remove("DENO_NO_UPDATE_CHECK");
// run once and wait for the version to be stored
cmd.with_pty(|mut pty| {
pty.expect("Finished upgrade checker.");
});
// now check that the upgrade prompt is shown the next time this is run
temp_dir.write("main.js", "");
cmd.with_pty(|mut pty| {
// - We need to use a pty here because the upgrade prompt
// doesn't occur except when there's a pty.
// - Version comes from the test server.
pty.expect_any(&[
" 99999.99.99 Run `deno upgrade` to install it.",
// it builds canary releases on main, so check for this in that case
"Run `deno upgrade canary` to install it.",
]);
});
}
#[test(flaky)]
fn upgrade_lsp_repl_sleeps() {
let context = TestContextBuilder::new()
.use_http_server()
.use_temp_cwd()
.env(
"DENO_DONT_USE_INTERNAL_BASE_UPGRADE_URL",
"http://localhost:4545/upgrade/sleep",
)
.build();
let start_instant = Instant::now();
// ensure this works even though the upgrade check is taking
// a long time to complete
context
.new_command()
.args("repl")
.env_remove("DENO_NO_UPDATE_CHECK")
.with_pty(|mut pty| {
pty.write_line("123 + 456\n");
pty.expect("579");
});
// the test server will sleep for 95 seconds, so ensure this is less
let elapsed_secs = start_instant.elapsed().as_secs();
assert!(elapsed_secs < 94, "elapsed_secs: {}", elapsed_secs);
}
fn upgrade_context() -> TestContext {
TestContextBuilder::new()
.use_http_server()
.use_temp_cwd()
.env(
"DENO_DONT_USE_INTERNAL_BASE_UPGRADE_URL",
"http://localhost:4545",
)
.build()
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/integration/repl_tests.rs | tests/integration/repl_tests.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use test_util as util;
use test_util::test;
use util::TempDir;
use util::TestContext;
use util::TestContextBuilder;
use util::assert_contains;
use util::assert_ends_with;
use util::assert_not_contains;
#[test(flaky)]
fn pty_multiline() {
util::with_pty(&["repl"], |mut console| {
console.write_line("(\n1 + 2\n)");
console.expect("3");
console.write_line("{\nfoo: \"foo\"\n}");
console.expect("{ foo: \"foo\" }");
console.write_line("`\nfoo\n`");
console.expect("\"\\nfoo\\n\"");
console.write_line("`\n\\`\n`");
console.expect(r#""\n`\n""#);
console.write_line("'{'");
console.expect(r#""{""#);
console.write_line("'('");
console.expect(r#""(""#);
console.write_line("'['");
console.expect(r#""[""#);
console.write_line("/{/");
console.expect("/{/");
console.write_line("/\\(/");
console.expect("/\\(/");
console.write_line("/\\[/");
console.expect("/\\[/");
console.write_line("console.log(\"{test1} abc {test2} def {{test3}}\".match(/{([^{].+?)}/));");
console.expect("[");
console.expect(" \"{test1}\",");
console.expect(" \"test1\",");
console.expect(" index: 0,");
console.expect(" input: \"{test1} abc {test2} def {{test3}}\",");
console.expect(" groups: undefined");
console.expect("]");
});
}
#[test(flaky)]
fn pty_null() {
util::with_pty(&["repl"], |mut console| {
console.write_line("null");
console.expect("null");
});
}
#[test(flaky)]
fn pty_unpaired_braces() {
for right_brace in &[")", "]", "}"] {
util::with_pty(&["repl"], |mut console| {
console.write_line(right_brace);
console.expect("parse error: Expression expected");
});
}
}
#[test(flaky)]
fn pty_bad_input() {
util::with_pty(&["repl"], |mut console| {
console.write_line("'\\u{1f3b5}'[0]");
console.expect("Unterminated string literal");
});
}
#[test(flaky)]
fn pty_syntax_error_input() {
util::with_pty(&["repl"], |mut console| {
console.write_line("('\\u')");
console.expect("Bad character escape sequence, expected 4 hex characters");
console.write_line("'");
console.expect("Unterminated string constant");
console.write_line("[{'a'}];");
console.expect("Expected a semicolon");
});
}
#[test(flaky)]
fn pty_complete_symbol() {
util::with_pty(&["repl"], |mut console| {
console.write_line_raw("Symbol.it\t");
console.expect("Symbol(Symbol.iterator)");
});
}
#[test(flaky)]
fn pty_complete_declarations() {
util::with_pty(&["repl"], |mut console| {
console.write_line("class MyClass {}");
console.expect("undefined");
console.write_line_raw("My\t");
console.expect("[class MyClass]");
console.write_line("let myVar = 2 + 3;");
console.expect("undefined");
console.write_line_raw("myV\t");
console.expect("5");
});
}
#[test(flaky)]
fn pty_complete_primitives() {
util::with_pty(&["repl"], |mut console| {
console.write_line("let func = function test(){}");
console.expect("undefined");
console.write_line_raw("func.appl\t");
console.expect("func.apply");
console.write_line("let str = ''");
console.expect("undefined");
console.write_line_raw("str.leng\t");
console.expect("str.length");
console.write_line_raw("false.valueO\t");
console.expect("false.valueOf");
console.write_line_raw("5n.valueO\t");
console.expect("5n.valueOf");
console.write_line("let num = 5");
console.expect("undefined");
console.write_line_raw("num.toStrin\t");
console.expect("num.toString");
});
}
#[test(flaky)]
fn pty_complete_expression() {
util::with_pty(&["repl"], |mut console| {
console.write_raw("Deno.\t\t");
console.expect("Display all");
console.write_raw("y");
console.expect_all(&["symlink", "args", "permissions", "exit"]);
});
}
#[test(flaky)]
fn pty_complete_imports() {
let context = TestContextBuilder::default().use_temp_cwd().build();
let temp_dir = context.temp_dir();
temp_dir.create_dir_all("subdir");
temp_dir.write("./subdir/my_file.ts", "");
temp_dir.create_dir_all("run");
temp_dir.write("./run/hello.ts", "console.log('Hello World');");
temp_dir.write(
"./run/output.ts",
r#"export function output(text: string) {
console.log(text);
}
"#,
);
context
.new_command()
.args_vec(["repl", "-A"])
.with_pty(|mut console| {
// single quotes
console.write_line_raw("import './run/hel\t'");
console.expect("Hello World");
// double quotes
console.write_line_raw("import { output } from \"./run/out\t\"");
console.expect("\"./run/output.ts\"");
console.write_line_raw("output('testing output');");
console.expect("testing output");
});
// ensure when the directory changes that the suggestions come from the cwd
context
.new_command()
.args_vec(["repl", "-A"])
.with_pty(|mut console| {
console.write_line("Deno.chdir('./subdir');");
console.expect("undefined");
console.write_line_raw("import '../run/he\t'");
console.expect("Hello World");
});
}
#[test(flaky)]
fn pty_complete_imports_no_panic_empty_specifier() {
// does not panic when tabbing when empty
util::with_pty(&["repl", "-A"], |mut console| {
if cfg!(windows) {
console.write_line_raw("import '\t'");
console.expect_any(&["not prefixed with", "https://deno.land"]);
} else {
console.write_raw("import '\t");
console.expect("import 'https://deno.land");
}
});
}
#[test(flaky)]
fn pty_ignore_symbols() {
util::with_pty(&["repl"], |mut console| {
console.write_line_raw("Array.Symbol\t");
console.expect("undefined");
});
}
#[test(flaky)]
fn pty_assign_global_this() {
util::with_pty(&["repl"], |mut console| {
console.write_line("globalThis = 40 + 2;");
console.expect("42");
});
}
#[test(flaky)]
fn pty_assign_deno_keys_and_deno() {
util::with_pty(&["repl"], |mut console| {
console.write_line(
"Object.keys(Deno).forEach((key)=>{try{Deno[key] = undefined} catch {}})",
);
console.expect("undefined");
console.write_line("delete globalThis.Deno");
console.expect("true");
console.write_line("console.log('testing ' + 'this out');");
console.expect("testing this out");
console.expect("undefined");
});
}
#[test(flaky)]
fn pty_internal_repl() {
util::with_pty(&["repl"], |mut console| {
console.write_line("'Length: ' + Object.keys(globalThis).filter(k => k.startsWith('__DENO_')).length;");
console.expect("Length: 0");
console.write_line_raw("__\t\t");
console.expect("> __");
let output = console.read_until("> __");
assert_contains!(output, "__defineGetter__");
// should not contain the internal repl variable
// in the `globalThis` or completions output
assert_not_contains!(output, "__DENO_");
});
}
#[test(flaky)]
fn pty_emoji() {
// windows was having issues displaying this
util::with_pty(&["repl"], |mut console| {
console.write_line(r"console.log('\u{1F995}');");
console.expect("🦕");
});
}
#[test(flaky)]
fn console_log() {
util::with_pty(&["repl"], |mut console| {
console.write_line("console.log('hello');");
console.expect("hello");
console.write_line("'world'");
console.expect("\"world\"");
});
// https://github.com/denoland/deno/issues/21428
let (out, err) = util::run_and_collect_output_with_args(
true,
vec![
"repl",
"--eval-file=./../specs/repl/console_log/093_console_log_format.js",
],
None,
None,
false,
);
assert_contains!(out, "0.5");
assert!(err.is_empty());
}
#[test(flaky)]
fn object_literal() {
util::with_pty(&["repl"], |mut console| {
console.write_line("{}");
console.expect("{}");
console.write_line("{ foo: 'bar' }");
console.expect("{ foo: \"bar\" }");
});
}
#[test(flaky)]
fn block_expression() {
util::with_pty(&["repl"], |mut console| {
console.write_line("{};");
console.expect("undefined");
console.write_line("{\"\"}");
console.expect("\"\"");
});
}
#[test(flaky)]
fn await_resolve() {
util::with_pty(&["repl"], |mut console| {
console.write_line("await Promise.resolve('done')");
console.expect("\"done\"");
});
}
#[test(flaky)]
fn await_timeout() {
util::with_pty(&["repl"], |mut console| {
console.write_line("await new Promise((r) => setTimeout(r, 0, 'done'))");
console.expect("\"done\"");
});
}
#[test(flaky)]
fn let_redeclaration() {
util::with_pty(&["repl"], |mut console| {
console.write_line("let foo = 0;");
console.expect("undefined");
console.write_line("foo");
console.expect("0");
console.write_line("let foo = 1;");
console.expect("undefined");
console.write_line("foo");
console.expect("1");
});
}
#[test(flaky)]
fn repl_cwd() {
let context = TestContextBuilder::default().use_temp_cwd().build();
let temp_dir = context.temp_dir();
context
.new_command()
.args_vec(["repl", "-A"])
.with_pty(|mut console| {
console.write_line("Deno.cwd()");
console.expect(
temp_dir
.path()
.as_path()
.file_name()
.unwrap()
.to_str()
.unwrap(),
);
});
}
#[test(flaky)]
fn typescript() {
util::with_pty(&["repl"], |mut console| {
console.write_line("function add(a: number, b: number) { return a + b }");
console.expect("undefined");
console.write_line("const result: number = add(1, 2) as number;");
console.expect("undefined");
console.write_line("result");
console.expect("3");
});
}
#[test(flaky)]
fn typescript_declarations() {
util::with_pty(&["repl"], |mut console| {
console.write_line("namespace Test { export enum Values { A, B, C } }");
console.expect("undefined");
console.write_line("Test.Values.A");
console.expect("0");
console.write_line("Test.Values.C");
console.expect("2");
console.write_line("interface MyInterface { prop: string; }");
console.expect("undefined");
console.write_line("type MyTypeAlias = string;");
console.expect("undefined");
});
}
#[test(flaky)]
fn typescript_decorators() {
let context = TestContextBuilder::default().use_temp_cwd().build();
let temp_dir = context.temp_dir();
temp_dir.write(
"./deno.json",
r#"{ "compilerOptions": { "experimentalDecorators": true } }"#,
);
let config_path = temp_dir.target_path().join("./deno.json");
util::with_pty(
&["repl", "--config", config_path.to_string_lossy().as_ref()],
|mut console| {
console.write_line(
"function dec(target) { target.prototype.test = () => 2; }",
);
console.expect("undefined");
console.write_line("@dec class Test {}");
console.expect("[class Test]");
console.write_line("new Test().test()");
console.expect("2");
},
);
}
#[test(flaky)]
fn eof() {
util::with_pty(&["repl"], |mut console| {
console.write_line("1 + 2");
console.expect("3");
});
}
#[test(flaky)]
fn strict() {
util::with_pty(&["repl"], |mut console| {
console.write_line("let a = {};");
console.expect("undefined");
console.write_line("Object.preventExtensions(a)");
console.expect("{}");
console.write_line("a.c = 1;");
console.expect(
"Uncaught TypeError: Cannot add property c, object is not extensible",
);
});
}
#[test(flaky)]
fn close_command() {
let (out, err) = util::run_and_collect_output(
true,
"repl",
Some(vec!["close()", "'ignored'"]),
None,
false,
);
assert_not_contains!(out, "ignored");
assert!(err.is_empty());
}
#[test(flaky)]
fn function() {
util::with_pty(&["repl"], |mut console| {
console.write_line("Deno.writeFileSync");
console.expect("[Function: writeFileSync]");
});
}
#[test(flaky)]
fn multiline() {
util::with_pty(&["repl"], |mut console| {
console.write_line("(\n1 + 2\n)");
console.expect("3");
});
}
#[test(flaky)]
fn import() {
let context = TestContextBuilder::default()
.use_copy_temp_dir("./subdir")
.build();
context
.new_command()
.args_vec(["repl", "-A"])
.with_pty(|mut console| {
console.write_line("import('./subdir/auto_print_hello.ts')");
console.expect("hello!");
});
}
#[test(flaky)]
fn import_declarations() {
let context = TestContextBuilder::default()
.use_copy_temp_dir("./subdir")
.build();
context
.new_command()
.args_vec(["repl", "-A"])
.with_pty(|mut console| {
console.write_line("import './subdir/auto_print_hello.ts'");
console.expect("hello!");
});
}
#[test(flaky)]
fn exports_stripped() {
util::with_pty(&["repl"], |mut console| {
console.write_line("const test = 5 + 1; export default test;");
console.expect("6");
console.write_line("export class Test {}");
console.expect("undefined");
});
}
#[test(flaky)]
fn call_eval_unterminated() {
util::with_pty(&["repl"], |mut console| {
console.write_line("eval('{')");
console.expect("Unexpected end of input");
});
}
#[test(flaky)]
fn unpaired_braces() {
util::with_pty(&["repl"], |mut console| {
for right_brace in &[")", "]", "}"] {
console.write_line(right_brace);
console.expect("Expression expected");
}
});
}
#[test(flaky)]
fn reference_error() {
util::with_pty(&["repl"], |mut console| {
console.write_line("not_a_variable");
console.expect("not_a_variable is not defined");
});
}
#[test(flaky)]
fn syntax_error() {
util::with_pty(&["repl"], |mut console| {
console.write_line("syntax error");
console.expect("parse error: Expected ';', '}' or <eof>");
// ensure it keeps accepting input after
console.write_line("7 * 6");
console.expect("42");
});
}
#[test(flaky)]
fn jsx_errors_without_pragma() {
util::with_pty(&["repl"], |mut console| {
console.write_line("const element = <div />;");
console.expect("React is not defined");
});
}
#[test(flaky)]
fn jsx_import_source() {
let context = TestContextBuilder::default()
.use_temp_cwd()
.use_http_server()
.build();
context
.new_command()
.args_vec(["repl", "-A"])
.with_pty(|mut console| {
console.write_line("/** @jsxImportSource http://localhost:4545/jsx */");
console.expect("undefined");
console.write_line("const element = <div />;");
console.expect("undefined");
});
}
#[test(flaky)]
fn type_error() {
util::with_pty(&["repl"], |mut console| {
console.write_line("console()");
console.expect("console is not a function");
});
}
#[test(flaky)]
fn variable() {
util::with_pty(&["repl"], |mut console| {
console.write_line("var a = 123 + 456;");
console.expect("undefined");
console.write_line("a");
console.expect("579");
});
}
#[test(flaky)]
fn lexical_scoped_variable() {
util::with_pty(&["repl"], |mut console| {
console.write_line("let a = 123 + 456;");
console.expect("undefined");
console.write_line("a");
console.expect("579");
});
}
#[test(flaky)]
fn missing_deno_dir() {
use std::fs::read_dir;
let temp_dir = TempDir::new();
let deno_dir_path = temp_dir.path().join("deno");
let (out, err) = util::run_and_collect_output(
true,
"repl",
Some(vec!["1"]),
Some(vec![
("DENO_DIR".to_owned(), deno_dir_path.to_string()),
("NO_COLOR".to_owned(), "1".to_owned()),
]),
false,
);
assert!(read_dir(deno_dir_path).is_ok());
assert_ends_with!(out, "1\n");
assert!(err.is_empty());
}
#[test(flaky)]
fn custom_history_path() {
use std::fs::read;
let temp_dir = TempDir::new();
let history_path = temp_dir.path().join("history.txt");
let (out, err) = util::run_and_collect_output(
true,
"repl",
Some(vec!["1"]),
Some(vec![
("DENO_REPL_HISTORY".to_owned(), history_path.to_string()),
("NO_COLOR".to_owned(), "1".to_owned()),
]),
false,
);
assert!(read(&history_path).is_ok());
assert_ends_with!(out, "1\n");
assert!(err.is_empty());
}
#[test(flaky)]
fn disable_history_file() {
let deno_dir = util::new_deno_dir();
let default_history_path = deno_dir.path().join("deno_history.txt");
let (out, err) = util::run_and_collect_output(
true,
"repl",
Some(vec!["1"]),
Some(vec![
("DENO_DIR".to_owned(), deno_dir.path().to_string()),
("DENO_REPL_HISTORY".to_owned(), "".to_owned()),
("NO_COLOR".to_owned(), "1".to_owned()),
]),
false,
);
assert!(!default_history_path.try_exists().unwrap());
assert_ends_with!(out, "1\n");
assert!(err.is_empty());
}
#[test(flaky)]
fn save_last_eval() {
util::with_pty(&["repl"], |mut console| {
console.write_line("1 + 2");
console.expect("3");
console.write_line("_ + 3");
console.expect("6");
});
}
#[test(flaky)]
fn save_last_thrown() {
util::with_pty(&["repl"], |mut console| {
console.write_line("throw 1 + 2");
console.expect("Uncaught 3");
console.write_line("_error + 3");
console.expect("6");
});
}
#[test(flaky)]
fn assign_underscore() {
util::with_pty(&["repl"], |mut console| {
console.write_line("_ = 1");
console.expect("Last evaluation result is no longer saved to _.");
console.write_line("2 + 3");
console.expect("5");
console.write_line("_");
console.expect("1");
});
}
#[test(flaky)]
fn assign_underscore_error() {
util::with_pty(&["repl"], |mut console| {
console.write_line("_error = 1");
console.expect("Last thrown error is no longer saved to _error.");
console.write_line("throw 2");
console.expect("Uncaught 2");
console.write_line("_error");
console.expect("1");
});
}
#[test(flaky)]
fn custom_inspect() {
util::with_pty(&["repl"], |mut console| {
console.write_line(
r#"const o = {
[Symbol.for("Deno.customInspect")]() {
throw new Error('Oops custom inspect error');
},
};"#,
);
console.expect("undefined");
console.write_line("o");
console.expect("Oops custom inspect error");
});
}
#[test(flaky)]
fn eval_flag_valid_input() {
util::with_pty(&["repl", "--eval", "const t = 10;"], |mut console| {
console.write_line("t * 500");
console.expect("5000");
});
}
#[test(flaky)]
fn eval_flag_parse_error() {
let (out, err) = util::run_and_collect_output_with_args(
true,
vec!["repl", "--eval", "const %"],
Some(vec!["250 * 10"]),
None,
false,
);
assert_contains!(
test_util::strip_ansi_codes(&out),
"Error in --eval flag: parse error: Unexpected token `%`."
);
assert_contains!(out, "2500"); // should not prevent input
assert!(err.is_empty());
}
#[test(flaky)]
fn eval_flag_runtime_error() {
let (out, err) = util::run_and_collect_output_with_args(
true,
vec!["repl", "--eval", "throw new Error('Testing')"],
Some(vec!["250 * 10"]),
None,
false,
);
assert_contains!(out, "Error in --eval flag: Uncaught Error: Testing");
assert_contains!(out, "2500"); // should not prevent input
assert!(err.is_empty());
}
#[test(flaky)]
fn eval_file_flag_valid_input() {
let (out, err) = util::run_and_collect_output_with_args(
true,
vec!["repl", "--eval-file=./run/001_hello.js"],
None,
None,
false,
);
assert_contains!(out, "Hello World");
assert!(err.is_empty());
}
#[test(flaky)]
fn eval_file_flag_call_defined_function() {
let (out, err) = util::run_and_collect_output_with_args(
true,
vec!["repl", "--eval-file=./tsc/d.ts"],
Some(vec!["v4()"]),
None,
false,
);
assert_contains!(out, "hello");
assert!(err.is_empty());
}
#[test(flaky)]
fn eval_file_flag_http_input() {
let (out, err) = util::run_and_collect_output_with_args(
true,
vec!["repl", "--eval-file=http://127.0.0.1:4545/tsc/d.ts"],
Some(vec!["v4()"]),
None,
true,
);
assert_contains!(out, "hello");
assert!(err.contains("Download"));
}
#[test(flaky)]
fn eval_file_flag_multiple_files() {
let (out, err) = util::run_and_collect_output_with_args(
true,
vec![
"repl",
"--allow-read",
"--eval-file=http://127.0.0.1:4545/repl/import_type.ts,./tsc/d.ts,http://127.0.0.1:4545/type_definitions/foo.js",
],
Some(vec!["b.method1=v4", "b.method1()+foo.toUpperCase()"]),
None,
true,
);
assert_contains!(out, "helloFOO");
assert_contains!(err, "Download");
}
#[test(flaky)]
fn pty_clear_function() {
util::with_pty(&["repl"], |mut console| {
console.write_line("console.log('h' + 'ello');");
console.expect_all(&["hello", "undefined"]);
console.write_line_raw("clear();");
if cfg!(windows) {
// expect a bunch of these in the output
console.expect_raw_in_current_output(
"\r\n\u{1b}[K\r\n\u{1b}[K\r\n\u{1b}[K\r\n\u{1b}[K\r\n\u{1b}[K",
);
} else {
console.expect_raw_in_current_output("[1;1H");
}
console.expect("undefined"); // advance past the "clear()"'s undefined
console.expect(">");
console.write_line("const clear = 1234 + 2000;");
console.expect("undefined");
console.write_line("clear;");
console.expect("3234");
});
}
#[test(flaky)]
fn pty_tab_handler() {
// If the last character is **not** whitespace, we show the completions
util::with_pty(&["repl"], |mut console| {
console.write_raw("a\t\t");
console.expect_all(&["addEventListener", "alert", "atob"]);
});
// If the last character is whitespace, we just insert a tab
util::with_pty(&["repl"], |mut console| {
console.write_line("const a = 5;");
console.expect("undefined");
console.write_raw("a; \t\ta + 2;\n"); // last character is whitespace
console.expect_any(&[
// windows
"a; a + 2;",
// unix
"a; \t\ta + 2;",
]);
});
}
#[test(flaky)]
fn repl_error() {
util::with_pty(&["repl"], |mut console| {
console.write_line("console.log(1);");
console.expect_all(&["1", "undefined"]);
console.write_line(r#"throw new Error("foo");"#);
console.expect("Uncaught Error: foo");
console.expect(" at <anonymous>");
console.write_line("console.log(2);");
console.expect("2");
});
}
#[test(flaky)]
fn repl_reject() {
util::with_pty(&["repl"], |mut console| {
console.write_line("console.log(1);");
console.expect_all(&["1", "undefined"]);
console.write_line(r#"Promise.reject(new Error("foo"));"#);
console.expect("Promise {");
console.expect(" <rejected> Error: foo");
console.expect("Uncaught Error: foo");
console.expect(" at <anonymous>");
console.write_line("console.log(2);");
console.expect("2");
console.write_line(r#"throw "hello";"#);
console.expect(r#"Uncaught "hello""#);
console.write_line(r#"throw `hello ${"world"}`;"#);
console.expect(r#"Uncaught "hello world""#);
});
}
#[test(flaky)]
fn repl_report_error() {
util::with_pty(&["repl"], |mut console| {
console.write_line("console.log(1);");
console.expect_all(&["1", "undefined"]);
console.write_line(r#"reportError(new Error("foo"));"#);
console.expect("undefined");
console.expect("Uncaught Error: foo");
console.expect(" at <anonymous>");
console.write_line("console.log(2);");
console.expect("2");
});
}
#[test(flaky)]
fn repl_error_undefined() {
util::with_pty(&["repl"], |mut console| {
console.write_line(r#"throw undefined;"#);
console.expect("Uncaught undefined");
console.write_line(r#"Promise.reject();"#);
console.expect("Promise { <rejected> undefined }");
console.expect("Uncaught undefined");
console.write_line(r#"reportError(undefined);"#);
console.expect("undefined");
console.expect("Uncaught undefined");
});
}
#[test(flaky)]
fn pty_aggregate_error() {
util::with_pty(&["repl"], |mut console| {
console.write_line("await Promise.any([])");
console.expect("AggregateError");
});
}
#[test(flaky)]
fn repl_with_quiet_flag() {
let (out, err) = util::run_and_collect_output_with_args(
true,
vec!["repl", "--quiet"],
Some(vec!["await Promise.resolve('done')"]),
Some(vec![("NO_COLOR".to_owned(), "1".to_owned())]),
false,
);
assert!(!out.contains("Deno"));
assert!(!out.contains("exit using ctrl+d, ctrl+c, or close()"));
assert_ends_with!(out, "\"done\"\n");
assert!(err.is_empty(), "Error: {}", err);
}
#[test(flaky)]
fn repl_deno_test() {
util::with_pty(&["repl"], |mut console| {
console.write_line_raw(
"\
console.log('Hello from outside of test!'); \
Deno.test('test1', async (t) => { \
console.log('Hello from inside of test!'); \
await t.step('step1', () => {}); \
}); \
Deno.test('test2', () => { \
throw new Error('some message'); \
}); \
console.log('Hello again from outside of test!'); \
",
);
console.expect("Hello from outside of test!");
console.expect("Hello again from outside of test!");
// FIXME(nayeemrmn): REPL unit tests don't support output capturing.
console.expect("Hello from inside of test!");
console.expect(" step1 ... ok (");
console.expect("test1 ... ok (");
console.expect("test2 ... FAILED (");
console.expect("ERRORS");
console.expect("test2 => <anonymous>:6:6");
console.expect("error: Error: some message");
console.expect(" at <anonymous>:7:9");
console.expect("FAILURES");
console.expect("test2 => <anonymous>:6:6");
console.expect("FAILED | 1 passed (1 step) | 1 failed (");
console.expect("undefined");
console.write_line("Deno.test('test2', () => {});");
console.expect("test2 ... ok (");
console.expect("ok | 1 passed | 0 failed (");
console.expect("undefined");
});
}
#[test(flaky)]
fn npm_packages() {
let mut env_vars = util::env_vars_for_npm_tests();
env_vars.push(("NO_COLOR".to_owned(), "1".to_owned()));
let temp_dir = TempDir::new();
env_vars.push(("DENO_DIR".to_string(), temp_dir.path().to_string()));
{
let (out, err) = util::run_and_collect_output_with_args(
true,
vec!["repl", "--quiet", "--allow-read", "--allow-env"],
Some(vec![
r#"import chalk from "npm:chalk";"#,
"chalk.red('hel' + 'lo')",
]),
Some(env_vars.clone()),
true,
);
assert_contains!(out, "hello");
assert!(err.is_empty(), "Error: {}", err);
}
{
let (out, err) = util::run_and_collect_output_with_args(
true,
vec!["repl", "--quiet", "--allow-read", "--allow-env"],
Some(vec![
r#"const chalk = await import("npm:chalk");"#,
"chalk.default.red('hel' + 'lo')",
]),
Some(env_vars.clone()),
true,
);
assert_contains!(out, "hello");
assert!(err.is_empty(), "Error: {}", err);
}
{
let (out, err) = util::run_and_collect_output_with_args(
true,
vec!["repl", "--quiet", "--allow-read", "--allow-env"],
Some(vec![r#"export {} from "npm:chalk";"#]),
Some(env_vars.clone()),
true,
);
assert_contains!(out, "[Module: null prototype] {");
assert_contains!(out, "Chalk: [class Chalk],");
assert!(err.is_empty(), "Error: {}", err);
}
{
let (out, err) = util::run_and_collect_output_with_args(
true,
vec!["repl", "--quiet", "--allow-read", "--allow-env"],
Some(vec![r#"import foo from "npm:asdfawe52345asdf""#]),
Some(env_vars.clone()),
true,
);
assert_contains!(
out,
"error: npm package 'asdfawe52345asdf' does not exist"
);
assert!(err.is_empty(), "Error: {}", err);
}
{
let (out, err) = util::run_and_collect_output_with_args(
true,
vec!["repl", "--quiet", "--allow-read", "--allow-env"],
Some(vec![
"import path from 'node:path';",
"path.isGlob('asdf') ? 'yes' : 'no'",
]),
Some(env_vars.clone()),
true,
);
assert_contains!(out, "no");
assert!(err.is_empty(), "Error: {}", err);
}
}
#[test(flaky)]
fn pty_tab_indexable_props() {
util::with_pty(&["repl"], |mut console| {
console.write_line("const arr = [1, 2, 3]");
console.expect("undefined");
console.write_raw("arr.\t\t");
console.expect("> arr.");
let output = console.read_until("> arr.");
assert_contains!(output, "constructor");
assert_contains!(output, "sort");
assert_contains!(output, "at");
assert_not_contains!(output, "0", "1", "2");
});
}
#[test(flaky)]
fn package_json_uncached_no_error() {
let test_context = TestContextBuilder::for_npm()
.use_temp_cwd()
.use_http_server()
.env("RUST_BACKTRACE", "1")
.build();
let temp_dir = test_context.temp_dir();
temp_dir.write("deno.json", "{ \"nodeModulesDir\": \"auto\" }");
temp_dir.write(
"package.json",
r#"{
"dependencies": {
"@denotest/esm-basic": "1.0.0"
}
}
"#,
);
test_context.new_command().with_pty(|mut console| {
console.write_line("console.log(123 + 456);");
console.expect_all(&["579", "undefined"]);
assert_not_contains!(
console.all_output(),
"Could not set npm package requirements",
);
// should support getting the package now though
console
.write_line("import { getValue, setValue } from '@denotest/esm-basic';");
console.expect("undefined");
console.write_line("setValue(12 + 30);");
console.expect("undefined");
console.write_line("getValue()");
console.expect("42");
assert!(temp_dir.path().join("node_modules").exists());
});
}
#[test(flaky)]
fn closed_file_pre_load_does_not_occur() {
TestContext::default()
.new_command()
.args_vec(["repl", "-A", "--log-level=debug"])
.with_pty(|console| {
assert_contains!(
console.all_output(),
"Skipped workspace walk due to client incapability.",
);
});
}
#[test(flaky)]
fn env_file() {
TestContext::default()
.new_command()
.args_vec([
"repl",
"--env=env",
"--allow-env",
"--eval",
"console.log(Deno.env.get('FOO'))",
])
.with_pty(|console| {
assert_contains!(console.all_output(), "BAR",);
});
}
// Regression test for https://github.com/denoland/deno/issues/20528
#[test(flaky)]
fn pty_promise_was_collected_regression_test() {
let (out, err) = util::run_and_collect_output_with_args(
true,
vec!["repl"],
Some(vec!["new Uint8Array(64 * 1024 * 1024)"]),
None,
false,
);
assert_contains!(out, "Uint8Array(67108864)");
assert!(err.is_empty());
}
#[test(flaky)]
fn eval_file_promise_error() {
let (out, err) = util::run_and_collect_output_with_args(
true,
vec!["repl", "--eval-file=./repl/promise_rejection.ts"],
None,
None,
false,
);
assert_contains!(out, "Uncaught undefined");
assert!(err.is_empty());
}
#[test(flaky)]
fn repl_json_imports() {
let context = TestContextBuilder::default().use_temp_cwd().build();
let temp_dir = context.temp_dir();
temp_dir.write("./data.json", r#"{"hello": "world"}"#);
context
.new_command()
.env("NO_COLOR", "1")
.args_vec(["repl", "-A"])
.with_pty(|mut console| {
console.write_line_raw(
"import data from './data.json' with { type: 'json' };",
);
console.expect("undefined");
console.write_line_raw("data");
console.expect(r#"{ hello: "world" }"#);
});
}
#[test(flaky)]
fn repl_no_globalthis() {
let context = TestContextBuilder::default().use_temp_cwd().build();
context
.new_command()
.env("NO_COLOR", "1")
.args_vec(["repl", "-A"])
.with_pty(|mut console| {
console.write_line_raw("delete globalThis.globalThis;");
console.expect("true");
console.write_line_raw("console.log('Hello World')");
console.expect(r#"Hello World"#);
console.expect(r#"undefined"#);
});
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/integration/jupyter_tests.rs | tests/integration/jupyter_tests.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::process::Output;
use std::sync::Arc;
use std::time::Duration;
use anyhow::Result;
use bytes::Bytes;
use chrono::DateTime;
use chrono::Utc;
use serde::Deserialize;
use serde::Serialize;
use serde_json::Value;
use serde_json::json;
use test_util::DenoChild;
use test_util::TestContext;
use test_util::TestContextBuilder;
use test_util::assertions::assert_json_subset;
use test_util::eprintln;
use test_util::test;
use tokio::sync::Mutex;
use tokio::time::timeout;
use uuid::Uuid;
use zeromq::SocketRecv;
use zeromq::SocketSend;
use zeromq::ZmqMessage;
/// Jupyter connection file format
///
/// Serialized to `connection.json` and handed to `deno jupyter --conn`;
/// mirrors the kernel connection-file layout from the Jupyter protocol.
#[derive(Serialize)]
struct ConnectionSpec {
  // key used for HMAC signature, if empty, hmac is not used
  key: String,
  signature_scheme: String,
  // "tcp" in these tests
  transport: String,
  ip: String,
  // One port per Jupyter channel.
  hb_port: u16,
  control_port: u16,
  shell_port: u16,
  stdin_port: u16,
  iopub_port: u16,
  kernel_name: String,
}
impl ConnectionSpec {
fn endpoint(&self, port: u16) -> String {
format!("{}://{}:{}", self.transport, self.ip, port)
}
}
/// Asks the OS for an ephemeral port by binding to port 0, returning the
/// chosen port number together with the bound `TcpListener`. Keeping the
/// listener alive reserves the port against other processes.
fn pick_unused_port() -> (u16, std::net::TcpListener) {
  let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
  let port = listener.local_addr().unwrap().port();
  (port, listener)
}
impl ConnectionSpec {
  /// Picks five unused ports (heartbeat, control, shell, stdin, iopub) and
  /// builds a localhost spec with no HMAC key. The returned listeners keep
  /// the ports reserved until dropped.
  fn new() -> (Self, Vec<std::net::TcpListener>) {
    let mut ports = [0u16; 5];
    let mut listeners = Vec::with_capacity(ports.len());
    for slot in &mut ports {
      let (port, listener) = pick_unused_port();
      *slot = port;
      listeners.push(listener);
    }
    let [hb_port, control_port, shell_port, stdin_port, iopub_port] = ports;
    let spec = Self {
      key: "".into(),
      signature_scheme: "hmac-sha256".into(),
      transport: "tcp".into(),
      ip: "127.0.0.1".into(),
      hb_port,
      control_port,
      shell_port,
      stdin_port,
      iopub_port,
      kernel_name: "deno".into(),
    };
    (spec, listeners)
  }
}
const DELIMITER: &[u8] = b"<IDS|MSG>";
/// One Jupyter message as it appears on the wire: routing frames, then the
/// delimiter, then signature + four JSON frames + optional binary buffers.
#[derive(Debug, Clone)]
struct JupyterMsg {
  // ZeroMQ routing identities preceding the delimiter frame.
  routing_prefix: Vec<String>,
  // HMAC signature; empty because the tests use an empty key.
  signature: String,
  header: MsgHeader,
  // Header of the request this message replies to (empty object if none).
  parent_header: Value,
  metadata: Value,
  content: Value,
  // Raw binary frames following the JSON frames.
  buffers: Vec<Bytes>,
}
impl Default for JupyterMsg {
  /// A fresh message: random routing id, empty signature, default header,
  /// and empty JSON objects for the remaining frames.
  fn default() -> Self {
    let routing_id = Uuid::new_v4().to_string();
    Self {
      routing_prefix: vec![routing_id],
      signature: String::new(),
      header: MsgHeader::default(),
      parent_header: json!({}),
      metadata: json!({}),
      content: json!({}),
      buffers: Vec::new(),
    }
  }
}
/// The `header` frame of a Jupyter message (protocol version 5.3 fields).
#[derive(Serialize, Clone, Debug, Deserialize)]
struct MsgHeader {
  msg_id: Uuid,
  // Identifies the client session; replies are matched on it.
  session: Uuid,
  date: DateTime<Utc>,
  username: String,
  // e.g. "kernel_info_request", "execute_request", "stream", ...
  msg_type: String,
  version: String,
}
impl MsgHeader {
  /// Serializes the header to a JSON value (cannot fail for this struct,
  /// hence the unwrap).
  fn to_json(&self) -> Value {
    serde_json::to_value(self).unwrap()
  }
}
impl Default for MsgHeader {
fn default() -> Self {
Self {
msg_id: Uuid::new_v4(),
session: Uuid::new_v4(),
date: chrono::Utc::now(),
username: "test".into(),
msg_type: "kernel_info_request".into(),
version: "5.3".into(),
}
}
}
impl JupyterMsg {
  /// Serializes the message into ZeroMQ frames per the Jupyter wire
  /// protocol: routing ids, the `<IDS|MSG>` delimiter, the signature, the
  /// four JSON frames (header, parent_header, metadata, content), then any
  /// binary buffers.
  fn to_raw(&self) -> ZmqMessage {
    let mut parts = Vec::new();
    parts.extend(
      self
        .routing_prefix
        .iter()
        .map(|uuid| uuid.as_bytes().to_vec().into()),
    );
    parts.push(Bytes::from_static(DELIMITER));
    parts.push(self.signature.clone().into());
    parts.push(serde_json::to_vec(&self.header).unwrap().into());
    parts.push(self.parent_header.to_string().into());
    parts.push(self.metadata.to_string().into());
    parts.push(self.content.to_string().into());
    parts.extend(self.buffers.clone());
    ZmqMessage::try_from(parts).unwrap()
  }

  /// Builds a message of `msg_type` with `content` tied to `session`;
  /// every other field takes its default.
  fn new(session: Uuid, msg_type: impl AsRef<str>, content: Value) -> Self {
    Self {
      header: MsgHeader {
        session,
        msg_type: msg_type.as_ref().into(),
        ..Default::default()
      },
      content,
      ..Default::default()
    }
  }

  /// Parses raw ZeroMQ frames back into a message: everything before the
  /// delimiter is routing, then signature/header/parent_header/metadata/
  /// content at fixed offsets, with any remaining frames kept as buffers.
  fn from_raw(msg: ZmqMessage) -> Self {
    let parts = msg.into_vec();
    let delimiter = parts.iter().position(|part| part == DELIMITER).unwrap();
    let routing_prefix = parts[..delimiter]
      .iter()
      .map(|part: &Bytes| String::from_utf8_lossy(part.as_ref()).to_string())
      .collect();
    let signature = String::from_utf8(parts[delimiter + 1].to_vec())
      .expect("Failed to parse signature");
    let header: MsgHeader = serde_json::from_slice(&parts[delimiter + 2])
      .expect("Failed to parse header");
    let parent_header: Value =
      serde_json::from_slice(&parts[delimiter + 3]).unwrap();
    let metadata: Value =
      serde_json::from_slice(&parts[delimiter + 4]).unwrap();
    let content: Value = serde_json::from_slice(&parts[delimiter + 5]).unwrap();
    let buffers = parts[delimiter + 6..].to_vec();
    Self {
      routing_prefix,
      signature,
      header,
      parent_header,
      metadata,
      content,
      buffers,
    }
  }
}
/// Creates a ZeroMQ socket of type `S` and connects it to the endpoint for
/// `port`, panicking on connect failure or a 5 second timeout.
async fn connect_socket<S: zeromq::Socket>(
  spec: &ConnectionSpec,
  port: u16,
) -> S {
  let addr = spec.endpoint(port);
  let mut socket = S::new();
  let attempt =
    timeout(Duration::from_millis(5000), socket.connect(&addr)).await;
  match attempt {
    Ok(Ok(_)) => socket,
    Ok(Err(e)) => panic!("Failed to connect to {addr}: {e}"),
    Err(e) => panic!("Timed out connecting to {addr}: {e}"),
  }
}
/// Test client holding one socket per Jupyter channel. Each socket sits
/// behind an `Arc<Mutex<..>>` so the client is cheaply cloneable and
/// shareable across tasks.
#[derive(Clone)]
struct JupyterClient {
  // Applied to every receive operation.
  recv_timeout: Duration,
  // Session id stamped into every outgoing message header.
  session: Uuid,
  heartbeat: Arc<Mutex<zeromq::ReqSocket>>,
  control: Arc<Mutex<zeromq::DealerSocket>>,
  shell: Arc<Mutex<zeromq::DealerSocket>>,
  io_pub: Arc<Mutex<zeromq::SubSocket>>,
  stdin: Arc<Mutex<zeromq::RouterSocket>>,
}
/// The Jupyter channel a message is sent or received on.
#[derive(Debug, Clone, Copy)]
enum JupyterChannel {
  Control,
  Shell,
  #[allow(dead_code)]
  Stdin,
  // Broadcast channel; receive-only from the client's perspective.
  IoPub,
}
// Bring the variants into scope so tests can write `Shell`, `Control`, etc.
use JupyterChannel::*;
impl JupyterClient {
  /// Connects to all five kernel channels with a 10 second receive timeout.
  async fn new(spec: &ConnectionSpec) -> Self {
    Self::new_with_timeout(spec, Duration::from_secs(10)).await
  }

  /// Connects the heartbeat/control/shell/iopub/stdin sockets in parallel
  /// and wraps each in a mutex.
  async fn new_with_timeout(spec: &ConnectionSpec, timeout: Duration) -> Self {
    let (heartbeat, control, shell, io_pub, stdin) = tokio::join!(
      connect_socket::<zeromq::ReqSocket>(spec, spec.hb_port),
      connect_socket::<zeromq::DealerSocket>(spec, spec.control_port),
      connect_socket::<zeromq::DealerSocket>(spec, spec.shell_port),
      connect_socket::<zeromq::SubSocket>(spec, spec.iopub_port),
      connect_socket::<zeromq::RouterSocket>(spec, spec.stdin_port),
    );
    Self {
      session: Uuid::new_v4(),
      heartbeat: Arc::new(Mutex::new(heartbeat)),
      control: Arc::new(Mutex::new(control)),
      shell: Arc::new(Mutex::new(shell)),
      io_pub: Arc::new(Mutex::new(io_pub)),
      stdin: Arc::new(Mutex::new(stdin)),
      recv_timeout: timeout,
    }
  }

  /// Subscribes the IOPub SUB socket to `topic` ("" subscribes to all).
  async fn io_subscribe(&self, topic: &str) -> Result<()> {
    Ok(self.io_pub.lock().await.subscribe(topic).await?)
  }

  /// Receives one message from `s`, failing if `recv_timeout` elapses.
  async fn recv_with_timeout<S: SocketRecv>(
    &self,
    s: &mut S,
  ) -> Result<JupyterMsg> {
    let msg = timeout(self.recv_timeout, s.recv()).await??;
    Ok(JupyterMsg::from_raw(msg))
  }

  /// Sends a fully-formed message over `channel` and returns it so tests
  /// can later match replies against its header.
  async fn send_msg(
    &self,
    channel: JupyterChannel,
    msg: JupyterMsg,
  ) -> Result<JupyterMsg> {
    let raw = msg.to_raw();
    match channel {
      Control => self.control.lock().await.send(raw).await?,
      Shell => self.shell.lock().await.send(raw).await?,
      Stdin => self.stdin.lock().await.send(raw).await?,
      IoPub => panic!("Cannot send over IOPub"),
    }
    Ok(msg)
  }

  /// Builds a message of `msg_type` with `content` for this session and
  /// sends it over `channel`.
  async fn send(
    &self,
    channel: JupyterChannel,
    msg_type: &str,
    content: Value,
  ) -> Result<JupyterMsg> {
    let msg = JupyterMsg::new(self.session, msg_type, content);
    self.send_msg(channel, msg).await
  }

  /// Receives the next message from `channel`, honoring `recv_timeout`.
  async fn recv(&self, channel: JupyterChannel) -> Result<JupyterMsg> {
    Ok(match channel {
      Control => {
        self
          .recv_with_timeout(&mut *self.control.lock().await)
          .await?
      }
      Shell => {
        self
          .recv_with_timeout(&mut *self.shell.lock().await)
          .await?
      }
      Stdin => {
        self
          .recv_with_timeout(&mut *self.stdin.lock().await)
          .await?
      }
      IoPub => {
        self
          .recv_with_timeout(&mut *self.io_pub.lock().await)
          .await?
      }
    })
  }

  /// Sends raw bytes on the heartbeat REQ socket.
  async fn send_heartbeat(&self, bytes: impl AsRef<[u8]>) -> Result<()> {
    Ok(
      self
        .heartbeat
        .lock()
        .await
        .send(ZmqMessage::from(bytes.as_ref().to_vec()))
        .await?,
    )
  }

  /// Receives the heartbeat reply and returns its first frame.
  async fn recv_heartbeat(&self) -> Result<Bytes> {
    Ok(
      timeout(self.recv_timeout, self.heartbeat.lock().await.recv())
        .await??
        .into_vec()[0]
        .clone(),
    )
  }
}
/// Polls `process` until it exits or `wait` elapses; on timeout the process
/// is killed. Returns the collected output either way.
async fn wait_or_kill(
  mut process: DenoChild,
  wait: Duration,
) -> Result<Output> {
  let started_at = std::time::Instant::now();
  loop {
    if started_at.elapsed() >= wait {
      break;
    }
    if process.try_wait()?.is_some() {
      return Ok(process.wait_with_output()?);
    }
    tokio::time::sleep(Duration::from_millis(100)).await;
  }
  process.kill()?;
  Ok(process.wait_with_output()?)
}
// Wrapper around the Jupyter server process that
// ensures the process is killed when dropped.
struct JupyterServerProcess(Option<DenoChild>);

impl JupyterServerProcess {
  // Wait for the process to exit, or kill it after the given duration.
  //
  // Ideally we could use this at the end of each test, but the server
  // doesn't seem to exit in a reasonable amount of time after getting
  // a shutdown request.
  #[allow(dead_code)]
  async fn wait_or_kill(mut self, wait: Duration) -> Output {
    // take() removes the child so Drop won't try to kill it again.
    wait_or_kill(self.0.take().unwrap(), wait).await.unwrap()
  }
}

impl Drop for JupyterServerProcess {
  // Best-effort cleanup: kill the server unless it already exited (or was
  // taken by wait_or_kill above).
  fn drop(&mut self) {
    let Some(mut proc) = self.0.take() else {
      return;
    };
    if proc.try_wait().unwrap().is_some() {
      // already exited
      return;
    }
    proc.kill().unwrap();
  }
}
/// True if a TCP connection to `addr` (a `tcp://host:port` endpoint string)
/// succeeds within one second.
async fn server_ready_on(addr: &str) -> bool {
  let host_port = addr.trim_start_matches("tcp://");
  let connect = tokio::net::TcpStream::connect(host_port);
  matches!(
    timeout(Duration::from_millis(1000), connect).await,
    Ok(Ok(_))
  )
}
/// True only when the kernel is accepting connections on every one of its
/// five channel ports.
async fn server_ready(conn: &ConnectionSpec) -> bool {
  let (hb, control, shell, stdin, iopub) = (
    conn.endpoint(conn.hb_port),
    conn.endpoint(conn.control_port),
    conn.endpoint(conn.shell_port),
    conn.endpoint(conn.stdin_port),
    conn.endpoint(conn.iopub_port),
  );
  let ready = tokio::join!(
    server_ready_on(&hb),
    server_ready_on(&control),
    server_ready_on(&shell),
    server_ready_on(&stdin),
    server_ready_on(&iopub),
  );
  matches!(ready, (true, true, true, true, true))
}
/// Spawns `deno jupyter --kernel` against a freshly generated connection
/// file and waits until the kernel listens on all five ports, retrying with
/// fresh ports when startup fails.
async fn setup_server() -> (TestContext, ConnectionSpec, JupyterServerProcess) {
  let context = TestContextBuilder::new().use_temp_cwd().build();
  let (mut conn, mut listeners) = ConnectionSpec::new();
  let conn_file = context.temp_dir().path().join("connection.json");
  conn_file.write_json(&conn);
  let start_process = |conn_file: &test_util::PathRef| {
    context
      .new_command()
      .args_vec(vec![
        "jupyter",
        "--kernel",
        "--conn",
        conn_file.to_string().as_str(),
      ])
      .spawn()
      .unwrap()
  };
  // drop the listeners so the server can listen on the ports
  drop(listeners);
  // try to start the server, retrying with fresh ports up to 10 times
  // (this can happen due to TOCTOU errors with selecting unused TCP ports)
  let mut process = start_process(&conn_file);
  'outer: for i in 0..10 {
    // try to see if the server is healthy
    for _ in 0..10 {
      // server still running?
      if process.try_wait().unwrap().is_none() {
        // listening on all ports?
        if server_ready(&conn).await {
          // server is ready to go
          break 'outer;
        }
      } else {
        // server exited, try again
        break;
      }
      tokio::time::sleep(Duration::from_millis(500)).await;
    }
    // pick new ports and try again
    (conn, listeners) = ConnectionSpec::new();
    conn_file.write_json(&conn);
    drop(listeners);
    process = start_process(&conn_file);
    // back off a bit more on each retry
    tokio::time::sleep(Duration::from_millis((i + 1) * 250)).await;
  }
  if process.try_wait().unwrap().is_some() || !server_ready(&conn).await {
    panic!("Failed to start Jupyter server");
  }
  (context, conn, JupyterServerProcess(Some(process)))
}
/// Starts a kernel, connects a client subscribed to all IOPub topics, and
/// confirms readiness with a heartbeat round-trip before returning.
async fn setup() -> (TestContext, JupyterClient, JupyterServerProcess) {
  let (context, conn, process) = setup_server().await;
  let client = JupyterClient::new(&conn).await;
  client.io_subscribe("").await.unwrap();
  // make sure server is ready to receive messages
  client.send_heartbeat(b"ping").await.unwrap();
  let _ = client.recv_heartbeat().await.unwrap();
  (context, client, process)
}
/// The heartbeat channel answers every ping.
#[test]
async fn jupyter_heartbeat_echoes() -> Result<()> {
  let (_ctx, client, _process) = setup().await;
  client.send_heartbeat(b"ping").await?;
  let msg = client.recv_heartbeat().await?;
  // NOTE(review): the kernel replies with "pong" rather than echoing the
  // request bytes verbatim.
  assert_eq!(msg, Bytes::from_static(b"pong"));
  Ok(())
}
/// `kernel_info_request` on the control channel returns the Deno kernel's
/// identity and TypeScript language metadata.
#[test]
async fn jupyter_kernel_info() -> Result<()> {
  let (_ctx, client, _process) = setup().await;
  client
    .send(Control, "kernel_info_request", json!({}))
    .await?;
  let msg = client.recv(Control).await?;
  assert_eq!(msg.header.msg_type, "kernel_info_reply");
  // Subset match: only the listed keys must be present with these values.
  assert_json_subset(
    msg.content,
    json!({
      "status": "ok",
      "implementation": "Deno kernel",
      "language_info": {
        "name": "typescript",
        "mimetype": "text/x.typescript",
        "file_extension": ".ts",
        "pygments_lexer": "typescript",
        "nbconvert_exporter": "script"
      },
    }),
  );
  Ok(())
}
/// Executes `console.log("asdf")` over the shell channel and verifies the
/// execute_reply plus the IOPub broadcasts (stdout stream + idle status),
/// each tied back to the request via parent_header.
#[test]
async fn jupyter_execute_request() -> Result<()> {
  let (_ctx, client, _process) = setup().await;
  let request = client
    .send(
      Shell,
      "execute_request",
      json!({
        "silent": false,
        "store_history": true,
        "user_expressions": {},
        "allow_stdin": true,
        "stop_on_error": false,
        "code": "console.log(\"asdf\")"
      }),
    )
    .await?;
  let reply = client.recv(Shell).await?;
  assert_eq!(reply.header.msg_type, "execute_reply");
  assert_json_subset(
    reply.content,
    json!({
      "status": "ok",
      "execution_count": 1,
    }),
  );
  // Collect the IOPub broadcasts triggered by the execution (busy/idle
  // status, stream output, ...).
  let mut msgs = Vec::new();
  for _ in 0..4 {
    match client.recv(IoPub).await {
      Ok(msg) => msgs.push(msg),
      Err(e) => {
        if e.downcast_ref::<tokio::time::error::Elapsed>().is_some() {
          // may timeout if we missed some messages
          eprintln!("Timed out waiting for messages");
        }
        panic!("Error: {:#?}", e);
      }
    }
  }
  // The kernel must report going idle after the execution, and the status
  // message must reference our request as its parent.
  let execution_idle = msgs
    .iter()
    .find(|msg| {
      if let Some(state) = msg.content.get("execution_state") {
        state == "idle"
      } else {
        false
      }
    })
    .expect("execution_state idle not found");
  assert_eq!(execution_idle.parent_header, request.header.to_json());
  assert_json_subset(
    execution_idle.content.clone(),
    json!({
      "execution_state": "idle",
    }),
  );
  // The console.log output arrives as a "stream" message on stdout.
  let execution_result = msgs
    .iter()
    .find(|msg| msg.header.msg_type == "stream")
    .expect("stream not found");
  assert_eq!(execution_result.header.msg_type, "stream");
  assert_eq!(execution_result.parent_header, request.header.to_json());
  assert_json_subset(
    execution_result.content.clone(),
    json!({
      "name": "stdout",
      "text": "asdf\n", // the trailing newline is added by console.log
    }),
  );
  Ok(())
}
/// With `store_history: false` the kernel must not bump the execution
/// counter (it stays 0).
#[test]
async fn jupyter_store_history_false() -> Result<()> {
  let (_ctx, client, _process) = setup().await;
  client
    .send(
      Shell,
      "execute_request",
      json!({
        "silent": false,
        "store_history": false,
        "code": "console.log(\"asdf\")",
      }),
    )
    .await?;
  let reply = client.recv(Shell).await?;
  assert_eq!(reply.header.msg_type, "execute_reply");
  assert_json_subset(
    reply.content,
    json!({
      "status": "ok",
      "execution_count": 0,
    }),
  );
  Ok(())
}
/// An HTTP server started via `Deno.serve` inside the kernel keeps serving
/// requests after the cell finishes executing.
#[test]
async fn jupyter_http_server() -> Result<()> {
  let (_ctx, client, _process) = setup().await;
  client
    .send(
      Shell,
      "execute_request",
      json!({
        "silent": false,
        "store_history": false,
        "code": r#"Deno.serve({ port: 10234 }, (req) => Response.json({ hello: "world" }))"#,
      }),
    )
    .await?;
  let reply = client.recv(Shell).await?;
  assert_eq!(reply.header.msg_type, "execute_reply");
  assert_json_subset(
    reply.content,
    json!({
      "status": "ok",
      "execution_count": 0,
    }),
  );
  // Hit the server a few times to make sure it stays up.
  for _ in 0..3 {
    let resp = reqwest::get("http://localhost:10234").await.unwrap();
    let text: serde_json::Value = resp.json().await.unwrap();
    assert_eq!(text, json!({ "hello": "world" }));
  }
  Ok(())
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/integration/watcher_tests.rs | tests/integration/watcher_tests.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use test_util as util;
use test_util::TempDir;
use test_util::assert_contains;
use test_util::env_vars_for_npm_tests;
use test_util::eprintln;
use test_util::http_server;
use test_util::test;
use tokio::io::AsyncBufReadExt;
use util::DenoChild;
use util::assert_not_contains;
/// Logs to stderr every time next_line() is called
struct LoggingLines<R>
where
  R: tokio::io::AsyncBufRead + Unpin,
{
  // Underlying line reader over the child's stdout or stderr.
  pub lines: tokio::io::Lines<R>,
  // Prefix used when echoing lines, e.g. "STDOUT" / "STDERR".
  pub stream_name: String,
}
impl<R> LoggingLines<R>
where
  R: tokio::io::AsyncBufRead + Unpin,
{
  /// Reads the next line, echoing it to stderr (prefixed with the stream
  /// name) for test debugging, and returns the read result unchanged.
  ///
  /// Previously this unconditionally unwrapped the result before logging,
  /// so end-of-stream (`Ok(None)`) or an I/O error panicked inside the
  /// logging code; callers such as `wait_for` rely on receiving `Ok(None)`
  /// to detect that the stream ended. Log conditionally instead.
  pub async fn next_line(&mut self) -> tokio::io::Result<Option<String>> {
    let line = self.lines.next_line().await;
    match &line {
      Ok(Some(text)) => eprintln!("{}: {}", self.stream_name, text),
      Ok(None) => eprintln!("{}: <end of stream>", self.stream_name),
      Err(err) => eprintln!("{}: <read error: {}>", self.stream_name, err),
    }
    line
  }
}
// Helper function to skip watcher output that contains "Restarting"
// phrase.
async fn skip_restarting_line<R>(stderr_lines: &mut LoggingLines<R>) -> String
where
  R: tokio::io::AsyncBufRead + Unpin,
{
  loop {
    let line = next_line(stderr_lines).await.unwrap();
    let is_restart_notice = line.contains("Restarting");
    if !is_restart_notice {
      break line;
    }
  }
}
/// Collects every `error[...]` diagnostic line from one lint pass of
/// `deno lint --watch`, stopping at the next "Watcher" status line (restart
/// notices are skipped).
async fn read_all_lints<R>(stderr_lines: &mut LoggingLines<R>) -> String
where
  R: tokio::io::AsyncBufRead + Unpin,
{
  let mut collected = String::new();
  while let Some(raw) = next_line(stderr_lines).await {
    let line = util::strip_ansi_codes(&raw);
    if line.starts_with("Watcher Restarting! File change detected") {
      continue;
    }
    if line.starts_with("Watcher") {
      break;
    }
    if line.starts_with("error[") {
      collected.push_str(&line);
      collected.push('\n');
    }
  }
  collected
}
/// Reads one line from `lines`, panicking if nothing arrives within 60s.
async fn next_line<R>(lines: &mut LoggingLines<R>) -> Option<String>
where
  R: tokio::io::AsyncBufRead + Unpin,
{
  let limit = tokio::time::Duration::from_secs(60);
  match tokio::time::timeout(limit, lines.next_line()).await {
    Ok(result) => result.unwrap(),
    Err(_) => panic!(
      "Output did not contain a new line after {} seconds",
      limit.as_secs()
    ),
  }
}
/// Returns the matched line or None if there are no more lines in this stream
async fn wait_for<R>(
  condition: impl Fn(&str) -> bool,
  lines: &mut LoggingLines<R>,
) -> Option<String>
where
  R: tokio::io::AsyncBufRead + Unpin,
{
  loop {
    match lines.next_line().await.unwrap() {
      Some(line) if condition(&line) => return Some(line),
      Some(_) => continue,
      None => return None,
    }
  }
}
/// Waits (up to 60s) for a line containing `s` and returns it; panics on
/// timeout or if the stream ends first.
async fn wait_contains<R>(s: &str, lines: &mut LoggingLines<R>) -> String
where
  R: tokio::io::AsyncBufRead + Unpin,
{
  let limit = tokio::time::Duration::from_secs(60);
  let matched =
    tokio::time::timeout(limit, wait_for(|line| line.contains(s), lines)).await;
  match matched {
    Ok(Some(line)) => line,
    Ok(None) => panic!("Output ended without containing \"{}\"", s),
    Err(_) => panic!(
      "Output did not contain \"{}\" after {} seconds",
      s,
      limit.as_secs()
    ),
  }
}
/// Before test cases touch files, they need to wait for the watcher to be
/// ready. Waiting for subcommand output is insufficient.
/// The file watcher takes a moment to start watching files due to
/// asynchronicity. It is possible for the watched subcommand to finish before
/// any files are being watched.
/// deno must be running with --log-level=debug
/// file_name should be the file name and, optionally, extension. file_name
/// may not be a full path, as it is not portable.
///
/// Fix: the "stream ended" panic message previously read "Output ended
/// without before the watcher started ..." — garbled wording corrected.
async fn wait_for_watcher<R>(
  file_name: &str,
  stderr_lines: &mut LoggingLines<R>,
) -> String
where
  R: tokio::io::AsyncBufRead + Unpin,
{
  let timeout = tokio::time::Duration::from_secs(60);
  tokio::time::timeout(
    timeout,
    wait_for(
      |line| line.contains("Watching paths") && line.contains(file_name),
      stderr_lines,
    ),
  )
  .await
  .unwrap_or_else(|_| {
    panic!(
      "Watcher did not start for file \"{}\" after {} seconds",
      file_name,
      timeout.as_secs()
    )
  })
  .unwrap_or_else(|| {
    panic!(
      "Output ended before the watcher started watching file \"{}\"",
      file_name
    )
  })
}
/// Asserts the watcher process has not exited on its own, then kills it.
fn check_alive_then_kill(mut child: DenoChild) {
  let exit_status = child.try_wait().unwrap();
  assert!(exit_status.is_none());
  child.kill().unwrap();
}
/// Wraps a child's piped stdout and stderr in `LoggingLines` readers that
/// echo every line they read (prefixed "STDOUT"/"STDERR").
fn child_lines(
  child: &mut std::process::Child,
) -> (
  LoggingLines<tokio::io::BufReader<tokio::process::ChildStdout>>,
  LoggingLines<tokio::io::BufReader<tokio::process::ChildStderr>>,
) {
  let stdout =
    tokio::process::ChildStdout::from_std(child.stdout.take().unwrap())
      .unwrap();
  let stderr =
    tokio::process::ChildStderr::from_std(child.stderr.take().unwrap())
      .unwrap();
  (
    LoggingLines {
      lines: tokio::io::BufReader::new(stdout).lines(),
      stream_name: "STDOUT".to_string(),
    },
    LoggingLines {
      lines: tokio::io::BufReader::new(stderr).lines(),
      stream_name: "STDERR".to_string(),
    },
  )
}
/// `deno lint --watch <file>`: each rewrite of the watched file triggers a
/// new lint pass whose diagnostics match the checked-in `.out` fixtures.
#[test(flaky)]
async fn lint_watch_test() {
  let t = TempDir::new();
  // Fixture sources (progressively cleaner versions of the same file) and
  // their expected lint output.
  let badly_linted_original =
    util::testdata_path().join("lint/watch/badly_linted.js");
  let badly_linted_output =
    util::testdata_path().join("lint/watch/badly_linted.js.out");
  let badly_linted_fixed1 =
    util::testdata_path().join("lint/watch/badly_linted_fixed1.js");
  let badly_linted_fixed1_output =
    util::testdata_path().join("lint/watch/badly_linted_fixed1.js.out");
  let badly_linted_fixed2 =
    util::testdata_path().join("lint/watch/badly_linted_fixed2.js");
  let badly_linted_fixed2_output =
    util::testdata_path().join("lint/watch/badly_linted_fixed2.js.out");
  let badly_linted = t.path().join("badly_linted.js");
  badly_linted_original.copy(&badly_linted);
  let mut child = util::deno_cmd()
    .current_dir(t.path())
    .arg("lint")
    .arg(&badly_linted)
    .arg("--watch")
    .piped_output()
    .spawn()
    .unwrap();
  let (_stdout_lines, mut stderr_lines) = child_lines(&mut child);
  let next_line = next_line(&mut stderr_lines).await.unwrap();
  assert_contains!(&next_line, "Lint started");
  let mut output = read_all_lints(&mut stderr_lines).await;
  let expected = badly_linted_output.read_to_string();
  assert_eq!(output, expected);
  // Change content of the file again to be badly-linted
  badly_linted_fixed1.copy(&badly_linted);
  output = read_all_lints(&mut stderr_lines).await;
  let expected = badly_linted_fixed1_output.read_to_string();
  assert_eq!(output, expected);
  // Change content of the file again to be badly-linted
  badly_linted_fixed2.copy(&badly_linted);
  output = read_all_lints(&mut stderr_lines).await;
  let expected = badly_linted_fixed2_output.read_to_string();
  assert_eq!(output, expected);
  // the watcher process is still alive
  assert!(child.try_wait().unwrap().is_none());
  child.kill().unwrap();
}
/// Same as `lint_watch_test`, but without an explicit file argument: the
/// watcher lints the current working directory.
#[test(flaky)]
async fn lint_watch_without_args_test() {
  let t = TempDir::new();
  let badly_linted_original =
    util::testdata_path().join("lint/watch/badly_linted.js");
  let badly_linted_output =
    util::testdata_path().join("lint/watch/badly_linted.js.out");
  let badly_linted_fixed1 =
    util::testdata_path().join("lint/watch/badly_linted_fixed1.js");
  let badly_linted_fixed1_output =
    util::testdata_path().join("lint/watch/badly_linted_fixed1.js.out");
  let badly_linted_fixed2 =
    util::testdata_path().join("lint/watch/badly_linted_fixed2.js");
  let badly_linted_fixed2_output =
    util::testdata_path().join("lint/watch/badly_linted_fixed2.js.out");
  let badly_linted = t.path().join("badly_linted.js");
  badly_linted_original.copy(&badly_linted);
  let mut child = util::deno_cmd()
    .current_dir(t.path())
    .arg("lint")
    .arg("--watch")
    .piped_output()
    .spawn()
    .unwrap();
  let (_stdout_lines, mut stderr_lines) = child_lines(&mut child);
  let next_line = next_line(&mut stderr_lines).await.unwrap();
  assert_contains!(&next_line, "Lint started");
  let mut output = read_all_lints(&mut stderr_lines).await;
  let expected = badly_linted_output.read_to_string();
  assert_eq!(output, expected);
  // Change content of the file again to be badly-linted
  badly_linted_fixed1.copy(&badly_linted);
  output = read_all_lints(&mut stderr_lines).await;
  let expected = badly_linted_fixed1_output.read_to_string();
  assert_eq!(output, expected);
  // Change content of the file again to be badly-linted
  badly_linted_fixed2.copy(&badly_linted);
  output = read_all_lints(&mut stderr_lines).await;
  let expected = badly_linted_fixed2_output.read_to_string();
  assert_eq!(output, expected);
  // the watcher process is still alive
  assert!(child.try_wait().unwrap().is_none());
  child.kill().unwrap();
  drop(t);
}
/// When watching a directory, every change re-lints ALL files in it, not
/// just the changed one ("Checked 2 files" both times).
#[test(flaky)]
async fn lint_all_files_on_each_change_test() {
  let t = TempDir::new();
  let badly_linted_fixed0 =
    util::testdata_path().join("lint/watch/badly_linted.js");
  let badly_linted_fixed1 =
    util::testdata_path().join("lint/watch/badly_linted_fixed1.js");
  let badly_linted_fixed2 =
    util::testdata_path().join("lint/watch/badly_linted_fixed2.js");
  let badly_linted_1 = t.path().join("badly_linted_1.js");
  let badly_linted_2 = t.path().join("badly_linted_2.js");
  badly_linted_fixed0.copy(&badly_linted_1);
  badly_linted_fixed1.copy(&badly_linted_2);
  let mut child = util::deno_cmd()
    .current_dir(t.path())
    .arg("lint")
    .arg(t.path())
    .arg("--watch")
    .piped_output()
    .spawn()
    .unwrap();
  let (_stdout_lines, mut stderr_lines) = child_lines(&mut child);
  assert_contains!(
    wait_contains("Checked", &mut stderr_lines).await,
    "Checked 2 files"
  );
  // Changing one file must still re-check both.
  badly_linted_fixed2.copy(&badly_linted_2);
  assert_contains!(
    wait_contains("Checked", &mut stderr_lines).await,
    "Checked 2 files"
  );
  assert!(child.try_wait().unwrap().is_none());
  child.kill().unwrap();
  drop(t);
}
/// `deno fmt --watch <file>`: the watcher reformats the file on start and
/// again whenever it is rewritten badly formatted.
#[test(flaky)]
async fn fmt_watch_test() {
  let fmt_testdata_path = util::testdata_path().join("fmt");
  let t = TempDir::new();
  let fixed = fmt_testdata_path.join("badly_formatted_fixed.js");
  let badly_formatted_original = fmt_testdata_path.join("badly_formatted.mjs");
  let badly_formatted = t.path().join("badly_formatted.js");
  badly_formatted_original.copy(&badly_formatted);
  let mut child = util::deno_cmd()
    .current_dir(t.path())
    .arg("fmt")
    .arg(&badly_formatted)
    .arg("--watch")
    .piped_output()
    .spawn()
    .unwrap();
  let (_stdout_lines, mut stderr_lines) = child_lines(&mut child);
  let next_line = next_line(&mut stderr_lines).await.unwrap();
  assert_contains!(&next_line, "Fmt started");
  assert_contains!(
    skip_restarting_line(&mut stderr_lines).await,
    "badly_formatted.js"
  );
  assert_contains!(
    wait_contains("Checked", &mut stderr_lines).await,
    "Checked 1 file"
  );
  wait_contains("Fmt finished", &mut stderr_lines).await;
  // The initial pass must have rewritten the file to the fixed form.
  let expected = fixed.read_to_string();
  let actual = badly_formatted.read_to_string();
  assert_eq!(actual, expected);
  // Change content of the file again to be badly formatted
  badly_formatted_original.copy(&badly_formatted);
  assert_contains!(
    skip_restarting_line(&mut stderr_lines).await,
    "badly_formatted.js"
  );
  assert_contains!(
    wait_contains("Checked", &mut stderr_lines).await,
    "Checked 1 file"
  );
  wait_contains("Fmt finished", &mut stderr_lines).await;
  // Check if file has been automatically formatted by watcher
  let expected = fixed.read_to_string();
  let actual = badly_formatted.read_to_string();
  assert_eq!(actual, expected);
  check_alive_then_kill(child);
}
/// Same as `fmt_watch_test`, but formatting the current directory (`.`)
/// instead of naming the file explicitly.
#[test(flaky)]
async fn fmt_watch_without_args_test() {
  let fmt_testdata_path = util::testdata_path().join("fmt");
  let t = TempDir::new();
  let fixed = fmt_testdata_path.join("badly_formatted_fixed.js");
  let badly_formatted_original = fmt_testdata_path.join("badly_formatted.mjs");
  let badly_formatted = t.path().join("badly_formatted.js");
  badly_formatted_original.copy(&badly_formatted);
  let mut child = util::deno_cmd()
    .current_dir(t.path())
    .arg("fmt")
    .arg("--watch")
    .arg(".")
    .piped_output()
    .spawn()
    .unwrap();
  let (_stdout_lines, mut stderr_lines) = child_lines(&mut child);
  let next_line = next_line(&mut stderr_lines).await.unwrap();
  assert_contains!(&next_line, "Fmt started");
  assert_contains!(
    skip_restarting_line(&mut stderr_lines).await,
    "badly_formatted.js"
  );
  assert_contains!(
    wait_contains("Checked", &mut stderr_lines).await,
    "Checked 1 file"
  );
  wait_contains("Fmt finished.", &mut stderr_lines).await;
  let expected = fixed.read_to_string();
  let actual = badly_formatted.read_to_string();
  assert_eq!(actual, expected);
  // Change content of the file again to be badly formatted
  badly_formatted_original.copy(&badly_formatted);
  assert_contains!(
    skip_restarting_line(&mut stderr_lines).await,
    "badly_formatted.js"
  );
  assert_contains!(
    wait_contains("Checked", &mut stderr_lines).await,
    "Checked 1 file"
  );
  // Check if file has been automatically formatted by watcher
  let expected = fixed.read_to_string();
  let actual = badly_formatted.read_to_string();
  assert_eq!(actual, expected);
  check_alive_then_kill(child);
}
/// `deno fmt --watch --check` on a directory re-checks every file on each
/// change and keeps reporting all unformatted files.
#[test(flaky)]
async fn fmt_check_all_files_on_each_change_test() {
  let t = TempDir::new();
  let fmt_testdata_path = util::testdata_path().join("fmt");
  let badly_formatted_original = fmt_testdata_path.join("badly_formatted.mjs");
  let badly_formatted_1 = t.path().join("badly_formatted_1.js");
  let badly_formatted_2 = t.path().join("badly_formatted_2.js");
  badly_formatted_original.copy(&badly_formatted_1);
  badly_formatted_original.copy(&badly_formatted_2);
  let mut child = util::deno_cmd()
    .current_dir(t.path())
    .arg("fmt")
    .arg(t.path())
    .arg("--watch")
    .arg("--check")
    .piped_output()
    .spawn()
    .unwrap();
  let (_stdout_lines, mut stderr_lines) = child_lines(&mut child);
  assert_contains!(
    wait_contains("error", &mut stderr_lines).await,
    "Found 2 not formatted files in 2 files"
  );
  wait_contains("Fmt failed.", &mut stderr_lines).await;
  // Change content of the file again to be badly formatted
  badly_formatted_original.copy(&badly_formatted_1);
  assert_contains!(
    wait_contains("error", &mut stderr_lines).await,
    "Found 2 not formatted files in 2 files"
  );
  check_alive_then_kill(child);
}
/// `deno run --watch`: restarts on changes to the entry file, picks up new
/// static dependencies, and survives syntax errors in both the entry file
/// and its imports.
#[test(flaky)]
async fn run_watch_no_dynamic() {
  let t = TempDir::new();
  let file_to_watch = t.path().join("file_to_watch.js");
  file_to_watch.write("console.log('Hello world');");
  let mut child = util::deno_cmd()
    .current_dir(t.path())
    .arg("run")
    .arg("--watch")
    // debug logging is required for wait_for_watcher ("Watching paths").
    .arg("-L")
    .arg("debug")
    .arg(&file_to_watch)
    .env("NO_COLOR", "1")
    .piped_output()
    .spawn()
    .unwrap();
  let (mut stdout_lines, mut stderr_lines) = child_lines(&mut child);
  wait_contains("Hello world", &mut stdout_lines).await;
  wait_for_watcher("file_to_watch.js", &mut stderr_lines).await;
  // Change content of the file
  file_to_watch.write("console.log('Hello world2');");
  wait_contains("Restarting", &mut stderr_lines).await;
  wait_contains("Hello world2", &mut stdout_lines).await;
  wait_for_watcher("file_to_watch.js", &mut stderr_lines).await;
  // Add dependency
  let another_file = t.path().join("another_file.js");
  another_file.write("export const foo = 0;");
  file_to_watch
    .write("import { foo } from './another_file.js'; console.log(foo);");
  wait_contains("Restarting", &mut stderr_lines).await;
  wait_contains("0", &mut stdout_lines).await;
  wait_for_watcher("another_file.js", &mut stderr_lines).await;
  // Confirm that restarting occurs when a new file is updated
  another_file.write("export const foo = 42;");
  wait_contains("Restarting", &mut stderr_lines).await;
  wait_contains("42", &mut stdout_lines).await;
  wait_for_watcher("file_to_watch.js", &mut stderr_lines).await;
  // Confirm that the watcher keeps on working even if the file is updated and has invalid syntax
  file_to_watch.write("syntax error ^^");
  wait_contains("Restarting", &mut stderr_lines).await;
  wait_contains("error:", &mut stderr_lines).await;
  wait_for_watcher("file_to_watch.js", &mut stderr_lines).await;
  // Then restore the file
  file_to_watch
    .write("import { foo } from './another_file.js'; console.log(foo);");
  wait_contains("Restarting", &mut stderr_lines).await;
  wait_contains("42", &mut stdout_lines).await;
  wait_for_watcher("another_file.js", &mut stderr_lines).await;
  // Update the content of the imported file with invalid syntax
  another_file.write("syntax error ^^");
  wait_contains("Restarting", &mut stderr_lines).await;
  wait_contains("error:", &mut stderr_lines).await;
  wait_for_watcher("another_file.js", &mut stderr_lines).await;
  // Modify the imported file and make sure that restarting occurs
  another_file.write("export const foo = 'modified!';");
  wait_contains("Restarting", &mut stderr_lines).await;
  wait_contains("modified!", &mut stdout_lines).await;
  wait_contains("Watching paths", &mut stderr_lines).await;
  check_alive_then_kill(child);
}
/// `deno serve --watch=<extra_file>`: the server restarts on changes to the
/// entry file, to explicitly watched extra files, and to dependencies added
/// later — and keeps watching after syntax errors.
#[test(flaky)]
async fn serve_watch_all() {
  let t = TempDir::new();
  let main_file_to_watch = t.path().join("main_file_to_watch.js");
  main_file_to_watch.write(
    "export default {
      fetch(_request) {
        return new Response(\"aaaaaaqqq!\");
      },
    };",
  );
  // Extra file passed via --watch=...; initially empty.
  let another_file = t.path().join("another_file.js");
  another_file.write("");
  let mut child = util::deno_cmd()
    .current_dir(t.path())
    .arg("serve")
    .arg(format!("--watch={another_file}"))
    // debug logging is required for wait_for_watcher ("Watching paths").
    .arg("-L")
    .arg("debug")
    .arg(&main_file_to_watch)
    .env("NO_COLOR", "1")
    .piped_output()
    .spawn()
    .unwrap();
  let (mut stdout_lines, mut stderr_lines) = child_lines(&mut child);
  wait_for_watcher("main_file_to_watch.js", &mut stderr_lines).await;
  // Change content of the file
  main_file_to_watch.write(
    "export default {
      fetch(_request) {
        return new Response(\"aaaaaaqqq123!\");
      },
    };",
  );
  wait_contains("Restarting", &mut stderr_lines).await;
  wait_for_watcher("main_file_to_watch.js", &mut stderr_lines).await;
  another_file.write("export const foo = 0;");
  // Confirm that the added file is watched as well
  wait_contains("Restarting", &mut stderr_lines).await;
  wait_for_watcher("main_file_to_watch.js", &mut stderr_lines).await;
  main_file_to_watch
    .write("import { foo } from './another_file.js'; console.log(foo);");
  wait_contains("Restarting", &mut stderr_lines).await;
  wait_for_watcher("main_file_to_watch.js", &mut stderr_lines).await;
  wait_contains("0", &mut stdout_lines).await;
  another_file.write("export const foo = 42;");
  wait_contains("Restarting", &mut stderr_lines).await;
  wait_for_watcher("main_file_to_watch.js", &mut stderr_lines).await;
  wait_contains("42", &mut stdout_lines).await;
  // Confirm that watch continues even with wrong syntax error
  another_file.write("syntax error ^^");
  wait_contains("Restarting", &mut stderr_lines).await;
  wait_contains("error:", &mut stderr_lines).await;
  wait_for_watcher("main_file_to_watch.js", &mut stderr_lines).await;
  main_file_to_watch.write(
    "export default {
      fetch(_request) {
        return new Response(\"aaaaaaqqq!\");
      },
    };",
  );
  wait_contains("Restarting", &mut stderr_lines).await;
  wait_for_watcher("main_file_to_watch.js", &mut stderr_lines).await;
  check_alive_then_kill(child);
}
#[test(flaky)]
async fn run_watch_npm_specifier() {
  // Running an npm: specifier with --watch pointed at a plain text file
  // still restarts when that file changes.
  let _server = util::http_server();
  let dir = TempDir::new();
  let watched = dir.path().join("file_to_watch.txt");
  watched.write("Hello world");

  let mut deno = util::deno_cmd()
    .current_dir(dir.path())
    .envs(env_vars_for_npm_tests())
    .arg("run")
    .arg("--watch=file_to_watch.txt")
    .arg("-L")
    .arg("debug")
    .arg("npm:@denotest/bin/cli-cjs")
    .arg("Hello world")
    .env("NO_COLOR", "1")
    .piped_output()
    .spawn()
    .unwrap();
  let (mut out, mut err) = child_lines(&mut deno);
  wait_contains("Hello world", &mut out).await;
  wait_for_watcher("file_to_watch.txt", &mut err).await;

  // Touch the watched file: expect a restart and the CLI output again.
  watched.write("Hello world2");
  wait_contains("Restarting", &mut err).await;
  wait_contains("Hello world", &mut out).await;
  wait_for_watcher("file_to_watch.txt", &mut err).await;
  check_alive_then_kill(deno);
}
// TODO(bartlomieju): this test became flaky on macOS runner; it is unclear
// if that's because of a bug in code or the runner itself. We should reenable
// it once we upgrade to XL runners for macOS.
#[cfg(not(target_os = "macos"))]
#[test(flaky)]
async fn run_watch_external_watch_files() {
  // A file passed via `--watch=<path>` that is not part of the module graph
  // must still trigger restarts when modified.
  let t = TempDir::new();
  let file_to_watch = t.path().join("file_to_watch.js");
  file_to_watch.write("console.log('Hello world');");
  let external_file_to_watch = t.path().join("external_file_to_watch.txt");
  external_file_to_watch.write("Hello world");
  // Build the flag in one shot instead of mutating a String in place.
  let watch_arg = format!("--watch={external_file_to_watch}");
  let mut child = util::deno_cmd()
    .current_dir(t.path())
    .arg("run")
    .arg(watch_arg)
    .arg("-L")
    .arg("debug")
    .arg(&file_to_watch)
    .env("NO_COLOR", "1")
    .piped_output()
    .spawn()
    .unwrap();
  let (mut stdout_lines, mut stderr_lines) = child_lines(&mut child);
  wait_contains("Process started", &mut stderr_lines).await;
  wait_contains("Hello world", &mut stdout_lines).await;
  wait_for_watcher("external_file_to_watch.txt", &mut stderr_lines).await;
  // Change content of the external file
  external_file_to_watch.write("Hello world2");
  wait_contains("Restarting", &mut stderr_lines).await;
  wait_contains("Process finished", &mut stderr_lines).await;
  // Again (https://github.com/denoland/deno/issues/17584)
  external_file_to_watch.write("Hello world3");
  wait_contains("Restarting", &mut stderr_lines).await;
  wait_contains("Process finished", &mut stderr_lines).await;
  check_alive_then_kill(child);
}
#[test(flaky)]
async fn run_watch_load_unload_events() {
  // The previous run's "unload" event must fire before the restarted run's
  // "load" event.
  let dir = TempDir::new();
  let script = dir.path().join("file_to_watch.js");
  script.write(
    r#"
setInterval(() => {}, 0);
globalThis.addEventListener("load", () => {
console.log("load");
});
globalThis.addEventListener("unload", () => {
console.log("unload");
});
"#,
  );
  let mut deno = util::deno_cmd()
    .current_dir(dir.path())
    .arg("run")
    .arg("--watch")
    .arg("-L")
    .arg("debug")
    .arg(&script)
    .env("NO_COLOR", "1")
    .piped_output()
    .spawn()
    .unwrap();
  let (mut out, mut err) = child_lines(&mut deno);
  // First run: "load" fires, then the watcher is armed.
  wait_contains("load", &mut out).await;
  wait_for_watcher("file_to_watch.js", &mut err).await;
  // Rewrite the script without the keep-alive interval.
  script.write(
    r#"
globalThis.addEventListener("load", () => {
console.log("load");
});
globalThis.addEventListener("unload", () => {
console.log("unload");
});
"#,
  );
  wait_contains("Restarting", &mut err).await;
  // Old run unloads, new run loads, then unloads again since nothing keeps
  // the event loop alive anymore.
  wait_contains("unload", &mut out).await;
  wait_contains("load", &mut out).await;
  wait_contains("unload", &mut out).await;
  check_alive_then_kill(deno);
}
/// Confirm that the watcher continues to work even if module resolution fails at the *first* attempt
#[test(flaky)]
async fn run_watch_not_exit() {
  let dir = TempDir::new();
  let script = dir.path().join("file_to_watch.js");
  // Start out with a file that cannot even be parsed.
  script.write("syntax error ^^");
  let mut deno = util::deno_cmd()
    .current_dir(dir.path())
    .arg("run")
    .arg("--watch")
    .arg("-L")
    .arg("debug")
    .arg(&script)
    .env("NO_COLOR", "1")
    .piped_output()
    .spawn()
    .unwrap();
  let (mut out, mut err) = child_lines(&mut deno);
  wait_contains("Process started", &mut err).await;
  wait_contains("error:", &mut err).await;
  wait_for_watcher("file_to_watch.js", &mut err).await;
  // Fixing the syntax must trigger a restart and a successful run.
  script.write("console.log(42);");
  wait_contains("Restarting", &mut err).await;
  wait_contains("42", &mut out).await;
  wait_contains("Process finished", &mut err).await;
  check_alive_then_kill(deno);
}
#[test(flaky)]
async fn run_watch_with_import_map_and_relative_paths() {
  // Writes `contents` to `name` inside `dir` and returns the path relative
  // to `dir`, so the CLI is exercised with non-absolute arguments.
  fn create_relative_tmp_file(
    dir: &TempDir,
    name: &'static str,
    contents: &'static str,
  ) -> std::path::PathBuf {
    let absolute = dir.path().join(name);
    absolute.write(contents);
    let relative = absolute
      .as_path()
      .strip_prefix(dir.path())
      .unwrap()
      .to_owned();
    assert!(relative.is_relative());
    relative
  }
  let temp = TempDir::new();
  let script = create_relative_tmp_file(
    &temp,
    "file_to_watch.js",
    "console.log('Hello world');",
  );
  let import_map = create_relative_tmp_file(
    &temp,
    "import_map.json",
    "{\"imports\": {}}",
  );
  let mut deno = util::deno_cmd()
    .current_dir(temp.path())
    .arg("run")
    .arg("--watch")
    .arg("--import-map")
    .arg(&import_map)
    .arg(&script)
    .env("NO_COLOR", "1")
    .piped_output()
    .spawn()
    .unwrap();
  let (mut out, mut err) = child_lines(&mut deno);
  // One successful run: started, finished, output printed.
  assert_contains!(&next_line(&mut err).await.unwrap(), "Process started");
  assert_contains!(next_line(&mut err).await.unwrap(), "Process finished");
  assert_contains!(next_line(&mut out).await.unwrap(), "Hello world");
  check_alive_then_kill(deno);
}
#[test(flaky)]
async fn run_watch_with_ext_flag() {
  // An extensionless file is treated as TypeScript via `--ext ts`, and the
  // watcher keeps honoring that flag across restarts.
  let dir = TempDir::new();
  let script = dir.path().join("file_to_watch");
  script.write("interface I{}; console.log(42);");
  let mut deno = util::deno_cmd()
    .current_dir(dir.path())
    .arg("run")
    .arg("--watch")
    .arg("--log-level")
    .arg("debug")
    .arg("--ext")
    .arg("ts")
    .arg(&script)
    .env("NO_COLOR", "1")
    .piped_output()
    .spawn()
    .unwrap();
  let (mut out, mut err) = child_lines(&mut deno);
  wait_contains("42", &mut out).await;
  // Make sure the watcher actually restarts and works fine with the proper language
  wait_for_watcher("file_to_watch", &mut err).await;
  wait_contains("Process finished", &mut err).await;
  script.write("type Bear = 'polar' | 'grizzly'; console.log(123);");
  wait_contains("Restarting!", &mut err).await;
  wait_contains("123", &mut out).await;
  wait_contains("Process finished", &mut err).await;
  check_alive_then_kill(deno);
}
#[test(flaky)]
async fn run_watch_error_messages() {
  // Errors thrown with a `cause` must surface both the outer error and the
  // "Caused by" chain in watch mode.
  let dir = TempDir::new();
  let script = dir.path().join("file_to_watch.js");
  script.write("throw SyntaxError(`outer`, {cause: TypeError(`inner`)})");
  let mut deno = util::deno_cmd()
    .current_dir(dir.path())
    .arg("run")
    .arg("--watch")
    .arg(&script)
    .env("NO_COLOR", "1")
    .piped_output()
    .spawn()
    .unwrap();
  let (_, mut err) = child_lines(&mut deno);
  wait_contains("Process started", &mut err).await;
  wait_contains(
    "error: Uncaught (in promise) SyntaxError: outer",
    &mut err,
  )
  .await;
  wait_contains("Caused by: TypeError: inner", &mut err).await;
  wait_contains("Process failed", &mut err).await;
  check_alive_then_kill(deno);
}
#[test(flaky)]
async fn test_watch_basic() {
let t = TempDir::new();
let mut child = util::deno_cmd()
.current_dir(t.path())
.arg("test")
.arg("--watch")
.arg("--no-check")
.arg(t.path())
.env("NO_COLOR", "1")
.piped_output()
.spawn()
.unwrap();
let (mut stdout_lines, mut stderr_lines) = child_lines(&mut child);
assert_eq!(next_line(&mut stdout_lines).await.unwrap(), "");
assert_contains!(
next_line(&mut stdout_lines).await.unwrap(),
"0 passed | 0 failed"
);
wait_contains("Test finished", &mut stderr_lines).await;
let foo_file = t.path().join("foo.js");
let bar_file = t.path().join("bar.js");
let foo_test = t.path().join("foo_test.js");
let bar_test = t.path().join("bar_test.js");
foo_file.write("export default function foo() { 1 + 1 }");
bar_file.write("export default function bar() { 2 + 2 }");
foo_test.write("import foo from './foo.js'; Deno.test('foo', foo);");
bar_test.write("import bar from './bar.js'; Deno.test('bar', bar);");
assert_eq!(next_line(&mut stdout_lines).await.unwrap(), "");
assert_contains!(
next_line(&mut stdout_lines).await.unwrap(),
"running 1 test"
);
assert_contains!(next_line(&mut stdout_lines).await.unwrap(), "foo", "bar");
assert_contains!(
next_line(&mut stdout_lines).await.unwrap(),
"running 1 test"
);
assert_contains!(next_line(&mut stdout_lines).await.unwrap(), "foo", "bar");
next_line(&mut stdout_lines).await;
next_line(&mut stdout_lines).await;
next_line(&mut stdout_lines).await;
wait_contains("Test finished", &mut stderr_lines).await;
// Change content of the file
foo_test.write("import foo from './foo.js'; Deno.test('foobar', foo);");
assert_contains!(next_line(&mut stderr_lines).await.unwrap(), "Restarting");
assert_contains!(
next_line(&mut stdout_lines).await.unwrap(),
"running 1 test"
);
assert_contains!(next_line(&mut stdout_lines).await.unwrap(), "foobar");
next_line(&mut stdout_lines).await;
next_line(&mut stdout_lines).await;
next_line(&mut stdout_lines).await;
wait_contains("Test finished", &mut stderr_lines).await;
// Add test
let another_test = t.path().join("new_test.js");
another_test.write("Deno.test('another one', () => 3 + 3)");
assert_contains!(next_line(&mut stderr_lines).await.unwrap(), "Restarting");
assert_contains!(
next_line(&mut stdout_lines).await.unwrap(),
"running 1 test"
);
assert_contains!(next_line(&mut stdout_lines).await.unwrap(), "another one");
next_line(&mut stdout_lines).await;
next_line(&mut stdout_lines).await;
next_line(&mut stdout_lines).await;
wait_contains("Test finished", &mut stderr_lines).await;
// Confirm that restarting occurs when a new file is updated
another_test.write("Deno.test('another one', () => 3 + 3); Deno.test('another another one', () => 4 + 4)");
assert_contains!(next_line(&mut stderr_lines).await.unwrap(), "Restarting");
assert_contains!(
next_line(&mut stdout_lines).await.unwrap(),
"running 2 tests"
);
assert_contains!(next_line(&mut stdout_lines).await.unwrap(), "another one");
assert_contains!(
next_line(&mut stdout_lines).await.unwrap(),
"another another one"
);
next_line(&mut stdout_lines).await;
next_line(&mut stdout_lines).await;
next_line(&mut stdout_lines).await;
wait_contains("Test finished", &mut stderr_lines).await;
// Confirm that the watcher keeps on working even if the file is updated and has invalid syntax
another_test.write("syntax error ^^");
assert_contains!(next_line(&mut stderr_lines).await.unwrap(), "Restarting");
assert_contains!(next_line(&mut stderr_lines).await.unwrap(), "error:");
assert_eq!(next_line(&mut stderr_lines).await.unwrap(), "");
assert_eq!(
next_line(&mut stderr_lines).await.unwrap(),
" syntax error ^^"
);
assert_eq!(
next_line(&mut stderr_lines).await.unwrap(),
" ~~~~~"
);
assert_contains!(next_line(&mut stderr_lines).await.unwrap(), "Test failed");
// Then restore the file
another_test.write("Deno.test('another one', () => 3 + 3)");
assert_contains!(next_line(&mut stderr_lines).await.unwrap(), "Restarting");
assert_contains!(
next_line(&mut stdout_lines).await.unwrap(),
"running 1 test"
);
assert_contains!(next_line(&mut stdout_lines).await.unwrap(), "another one");
next_line(&mut stdout_lines).await;
next_line(&mut stdout_lines).await;
next_line(&mut stdout_lines).await;
wait_contains("Test finished", &mut stderr_lines).await;
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/integration/inspector_tests.rs | tests/integration/inspector_tests.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::io::BufRead;
use std::process::ChildStderr;
use std::time::Duration;
use anyhow::Error as AnyError;
use anyhow::anyhow;
use bytes::Bytes;
use fastwebsockets::FragmentCollector;
use fastwebsockets::Frame;
use fastwebsockets::WebSocket;
use hyper::Request;
use hyper::Response;
use hyper::body::Incoming;
use hyper::upgrade::Upgraded;
use hyper_util::rt::TokioIo;
use serde_json::json;
use test_util as util;
use test_util::eprintln;
use test_util::test;
use tokio::net::TcpStream;
use tokio::time::timeout;
use url::Url;
use util::DenoChild;
use util::TestContextBuilder;
use util::assert_contains;
use util::assert_starts_with;
// Minimal `hyper::rt::Executor` that spawns handshake futures via
// `deno_unsync::spawn`; required by the `fastwebsockets` client handshake.
struct SpawnExecutor;
impl<Fut> hyper::rt::Executor<Fut> for SpawnExecutor
where
Fut: std::future::Future + Send + 'static,
Fut::Output: Send + 'static,
{
fn execute(&self, fut: Fut) {
deno_unsync::spawn(fut);
}
}
/// Opens a TCP connection to `uri` and performs a WebSocket client
/// handshake, returning the socket and the HTTP upgrade response.
async fn connect_to_ws(
  uri: Url,
) -> (WebSocket<TokioIo<Upgraded>>, Response<Incoming>) {
  // Resolve host and port; the default port follows the scheme when absent.
  // (No `&` on these temporaries — the borrows were needless.)
  let domain = uri.host().unwrap().to_string();
  let port = uri.port().unwrap_or(match uri.scheme() {
    "wss" | "https" => 443,
    _ => 80,
  });
  let addr = format!("{domain}:{port}");
  let stream = TcpStream::connect(addr).await.unwrap();
  let host = uri.host_str().unwrap();
  // Standard WebSocket client upgrade request (RFC 6455).
  let req = Request::builder()
    .method("GET")
    .uri(uri.path())
    .header("Host", host)
    .header(hyper::header::UPGRADE, "websocket")
    .header(hyper::header::CONNECTION, "Upgrade")
    .header(
      "Sec-WebSocket-Key",
      fastwebsockets::handshake::generate_key(),
    )
    .header("Sec-WebSocket-Version", "13")
    .body(http_body_util::Empty::<Bytes>::new())
    .unwrap();
  fastwebsockets::handshake::client(&SpawnExecutor, req, stream)
    .await
    .unwrap()
}
/// Notification filter that drops the noisy `Debugger.scriptParsed` events
/// and lets every other inspector message through.
fn ignore_script_parsed(msg: &str) -> bool {
  let noisy_prefix = r#"{"method":"Debugger.scriptParsed","#;
  !msg.starts_with(noisy_prefix)
}
// Line-oriented reader over a child process' stderr that echoes every line
// to the test log and skims off "Check"/"Download" progress lines.
struct StdErrLines {
// Iterator yielding stderr lines (already echoed via eprintln in `new`).
reader: Box<dyn Iterator<Item = String>>,
// "Check ..." / "Download ..." lines collected (and skipped) by `next()`.
check_lines: Vec<String>,
}
impl StdErrLines {
  /// Wraps a child's stderr, echoing every line to the test log as it is
  /// read.
  pub fn new(stderr: ChildStderr) -> Self {
    Self {
      reader: Box::new(std::io::BufReader::new(stderr).lines().map(|r| {
        let line = r.unwrap();
        eprintln!("STDERR: {}", line);
        line
      })),
      check_lines: Default::default(),
    }
  }

  /// Returns the next stderr line with ANSI codes stripped, skipping (but
  /// recording) type-check and download progress lines.
  pub fn next(&mut self) -> Option<String> {
    loop {
      let line = util::strip_ansi_codes(&self.reader.next()?).to_string();
      if line.starts_with("Check") || line.starts_with("Download") {
        self.check_lines.push(line);
      } else {
        return Some(line);
      }
    }
  }

  /// Asserts that the next stderr lines are exactly `expected_lines`, in
  /// order. An empty slice asserts nothing and consumes no lines (the old
  /// index-based loop read one line and then panicked on an empty slice).
  pub fn assert_lines(&mut self, expected_lines: &[&str]) {
    for expected in expected_lines {
      assert_eq!(self.next().unwrap(), *expected);
    }
  }

  /// Parses the "Debugger listening on ws://..." banner and returns the
  /// advertised WebSocket URL.
  pub fn extract_ws_url(&mut self) -> url::Url {
    let stderr_first_line = self.next().unwrap();
    assert_starts_with!(&stderr_first_line, "Debugger listening on ");
    // The banner must contain exactly one "ws:" occurrence.
    let v: Vec<_> = stderr_first_line.match_indices("ws:").collect();
    assert_eq!(v.len(), 1);
    let ws_url_index = v[0].0;
    let ws_url = &stderr_first_line[ws_url_index..];
    url::Url::parse(ws_url).unwrap()
  }
}
// Drives one inspector session: owns the spawned `deno` child, the
// WebSocket to its inspector endpoint, and line readers over its stdio.
struct InspectorTester {
socket: FragmentCollector<TokioIo<Upgraded>>,
// `recv` drops any message for which this filter returns `false`.
notification_filter: Box<dyn FnMut(&str) -> bool + 'static>,
child: DenoChild,
stderr_lines: StdErrLines,
stdout_lines: Box<dyn Iterator<Item = String>>,
}
impl Drop for InspectorTester {
fn drop(&mut self) {
// Best effort: the child may already have exited, so ignore kill errors.
_ = self.child.kill();
}
}
impl InspectorTester {
/// Spawns echoing readers over the child's stdio, parses the inspector URL
/// from stderr, and completes the WebSocket handshake (expects 101).
async fn create<F>(mut child: DenoChild, notification_filter: F) -> Self
where
F: FnMut(&str) -> bool + 'static,
{
let stdout = child.stdout.take().unwrap();
let stdout_lines = std::io::BufReader::new(stdout).lines().map(|r| {
let line = r.unwrap();
eprintln!("STDOUT: {}", line);
line
});
let stderr = child.stderr.take().unwrap();
let mut stderr_lines = StdErrLines::new(stderr);
let uri = stderr_lines.extract_ws_url();
let (socket, response) = connect_to_ws(uri).await;
assert_eq!(response.status(), 101); // Switching protocols.
Self {
socket: FragmentCollector::new(socket),
notification_filter: Box::new(notification_filter),
child,
stderr_lines,
stdout_lines: Box::new(stdout_lines),
}
}
/// Sends each JSON message as a text frame; on the first write error the
/// test fails with the child's remaining stdio dumped (see `handle_error`).
async fn send_many(&mut self, messages: &[serde_json::Value]) {
// TODO(bartlomieju): add graceful error handling
for msg in messages {
let result = self
.socket
.write_frame(Frame::text(msg.to_string().into_bytes().into()))
.await
.map_err(|e| anyhow!(e));
self.handle_error(result);
}
}
/// Convenience wrapper around `send_many` for a single message.
async fn send(&mut self, message: serde_json::Value) {
self.send_many(&[message]).await;
}
/// Unwraps `result`; on error, drains and joins the child's remaining
/// stdout/stderr, kills and reaps it, then panics with all collected output.
fn handle_error<T>(&mut self, result: Result<T, AnyError>) -> T {
match result {
Ok(result) => result,
Err(err) => {
let mut stdout = vec![];
for line in self.stdout_lines.by_ref() {
stdout.push(line);
}
let mut stderr = vec![];
while let Some(line) = self.stderr_lines.next() {
stderr.push(line);
}
let stdout = stdout.join("\n");
let stderr = stderr.join("\n");
self.child.kill().unwrap();
self.child.wait().unwrap();
panic!(
"Inspector test failed with error: {err:?}.\nstdout:\n{stdout}\nstderr:\n{stderr}"
);
}
}
}
/// Reads frames until one passes the notification filter; panics if no
/// frame arrives within one minute.
async fn recv(&mut self) -> String {
loop {
// In the rare case this locks up, don't wait longer than one minute
let result = timeout(Duration::from_secs(60), self.socket.read_frame())
.await
.expect("recv() timeout")
.map_err(|e| anyhow!(e));
let message =
String::from_utf8(self.handle_error(result).payload.to_vec()).unwrap();
if (self.notification_filter)(&message) {
return message;
}
}
}
/// Like `recv` but parses the received message as JSON.
async fn recv_as_json(&mut self) -> serde_json::Value {
let msg = self.recv().await;
serde_json::from_str(&msg).unwrap()
}
/// Receives `responses.len() + notifications.len()` messages, matching each
/// against the next expected prefix. Messages starting with `{"id":` are
/// responses; everything else is a notification. Each stream must arrive in
/// order, but the two streams may interleave arbitrarily.
async fn assert_received_messages(
&mut self,
responses: &[&str],
notifications: &[&str],
) {
let expected_messages = responses.len() + notifications.len();
let mut responses_idx = 0;
let mut notifications_idx = 0;
for _ in 0..expected_messages {
let msg = self.recv().await;
if msg.starts_with(r#"{"id":"#) {
assert!(
msg.starts_with(responses[responses_idx]),
"Doesn't start with {}, instead received {}",
responses[responses_idx],
msg
);
responses_idx += 1;
} else {
assert!(
msg.starts_with(notifications[notifications_idx]),
"Doesn't start with {}, instead received {}",
notifications[notifications_idx],
msg
);
notifications_idx += 1;
}
}
}
// Next filtered stderr line; panics at EOF.
fn stderr_line(&mut self) -> String {
self.stderr_lines.next().unwrap()
}
// Next stdout line; panics at EOF.
fn stdout_line(&mut self) -> String {
self.stdout_lines.next().unwrap()
}
// Asserts the banner printed by `--inspect`.
fn assert_stderr_for_inspect(&mut self) {
self
.stderr_lines
.assert_lines(&["Visit chrome://inspect to connect to the debugger."]);
}
// Asserts the banner printed by `--inspect-brk`, which additionally waits
// for a debugger to attach before running code.
fn assert_stderr_for_inspect_brk(&mut self) {
self.stderr_lines.assert_lines(&[
"Visit chrome://inspect to connect to the debugger.",
"Deno is waiting for debugger to connect.",
]);
}
}
/// Builds `<flag_prefix>=127.0.0.1:<port>` with a process-wide unique port,
/// so concurrently running inspector tests never collide on a socket.
fn inspect_flag_with_unique_port(flag_prefix: &str) -> String {
  use std::sync::atomic::AtomicU16;
  use std::sync::atomic::Ordering;
  static PORT: AtomicU16 = AtomicU16::new(9229);
  format!(
    "{}=127.0.0.1:{}",
    flag_prefix,
    PORT.fetch_add(1, Ordering::Relaxed)
  )
}
#[test]
async fn inspector_connect() {
  // A plain WebSocket client can attach to a `--inspect` process.
  let script = util::testdata_path().join("inspector/inspector1.js");
  let mut child = util::deno_cmd()
    .arg("run")
    .arg(inspect_flag_with_unique_port("--inspect"))
    .arg(script)
    .stderr_piped()
    .spawn()
    .unwrap();
  let mut stderr_lines = StdErrLines::new(child.stderr.take().unwrap());
  let ws_url = stderr_lines.extract_ws_url();
  let (_socket, response) = connect_to_ws(ws_url).await;
  // The upgrade must succeed.
  assert_eq!("101 Switching Protocols", response.status().to_string());
  child.kill().unwrap();
  child.wait().unwrap();
}
// Attaches over `--inspect-brk`, confirms the initial pause, evaluates an
// expression in the paused frame, then resumes and checks the script output.
#[test(flaky)]
async fn inspector_break_on_first_line() {
let script = util::testdata_path().join("inspector/inspector2.js");
let child = util::deno_cmd()
.arg("run")
.arg(inspect_flag_with_unique_port("--inspect-brk"))
.arg(script)
.piped_output()
.spawn()
.unwrap();
let mut tester = InspectorTester::create(child, ignore_script_parsed).await;
tester.assert_stderr_for_inspect_brk();
// Enable the Runtime and Debugger domains.
tester
.send_many(&[
json!({"id":1,"method":"Runtime.enable"}),
json!({"id":2,"method":"Debugger.enable"}),
])
.await;
tester.assert_received_messages(
&[
r#"{"id":1,"result":{}}"#,
r#"{"id":2,"result":{"debuggerId":"#,
],
&[
r#"{"method":"Runtime.executionContextCreated","params":{"context":{"id":1,"#,
],
)
.await;
// Release the runtime; with --inspect-brk it pauses immediately.
tester
.send(json!({"id":3,"method":"Runtime.runIfWaitingForDebugger"}))
.await;
tester
.assert_received_messages(
&[r#"{"id":3,"result":{}}"#],
&[r#"{"method":"Debugger.paused","#],
)
.await;
// Evaluate an expression while paused; it prints via core.print.
tester
.send(json!({
"id":4,
"method":"Runtime.evaluate",
"params":{
"expression":"Deno[Deno.internal].core.print(\"hello from the inspector\\n\")",
"contextId":1,
"includeCommandLineAPI":true,
"silent":false,
"returnByValue":true
}
}))
.await;
tester
.assert_received_messages(
&[r#"{"id":4,"result":{"result":{"type":"object","subtype":"null","value":null}}}"#],
&[],
)
.await;
assert_eq!(
&tester.stdout_lines.next().unwrap(),
"hello from the inspector"
);
// Resume and let the script itself run.
tester
.send(json!({"id":5,"method":"Debugger.resume"}))
.await;
tester
.assert_received_messages(&[r#"{"id":5,"result":{}}"#], &[])
.await;
assert_eq!(
&tester.stdout_lines.next().unwrap(),
"hello from the script"
);
tester.child.kill().unwrap();
tester.child.wait().unwrap();
}
#[test]
async fn inspector_pause() {
  // `Debugger.pause` is acknowledged once the Debugger domain is enabled.
  let script = util::testdata_path().join("inspector/inspector1.js");
  let child = util::deno_cmd()
    .arg("run")
    .arg(inspect_flag_with_unique_port("--inspect"))
    .arg(script)
    .piped_output()
    .spawn()
    .unwrap();
  let mut tester = InspectorTester::create(child, ignore_script_parsed).await;
  tester.send(json!({"id":6,"method":"Debugger.enable"})).await;
  tester
    .assert_received_messages(&[r#"{"id":6,"result":{"debuggerId":"#], &[])
    .await;
  tester.send(json!({"id":31,"method":"Debugger.pause"})).await;
  tester
    .assert_received_messages(&[r#"{"id":31,"result":{}}"#], &[])
    .await;
  tester.child.kill().unwrap();
  tester.child.wait().unwrap();
}
#[test(flaky)]
fn inspector_port_collision() {
  // Skip this test on WSL, which allows multiple processes to listen on the
  // same port, rather than making `bind()` fail with `EADDRINUSE`. We also
  // skip this test on Windows because it will occasionally flake, possibly
  // due to a similar issue.
  let on_wsl = cfg!(target_os = "linux")
    && std::env::var_os("WSL_DISTRO_NAME").is_some();
  if on_wsl || cfg!(windows) {
    return;
  }
  let script = util::testdata_path().join("inspector/inspector1.js");
  let inspect_flag = inspect_flag_with_unique_port("--inspect");
  // The first process grabs the port...
  let mut child1 = util::deno_cmd()
    .arg("run")
    .arg(&inspect_flag)
    .arg(script.clone())
    .stderr_piped()
    .spawn()
    .unwrap();
  let mut stderr_1_lines = StdErrLines::new(child1.stderr.take().unwrap());
  let _ = stderr_1_lines.extract_ws_url();
  // ...so the second one must fail to start its inspector server.
  let mut child2 = util::deno_cmd()
    .arg("run")
    .arg(&inspect_flag)
    .arg(script)
    .stderr_piped()
    .spawn()
    .unwrap();
  let stderr_2 = child2.stderr.as_mut().unwrap();
  let mut saw_failure = false;
  for line in std::io::BufReader::new(stderr_2).lines() {
    let line = line.unwrap();
    // The second process must never claim to be listening.
    assert!(!line.contains("Debugger listening"));
    if line.contains("Failed to start inspector server") {
      saw_failure = true;
      break;
    }
  }
  assert!(saw_failure);
  child1.kill().unwrap();
  child1.wait().unwrap();
  child2.wait().unwrap();
}
// Regression test: resuming across many pauses must not deadlock, and the
// websocket close handshake at the end must be graceful.
#[test(flaky)]
async fn inspector_does_not_hang() {
let script = util::testdata_path().join("inspector/inspector3.js");
let child = util::deno_cmd()
.arg("run")
.arg(inspect_flag_with_unique_port("--inspect-brk"))
.env("NO_COLOR", "1")
.arg(script)
.piped_output()
.spawn()
.unwrap();
let mut tester = InspectorTester::create(child, ignore_script_parsed).await;
tester.assert_stderr_for_inspect_brk();
// Enable both domains and set blackbox patterns in a single batch.
tester
.send_many(&[
json!({"id":1,"method":"Runtime.enable"}),
json!({"id":2,"method":"Debugger.enable"}),
json!({"id":3,"method":"Debugger.setBlackboxPatterns","params":{"patterns":["/node_modules/|/bower_components/"]}}),
])
.await;
tester.assert_received_messages(
&[
r#"{"id":1,"result":{}}"#,
r#"{"id":2,"result":{"debuggerId":"#,
r#"{"id":3,"result":"#,
],
&[
r#"{"method":"Runtime.executionContextCreated","params":{"context":{"id":1,"#
],
)
.await;
// Start the script; --inspect-brk pauses on the first statement.
tester
.send(json!({"id":4,"method":"Runtime.runIfWaitingForDebugger"}))
.await;
tester
.assert_received_messages(
&[r#"{"id":4,"result":{}}"#],
&[r#"{"method":"Debugger.paused","#],
)
.await;
tester
.send(json!({"id":5,"method":"Debugger.resume"}))
.await;
tester
.assert_received_messages(
&[r#"{"id":5,"result":{}}"#],
&[r#"{"method":"Debugger.resumed","params":{}}"#],
)
.await;
// The script pauses once per iteration; resume 128 times and check stdout
// and the inspector notifications stay in lockstep.
for i in 0..128u32 {
let request_id = i + 10;
// Expect the number {i} on stdout.
let s = i.to_string();
assert_eq!(tester.stdout_lines.next().unwrap(), s);
tester
.assert_received_messages(
&[],
&[
r#"{"method":"Runtime.consoleAPICalled","#,
r#"{"method":"Debugger.paused","#,
],
)
.await;
tester
.send(json!({"id":request_id,"method":"Debugger.resume"}))
.await;
tester
.assert_received_messages(
&[&format!(r#"{{"id":{request_id},"result":{{}}}}"#)],
&[r#"{"method":"Debugger.resumed","params":{}}"#],
)
.await;
}
// Check that we can gracefully close the websocket connection.
tester
.socket
.write_frame(Frame::close_raw(vec![].into()))
.await
.unwrap();
assert_eq!(&tester.stdout_lines.next().unwrap(), "done");
// TODO(bartlomieju): this line makes no sense - if the inspector is connected then the
// process should not exit on its own.
// assert!(tester.child.wait().unwrap().success());
}
#[test]
fn inspector_without_brk_runs_code() {
  // With plain `--inspect` (no -brk) the script runs right away, without
  // waiting for a debugger to attach.
  let script = util::testdata_path().join("inspector/inspector4.js");
  let mut child = util::deno_cmd()
    .arg("run")
    .arg(inspect_flag_with_unique_port("--inspect"))
    .arg(script)
    .piped_output()
    .spawn()
    .unwrap();
  let mut stderr_lines = StdErrLines::new(child.stderr.take().unwrap());
  let _ = stderr_lines.extract_ws_url();
  // Check that inspector actually runs code without waiting for inspector
  // connection.
  let stdout = child.stdout.as_mut().unwrap();
  let first_stdout_line = std::io::BufReader::new(stdout)
    .lines()
    .map(|r| r.unwrap())
    .next()
    .unwrap();
  assert_eq!(first_stdout_line, "hello");
  child.kill().unwrap();
  child.wait().unwrap();
}
// Regression test: evaluating expressions through an attached inspector in a
// REPL session (`deno repl --inspect`) must not crash the process.
#[test]
async fn inspector_runtime_evaluate_does_not_crash() {
let child = util::deno_cmd()
.arg("repl")
.arg("--allow-read")
.arg(inspect_flag_with_unique_port("--inspect"))
.env("RUST_BACKTRACE", "1")
.stdin(std::process::Stdio::piped())
.piped_output()
.spawn()
.unwrap();
let mut tester = InspectorTester::create(child, ignore_script_parsed).await;
// Keep stdin open so the REPL stays alive until the end of the test.
let stdin = tester.child.stdin.take().unwrap();
tester.assert_stderr_for_inspect();
assert_starts_with!(&tester.stdout_line(), "Deno");
assert_eq!(
&tester.stdout_line(),
"exit using ctrl+d, ctrl+c, or close()"
);
assert_eq!(&tester.stderr_line(), "Debugger session started.");
// Enable the Runtime and Debugger domains.
tester
.send_many(&[
json!({"id":1,"method":"Runtime.enable"}),
json!({"id":2,"method":"Debugger.enable"}),
])
.await;
tester.assert_received_messages(
&[
r#"{"id":1,"result":{}}"#,
r#"{"id":2,"result":{"debuggerId":"#,
],
&[
r#"{"method":"Runtime.executionContextCreated","params":{"context":{"id":1,"#,
],
)
.await;
// Compile-only check of an expression (not persisted, not run).
tester
.send(json!({
"id":3,
"method":"Runtime.compileScript",
"params":{
"expression":"Deno.cwd()",
"sourceURL":"",
"persistScript":false,
"executionContextId":1
}
}))
.await;
tester
.assert_received_messages(&[r#"{"id":3,"result":{}}"#], &[])
.await;
// Evaluate Deno.cwd() in REPL mode; the result must be a string.
tester
.send(json!({
"id":4,
"method":"Runtime.evaluate",
"params":{
"expression":"Deno.cwd()",
"objectGroup":"console",
"includeCommandLineAPI":true,
"silent":false,
"contextId":1,
"returnByValue":true,
"generatePreview":true,
"userGesture":true,
"awaitPromise":false,
"replMode":true
}
}))
.await;
tester
.assert_received_messages(
&[r#"{"id":4,"result":{"result":{"type":"string","value":""#],
&[],
)
.await;
// console.error('done') reaches the process stderr (asserted below) and
// also yields a Runtime.consoleAPICalled notification.
tester
.send(json!({
"id":5,
"method":"Runtime.evaluate",
"params":{
"expression":"console.error('done');",
"objectGroup":"console",
"includeCommandLineAPI":true,
"silent":false,
"contextId":1,
"returnByValue":true,
"generatePreview":true,
"userGesture":true,
"awaitPromise":false,
"replMode":true
}
}))
.await;
tester
.assert_received_messages(
&[r#"{"id":5,"result":{"result":{"type":"undefined"}}}"#],
&[r#"{"method":"Runtime.consoleAPICalled"#],
)
.await;
assert_eq!(&tester.stderr_line(), "done");
// Closing stdin ends the REPL; the process should exit cleanly.
drop(stdin);
tester.child.wait().unwrap();
}
#[test]
async fn inspector_json() {
  // The /json discovery endpoint must echo the request's Host header in the
  // advertised `webSocketDebuggerUrl`.
  let script = util::testdata_path().join("inspector/inspector1.js");
  let mut child = util::deno_cmd()
    .arg("run")
    .arg(inspect_flag_with_unique_port("--inspect"))
    .arg(script)
    .stderr_piped()
    .spawn()
    .unwrap();
  let mut stderr_lines = StdErrLines::new(child.stderr.take().unwrap());
  let ws_url = stderr_lines.extract_ws_url();
  let mut url = ws_url.clone();
  let _ = url.set_scheme("http");
  url.set_path("/json");
  let client = reqwest::Client::new();
  // Ensure that the webSocketDebuggerUrl matches the host header
  let cases = [
    (None, ws_url.as_str()),
    (Some("some.random.host"), "ws://some.random.host/"),
    (Some("some.random.host:1234"), "ws://some.random.host:1234/"),
    (Some("[::1]:1234"), "ws://[::1]:1234/"),
  ];
  for (host, expected) in cases {
    let mut req = reqwest::Request::new(reqwest::Method::GET, url.clone());
    if let Some(host) = host {
      req.headers_mut().insert(
        reqwest::header::HOST,
        reqwest::header::HeaderValue::from_static(host),
      );
    }
    let resp = client.execute(req).await.unwrap();
    assert_eq!(resp.status(), reqwest::StatusCode::OK);
    let endpoint_list: Vec<serde_json::Value> =
      serde_json::from_str(&resp.text().await.unwrap()).unwrap();
    // Some advertised endpoint must contain the expected URL.
    assert!(endpoint_list.iter().any(|e| {
      e["webSocketDebuggerUrl"]
        .as_str()
        .unwrap()
        .contains(expected)
    }));
  }
  child.kill().unwrap();
  child.wait().unwrap();
}
#[test]
async fn inspector_json_list() {
  // The /json/list endpoint must advertise the exact WebSocket URL printed
  // to stderr on startup.
  let script = util::testdata_path().join("inspector/inspector1.js");
  let mut child = util::deno_cmd()
    .arg("run")
    .arg(inspect_flag_with_unique_port("--inspect"))
    .arg(script)
    .stderr_piped()
    .spawn()
    .unwrap();
  let stderr = child.stderr.take().unwrap();
  let mut stderr_lines = StdErrLines::new(stderr);
  let ws_url = stderr_lines.extract_ws_url();
  let mut url = ws_url.clone();
  let _ = url.set_scheme("http");
  url.set_path("/json/list");
  let resp = reqwest::get(url).await.unwrap();
  assert_eq!(resp.status(), reqwest::StatusCode::OK);
  let endpoint_list: Vec<serde_json::Value> =
    serde_json::from_str(&resp.text().await.unwrap()).unwrap();
  let matching_endpoint = endpoint_list
    .iter()
    .find(|e| e["webSocketDebuggerUrl"] == ws_url.as_str());
  assert!(matching_endpoint.is_some());
  child.kill().unwrap();
  // Reap the child so it doesn't linger as a zombie; every sibling test
  // calls `wait()` after `kill()`, and this one was the lone exception.
  child.wait().unwrap();
}
#[test]
async fn inspector_connect_non_ws() {
  // https://github.com/denoland/deno/issues/11449
  // Verify we don't panic if non-WS connection is being established
  let script = util::testdata_path().join("inspector/inspector1.js");
  let mut child = util::deno_cmd()
    .arg("run")
    .arg(inspect_flag_with_unique_port("--inspect"))
    .arg(script)
    .stderr_piped()
    .spawn()
    .unwrap();
  let mut stderr_lines = StdErrLines::new(child.stderr.take().unwrap());
  let mut ws_url = stderr_lines.extract_ws_url();
  // Change scheme to URL and try send a request. We're not interested
  // in the request result, just that the process doesn't panic.
  ws_url.set_scheme("http").unwrap();
  let resp = reqwest::get(ws_url).await.unwrap();
  assert_eq!("400 Bad Request", resp.status().to_string());
  child.kill().unwrap();
  child.wait().unwrap();
}
#[test(flaky)]
async fn inspector_break_on_first_line_in_test() {
  // Run `deno test` with --inspect-brk and drive a DevTools Protocol session:
  // the runtime must pause before the first line, answer an evaluation while
  // paused, resume, and then run the test file to completion.
  let script = util::testdata_path().join("inspector/inspector_test.js");
  let child = util::deno_cmd()
    .arg("test")
    .arg(inspect_flag_with_unique_port("--inspect-brk"))
    .arg(script)
    .env("NO_COLOR", "1")
    .piped_output()
    .spawn()
    .unwrap();
  let mut tester = InspectorTester::create(child, ignore_script_parsed).await;
  tester.assert_stderr_for_inspect_brk();
  // Enable the Runtime and Debugger CDP domains.
  tester
    .send_many(&[
      json!({"id":1,"method":"Runtime.enable"}),
      json!({"id":2,"method":"Debugger.enable"}),
    ])
    .await;
  tester.assert_received_messages(
    &[
      r#"{"id":1,"result":{}}"#,
      r#"{"id":2,"result":{"debuggerId":"#,
    ],
    &[
      r#"{"method":"Runtime.executionContextCreated","params":{"context":{"id":1,"#,
    ],
  )
  .await;
  // Let execution start; with --inspect-brk it should immediately pause.
  tester
    .send(json!({"id":3,"method":"Runtime.runIfWaitingForDebugger"}))
    .await;
  tester
    .assert_received_messages(
      &[r#"{"id":3,"result":{}}"#],
      &[r#"{"method":"Debugger.paused","#],
    )
    .await;
  // Evaluate an expression while paused; the exact result payload is pinned.
  tester
    .send(json!({
      "id":4,
      "method":"Runtime.evaluate",
      "params":{
        "expression":"1 + 1",
        "contextId":1,
        "includeCommandLineAPI":true,
        "silent":false,
        "returnByValue":true
      }
    }))
    .await;
  tester.assert_received_messages(
    &[r#"{"id":4,"result":{"result":{"type":"number","value":2,"description":"2"}}}"#],
    &[],
  )
  .await;
  // Resume; the test should then run and report success on stdout.
  tester
    .send(json!({"id":5,"method":"Debugger.resume"}))
    .await;
  tester
    .assert_received_messages(&[r#"{"id":5,"result":{}}"#], &[])
    .await;
  assert_starts_with!(&tester.stdout_line(), "running 1 test from");
  let line = tester.stdout_line();
  assert_contains!(line, "basic test ... ok");
  tester.child.kill().unwrap();
  tester.child.wait().unwrap();
}
#[test]
async fn inspector_with_ts_files() {
  // Run a TypeScript entry point (with --check) under --inspect-brk and
  // verify the debugger reports scriptParsed for all three testdata modules
  // and can return their sources via Debugger.getScriptSource.
  let script = util::testdata_path().join("inspector/test.ts");
  let child = util::deno_cmd()
    .arg("run")
    .arg("--check")
    .arg(inspect_flag_with_unique_port("--inspect-brk"))
    .arg(script)
    .piped_output()
    .spawn()
    .unwrap();
  // Helper function to check if a scriptParsed message is for a
  // testdata/inspector file by checking the actual URL field (not stackTrace
  // which may reference test files).
  fn is_test_script(msg: &str) -> bool {
    if !msg.starts_with(r#"{"method":"Debugger.scriptParsed","#) {
      return false;
    }
    if let Ok(parsed) = serde_json::from_str::<serde_json::Value>(msg)
      && let Some(url) = parsed
        .get("params")
        .and_then(|p| p.get("url"))
        .and_then(|u| u.as_str())
    {
      return url.contains("testdata/inspector");
    }
    false
  }
  fn notification_filter(msg: &str) -> bool {
    (msg.starts_with(r#"{"method":"Debugger.scriptParsed","#)
      && msg.contains("testdata/inspector"))
      || !msg.starts_with(r#"{"method":"Debugger.scriptParsed","#)
  }
  // Helper to get URL from scriptParsed JSON (only the actual url field, not
  // any url mentioned inside stackTrace).
  fn get_script_url(msg: &str) -> Option<String> {
    serde_json::from_str::<serde_json::Value>(msg)
      .ok()
      .and_then(|v| v["params"]["url"].as_str().map(|s| s.to_string()))
  }
  // Find the collected scriptParsed message whose URL contains
  // `url_fragment` and return its scriptId. Panics when the script was not
  // collected. (Replaces three copy-pasted find+parse stanzas.)
  fn script_id_for(scripts: &[String], url_fragment: &str) -> String {
    let msg = scripts
      .iter()
      .find(|s| {
        get_script_url(s)
          .map(|url| url.contains(url_fragment))
          .unwrap_or(false)
      })
      .unwrap_or_else(|| panic!("should have {url_fragment}"));
    let v: serde_json::Value = serde_json::from_str(msg).unwrap();
    v["params"]["scriptId"].as_str().unwrap().to_string()
  }
  let mut tester = InspectorTester::create(child, notification_filter).await;
  tester.assert_stderr_for_inspect_brk();
  assert_eq!(&tester.stderr_line(), "Debugger session started.");
  tester
    .send_many(&[
      json!({"id":1,"method":"Runtime.enable"}),
      json!({"id":2,"method":"Debugger.enable"}),
    ])
    .await;
  tester
    .assert_received_messages(
      &[r#"{"id":1,"result":{}}"#],
      &[
        r#"{"method":"Runtime.executionContextCreated","params":{"context":{"id":1,"#,
      ],
    )
    .await;
  // Collect scriptParsed messages for test files until we have all 3.
  // With recent V8 changes, internal scripts may include stackTrace with test
  // file references, so we need to filter by the actual URL field rather than
  // just string matching.
  let mut scripts = Vec::new();
  let mut debugger_response = None;
  while scripts.len() < 3 {
    let msg = tester.recv().await;
    if is_test_script(&msg) {
      scripts.push(msg);
    } else if msg.starts_with(r#"{"id":2,"result":{"debuggerId":"#) {
      debugger_response = Some(msg);
    }
    // Ignore other scriptParsed messages (internal scripts)
  }
  let script1_id = script_id_for(&scripts, "testdata/inspector/test.ts");
  let script2_id = script_id_for(&scripts, "testdata/inspector/foo.ts");
  let script3_id = script_id_for(&scripts, "testdata/inspector/bar.js");
  // If we haven't received the Debugger.enable response yet, get it now
  if debugger_response.is_none() {
    tester
      .assert_received_messages(&[r#"{"id":2,"result":{"debuggerId":"#], &[])
      .await;
  }
  tester
    .send(json!({"id":3,"method":"Runtime.runIfWaitingForDebugger"}))
    .await;
  tester
    .assert_received_messages(
      &[r#"{"id":3,"result":{}}"#],
      &[r#"{"method":"Debugger.paused","#],
    )
    .await;
  // Request the source of each of the three parsed scripts.
  tester.send_many(
    &[
      json!({"id":4,"method":"Debugger.getScriptSource","params":{"scriptId":script1_id.as_str()}}),
      json!({"id":5,"method":"Debugger.getScriptSource","params":{"scriptId":script2_id.as_str()}}),
      json!({"id":6,"method":"Debugger.getScriptSource","params":{"scriptId":script3_id.as_str()}}),
    ])
    .await;
  tester.assert_received_messages(
    &[
      r#"{"id":4,"result":{"scriptSource":"import { foo } from \"./foo.ts\";\nimport { bar } from \"./bar.js\";\nconsole.log(foo());\nconsole.log(bar());\n//# sourceMappingURL=data:application/json;base64,"#,
      r#"{"id":5,"result":{"scriptSource":"class Foo {\n  hello() {\n    return \"hello\";\n  }\n}\nexport function foo() {\n  const f = new Foo();\n  return f.hello();\n}\n//# sourceMappingURL=data:application/json;base64,"#,
      r#"{"id":6,"result":{"scriptSource":"export function bar() {\n  return \"world\";\n}\n"#,
    ],
    &[],
  )
  .await;
  tester
    .send(json!({"id":7,"method":"Debugger.resume"}))
    .await;
  tester
    .assert_received_messages(&[r#"{"id":7,"result":{}}"#], &[])
    .await;
  assert_eq!(&tester.stdout_line(), "hello");
  assert_eq!(&tester.stdout_line(), "world");
  tester.assert_received_messages(
    &[],
    &[
      r#"{"method":"Debugger.resumed","params":{}}"#,
      r#"{"method":"Runtime.consoleAPICalled","#,
      r#"{"method":"Runtime.consoleAPICalled","#,
      r#"{"method":"Runtime.executionContextDestroyed","params":{"executionContextId":1"#,
    ],
  )
  .await;
  assert_eq!(
    &tester.stderr_line(),
    "Program finished. Waiting for inspector to disconnect to exit the process..."
  );
  assert!(!tester.stderr_lines.check_lines.is_empty());
  tester.child.kill().unwrap();
  tester.child.wait().unwrap();
}
#[test]
async fn inspector_memory() {
let script = util::testdata_path().join("inspector/memory.js");
let child = util::deno_cmd()
.arg("run")
.arg(inspect_flag_with_unique_port("--inspect-brk"))
.arg(script)
.env("RUST_BACKTRACE", "1")
.piped_output()
.spawn()
.unwrap();
let mut tester = InspectorTester::create(child, ignore_script_parsed).await;
tester.assert_stderr_for_inspect_brk();
// Send all setup commands at once
tester
.send_many(&[
json!({"id":1,"method":"Runtime.enable"}),
json!({"id":2,"method":"Debugger.enable"}),
json!({"id":3,"method":"Runtime.runIfWaitingForDebugger"}),
json!({"id":4,"method":"HeapProfiler.enable"}),
])
.await;
// Collect responses - CDP messages can arrive in any order
let mut got_runtime = false;
let mut got_debugger = false;
let mut got_run = false;
let mut got_heap = false;
let mut got_context = false;
let mut got_paused = false;
for _ in 0..15 {
let msg = tester.recv().await;
if msg.starts_with(r#"{"id":1,"result":{}}"#) {
got_runtime = true;
}
if msg.starts_with(r#"{"id":2,"result":{"debuggerId":"#) {
got_debugger = true;
}
if msg.starts_with(r#"{"id":3,"result":{}}"#) {
got_run = true;
}
if msg.starts_with(r#"{"id":4,"result":{}}"#) {
got_heap = true;
}
if msg.starts_with(r#"{"method":"Runtime.executionContextCreated"#) {
got_context = true;
}
if msg.starts_with(r#"{"method":"Debugger.paused""#) {
got_paused = true;
}
if got_runtime
&& got_debugger
&& got_run
&& got_heap
&& got_context
&& got_paused
{
break;
}
}
assert!(got_runtime, "Expected Runtime.enable response");
assert!(got_debugger, "Expected Debugger.enable response");
assert!(got_run, "Expected runIfWaitingForDebugger response");
assert!(got_heap, "Expected HeapProfiler.enable response");
assert!(got_context, "Expected executionContextCreated notification");
assert!(got_paused, "Expected Debugger.paused notification");
tester
.send(json!({"id":5,"method":"Runtime.getHeapUsage", "params": {}}))
.await;
let json_msg = tester.recv_as_json().await;
assert_eq!(json_msg["id"].as_i64().unwrap(), 5);
let result = &json_msg["result"];
assert!(
result["usedSize"].as_i64().unwrap()
<= result["totalSize"].as_i64().unwrap()
);
tester
.send(json!({
"id":6,
"method":"HeapProfiler.takeHeapSnapshot",
"params": {
"reportProgress": true,
"treatGlobalObjectsAsRoots": true,
"captureNumberValue": false
}
}))
.await;
let mut progress_report_completed = false;
loop {
let msg = tester.recv().await;
// TODO(bartlomieju): can be abstracted
if !progress_report_completed
&& msg.starts_with(
r#"{"method":"HeapProfiler.reportHeapSnapshotProgress","params""#,
)
{
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/integration/lsp_tests.rs | tests/integration/lsp_tests.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::fs;
use pretty_assertions::assert_eq;
use serde::Deserialize;
use serde::Serialize;
use serde_json::Value;
use serde_json::json;
use test_util::TestContextBuilder;
use test_util::assert_starts_with;
use test_util::assertions::assert_json_subset;
use test_util::eprintln;
use test_util::lsp::LspClient;
use test_util::lsp::range_of;
use test_util::lsp::source_file;
use test_util::test;
use test_util::testdata_path;
use test_util::url_to_notebook_cell_uri;
use test_util::url_to_uri;
use tower_lsp::lsp_types as lsp;
use url::Url;
#[test(timeout = 300)]
fn lsp_startup_shutdown() {
  // Smoke test: the language server must start, shut down cleanly, and exit
  // with a success status.
  let ctx = TestContextBuilder::new().use_temp_cwd().build();
  let mut lsp = ctx.new_lsp_command().build();
  lsp.initialize_default();
  lsp.shutdown();
  assert!(lsp.wait_exit().unwrap().success());
}
#[test(timeout = 300)]
fn lsp_config_setting() {
  // A tsconfig including the "dom" lib should make `location` type-check,
  // so opening the file yields zero diagnostics.
  let ctx = TestContextBuilder::new().use_temp_cwd().build();
  let tmp = ctx.temp_dir();
  tmp.write(
    "lib.tsconfig.json",
    r#"{
"compilerOptions": {
"lib": ["deno.ns", "deno.unstable", "dom"]
}
}"#,
  );
  let mut lsp = ctx.new_lsp_command().build();
  lsp.initialize(|builder| {
    builder.set_config("lib.tsconfig.json");
  });
  let diags = lsp.did_open(json!({
    "textDocument": {
      "uri": "file:///a/file.ts",
      "languageId": "typescript",
      "version": 1,
      "text": "location.pathname;\n"
    }
  }));
  assert!(diags.all().is_empty());
  lsp.shutdown();
}
#[test(timeout = 300)]
fn lsp_config_setting_compiler_options_types() {
  // `compilerOptions.types` in the configured tsconfig should bring the
  // declaration file into scope even with document preloading disabled.
  let ctx = TestContextBuilder::new().use_temp_cwd().build();
  let tmp = ctx.temp_dir();
  tmp.write(
    "types.tsconfig.json",
    r#"{
"compilerOptions": {
"types": ["./a.d.ts"]
},
"lint": {
"rules": {
"tags": []
}
}
}"#,
  );
  tmp.write("a.d.ts", "// deno-lint-ignore-file no-var\ndeclare var a: string;");
  let mut lsp = ctx.new_lsp_command().build();
  lsp.initialize(|builder| {
    builder
      .set_config("types.tsconfig.json")
      // avoid finding the declaration file via the document preload
      .set_preload_limit(0);
  });
  let diags = lsp.did_open(json!({
    "textDocument": {
      "uri": url_to_uri(&tmp.url().join("test.ts").unwrap()).unwrap(),
      "languageId": "typescript",
      "version": 1,
      "text": "console.log(a);\n"
    }
  }));
  assert_eq!(json!(diags.all()), json!([]));
  lsp.shutdown();
}
#[test(timeout = 300)]
fn lsp_config_setting_compiler_options_types_config_sub_dir() {
  // When the tsconfig lives in a sub directory, its relative
  // `compilerOptions.types` entries must resolve relative to that file.
  let ctx = TestContextBuilder::new().use_temp_cwd().build();
  let tmp = ctx.temp_dir();
  let sub_dir = tmp.path().join("sub_dir");
  sub_dir.create_dir_all();
  sub_dir.join("types.tsconfig.json").write(
    r#"{
"compilerOptions": {
"types": ["./a.d.ts"]
},
"lint": {
"rules": {
"tags": []
}
}
}"#,
  );
  sub_dir
    .join("a.d.ts")
    .write("// deno-lint-ignore-file no-var\ndeclare var a: string;");
  tmp.write("deno.json", "{}");
  let mut lsp = ctx.new_lsp_command().build();
  lsp.initialize(|builder| {
    builder
      .set_config("sub_dir/types.tsconfig.json")
      // avoid finding the declaration file via the document preload
      .set_preload_limit(0);
  });
  let diags = lsp.did_open(json!({
    "textDocument": {
      "uri": url_to_uri(&tmp.url().join("test.ts").unwrap()).unwrap(),
      "languageId": "typescript",
      "version": 1,
      "text": "console.log(a);\n"
    }
  }));
  assert_eq!(json!(diags.all()), json!([]));
  lsp.shutdown();
}
#[test(timeout = 300)]
fn lsp_triple_slash_types() {
  // A `/// <reference types="..." />` directive should pull in the local
  // declaration file so `a` resolves without diagnostics.
  let ctx = TestContextBuilder::new().use_temp_cwd().build();
  let tmp = ctx.temp_dir();
  tmp.write("a.d.ts", "// deno-lint-ignore-file no-var\ndeclare var a: string;");
  let mut lsp = ctx.new_lsp_command().build();
  lsp.initialize_default();
  let diags = lsp.did_open(json!({
    "textDocument": {
      "uri": url_to_uri(&tmp.url().join("test.ts").unwrap()).unwrap(),
      "languageId": "typescript",
      "version": 1,
      "text": "/// <reference types=\"./a.d.ts\" />\n\nconsole.log(a);\n"
    }
  }));
  assert!(diags.all().is_empty());
  lsp.shutdown();
}
#[test(timeout = 300)]
fn unadded_dependency_message_with_import_map() {
  // A bare specifier ("@std/fs") that is neither a dependency nor mapped in
  // the (external, empty) import map should produce a diagnostic carrying
  // the `deno add jsr:@std/fs` hint, alongside the usual unused-import lints.
  let context = TestContextBuilder::new()
    .use_http_server()
    .use_temp_cwd()
    .build();
  let temp_dir = context.temp_dir();
  temp_dir.write(
    "import_map.json",
    json!({
      "imports": {
      }
    })
    .to_string(),
  );
  temp_dir.write(
    "deno.json",
    json!({
      "importMap": "import_map.json".to_string(),
    })
    .to_string(),
  );
  temp_dir.write(
    "file.ts",
    r#"
import * as x from "@std/fs";
"#,
  );
  let mut client = context.new_lsp_command().build();
  client.initialize_default();
  client.cache_specifier(temp_dir.url().join("file.ts").unwrap());
  client.read_diagnostics();
  let diagnostics = client.did_open(json!({
    "textDocument": {
      "uri": url_to_uri(&temp_dir.url().join("file.ts").unwrap()).unwrap(),
      "languageId": "typescript",
      "version": 1,
      "text": temp_dir.read_to_string("file.ts"),
    }
  }));
  // expected lsp_messages don't include the file path
  let mut expected_lsp_messages = Vec::from([
    "`x` is never used\nIf this is intentional, prefix it with an underscore like `_x`",
    "'x' is declared but its value is never read.",
    "Import \"@std/fs\" not a dependency and not in import map from \" hint: If you want to use the JSR package, try running `deno add jsr:@std/fs`",
  ]);
  expected_lsp_messages.sort();
  let all_diagnostics = diagnostics.all();
  let mut correct_lsp_messages = all_diagnostics
    .iter()
    .map(|d| d.message.as_str())
    .collect::<Vec<&str>>();
  correct_lsp_messages.sort();
  // After sorting, index 1 holds the "not a dependency" message, which embeds
  // the temp-dir path. Keep the text before the literal "file" (the start of
  // the embedded file URL) and re-append the hint line so the comparison is
  // machine-independent.
  let part1 = correct_lsp_messages[1].split("file").collect::<Vec<_>>()[0];
  let part2 = correct_lsp_messages[1].split('\n').collect::<Vec<_>>()[1];
  let file_path_removed_from_message = format!("{} {}", part1, part2);
  correct_lsp_messages[1] = file_path_removed_from_message.as_str();
  assert_eq!(correct_lsp_messages, expected_lsp_messages);
  client.shutdown();
}
#[test(timeout = 300)]
fn unadded_dependency_message() {
  // A bare specifier ("@std/fs") not declared in the deno.json "imports"
  // should produce a diagnostic carrying the `deno add jsr:@std/fs` hint,
  // alongside the usual unused-import lints.
  let context = TestContextBuilder::new()
    .use_http_server()
    .use_temp_cwd()
    .build();
  let temp_dir = context.temp_dir();
  temp_dir.write(
    "deno.json",
    json!({
      "imports": {
      }
    })
    .to_string(),
  );
  temp_dir.write(
    "file.ts",
    r#"
import * as x from "@std/fs";
"#,
  );
  let mut client = context.new_lsp_command().build();
  client.initialize_default();
  client.cache_specifier(temp_dir.url().join("file.ts").unwrap());
  client.read_diagnostics();
  let diagnostics = client.did_open(json!({
    "textDocument": {
      "uri": url_to_uri(&temp_dir.url().join("file.ts").unwrap()).unwrap(),
      "languageId": "typescript",
      "version": 1,
      "text": temp_dir.read_to_string("file.ts"),
    }
  }));
  // expected lsp_messages don't include the file path
  let mut expected_lsp_messages = Vec::from([
    "`x` is never used\nIf this is intentional, prefix it with an underscore like `_x`",
    "'x' is declared but its value is never read.",
    "Import \"@std/fs\" not a dependency and not in import map from \" hint: If you want to use the JSR package, try running `deno add jsr:@std/fs`",
  ]);
  expected_lsp_messages.sort();
  let all_diagnostics = diagnostics.all();
  let mut correct_lsp_messages = all_diagnostics
    .iter()
    .map(|d| d.message.as_str())
    .collect::<Vec<&str>>();
  correct_lsp_messages.sort();
  // After sorting, index 1 holds the "not a dependency" message, which embeds
  // the temp-dir path. Keep the text before the literal "file" (the start of
  // the embedded file URL) and re-append the hint line so the comparison is
  // machine-independent.
  let part1 = correct_lsp_messages[1].split("file").collect::<Vec<_>>()[0];
  let part2 = correct_lsp_messages[1].split('\n').collect::<Vec<_>>()[1];
  let file_path_removed_from_message = format!("{} {}", part1, part2);
  correct_lsp_messages[1] = file_path_removed_from_message.as_str();
  assert_eq!(correct_lsp_messages, expected_lsp_messages);
  client.shutdown();
}
#[test(timeout = 300)]
fn lsp_import_map() {
  // An explicitly configured import map should make "/~/b.ts" resolve to
  // ./lib/b.ts, and hover over the imported symbol should work.
  let ctx = TestContextBuilder::new().use_temp_cwd().build();
  let tmp = ctx.temp_dir();
  tmp.write(
    "import-map.json",
    r#"{
"imports": {
"/~/": "./lib/"
}
}"#,
  );
  tmp.write("lib/b.ts", r#"export const b = "b";"#);
  let mut lsp = ctx.new_lsp_command().build();
  lsp.initialize(|builder| {
    builder.set_import_map("import-map.json");
  });
  let uri = url_to_uri(&tmp.url().join("a.ts").unwrap()).unwrap();
  let diags = lsp.did_open(json!({
    "textDocument": {
      "uri": uri,
      "languageId": "typescript",
      "version": 1,
      "text": "import { b } from \"/~/b.ts\";\n\nconsole.log(b);\n"
    }
  }));
  assert_eq!(json!(diags.all()), json!([]));
  let hover = lsp.write_request(
    "textDocument/hover",
    json!({
      "textDocument": {
        "uri": uri
      },
      "position": { "line": 2, "character": 12 }
    }),
  );
  assert_eq!(
    hover,
    json!({
      "contents": {
        "kind": "markdown",
        "value": "```typescript\n(alias) const b: \"b\"\nimport b\n```",
      },
      "range": {
        "start": { "line": 2, "character": 12 },
        "end": { "line": 2, "character": 13 }
      }
    })
  );
  lsp.shutdown();
}
#[test(timeout = 300)]
fn lsp_import_map_remote() {
  // An import map fetched over HTTP should resolve the bare "print_hello"
  // specifier once the module graph has been cached.
  let ctx = TestContextBuilder::new()
    .use_http_server()
    .use_temp_cwd()
    .build();
  let tmp = ctx.temp_dir();
  tmp.write(
    "file.ts",
    r#"
import { printHello } from "print_hello";
printHello();
"#,
  );
  let mut lsp = ctx.new_lsp_command().build();
  lsp.initialize(|builder| {
    builder.set_import_map(
      "http://localhost:4545/import_maps/import_map_remote.json",
    );
  });
  lsp.cache_specifier(tmp.url().join("file.ts").unwrap());
  lsp.read_diagnostics();
  let diags = lsp.did_open(json!({
    "textDocument": {
      "uri": url_to_uri(&tmp.url().join("file.ts").unwrap()).unwrap(),
      "languageId": "typescript",
      "version": 1,
      "text": tmp.read_to_string("file.ts"),
    }
  }));
  assert!(diags.all().is_empty());
  lsp.shutdown();
}
#[test(timeout = 300)]
fn lsp_import_map_data_url() {
  // An import map supplied as a data: URL must be honored. A "no-cache"
  // diagnostic naming the mapped remote specifier proves the mapping applied.
  let ctx = TestContextBuilder::new().use_temp_cwd().build();
  let tmp = ctx.temp_dir();
  let mut lsp = ctx.new_lsp_command().build();
  lsp.initialize(|builder| {
    builder.set_import_map("data:application/json;utf8,{\"imports\": { \"example\": \"https://deno.land/x/example/mod.ts\" }}");
  });
  let diags = lsp.did_open(json!({
    "textDocument": {
      "uri": url_to_uri(&tmp.url().join("file.ts").unwrap()).unwrap(),
      "languageId": "typescript",
      "version": 1,
      "text": "import example from \"example\";\n"
    }
  }));
  // This indicates that the import map is applied correctly.
  let map_applied = diags.all().iter().any(|d| {
    d.code == Some(lsp::NumberOrString::String("no-cache".to_string()))
      && d.message.contains("https://deno.land/x/example/mod.ts")
  });
  assert!(map_applied);
  lsp.shutdown();
}
#[test(timeout = 300)]
fn lsp_import_map_config_file() {
  // A deno config file may reference a separate import map via "importMap";
  // imports resolved through it should type-check and hover should work.
  let ctx = TestContextBuilder::new().use_temp_cwd().build();
  let tmp = ctx.temp_dir();
  tmp.write(
    "deno.import_map.jsonc",
    r#"{
"importMap": "import-map.json"
}"#,
  );
  tmp.write(
    "import-map.json",
    r#"{
"imports": {
"/~/": "./lib/"
}
}"#,
  );
  tmp.write("lib/b.ts", r#"export const b = "b";"#);
  let mut lsp = ctx.new_lsp_command().build();
  lsp.initialize(|builder| {
    builder.set_config("./deno.import_map.jsonc");
  });
  let uri = url_to_uri(&tmp.url().join("a.ts").unwrap()).unwrap();
  let diags = lsp.did_open(json!({
    "textDocument": {
      "uri": uri,
      "languageId": "typescript",
      "version": 1,
      "text": "import { b } from \"/~/b.ts\";\n\nconsole.log(b);\n"
    }
  }));
  assert!(diags.all().is_empty());
  let hover = lsp.write_request(
    "textDocument/hover",
    json!({
      "textDocument": {
        "uri": uri
      },
      "position": { "line": 2, "character": 12 }
    }),
  );
  assert_eq!(
    hover,
    json!({
      "contents": {
        "kind": "markdown",
        "value": "```typescript\n(alias) const b: \"b\"\nimport b\n```",
      },
      "range": {
        "start": { "line": 2, "character": 12 },
        "end": { "line": 2, "character": 13 }
      }
    })
  );
  lsp.shutdown();
}
#[test(timeout = 300)]
fn lsp_import_map_embedded_in_config_file() {
  // An import map embedded directly in a commented JSONC config file should
  // resolve "/~/" imports, with working hover info.
  let ctx = TestContextBuilder::new().use_temp_cwd().build();
  let tmp = ctx.temp_dir();
  tmp.write(
    "deno.embedded_import_map.jsonc",
    r#"{
// some comment
"imports": {
"/~/": "./lib/"
}
}"#,
  );
  tmp.write("lib/b.ts", r#"export const b = "b";"#);
  let mut lsp = ctx.new_lsp_command().build();
  lsp.initialize(|builder| {
    builder.set_config("./deno.embedded_import_map.jsonc");
  });
  let uri = url_to_uri(&tmp.url().join("a.ts").unwrap()).unwrap();
  let diags = lsp.did_open(json!({
    "textDocument": {
      "uri": uri,
      "languageId": "typescript",
      "version": 1,
      "text": "import { b } from \"/~/b.ts\";\n\nconsole.log(b);\n"
    }
  }));
  assert!(diags.all().is_empty());
  let hover = lsp.write_request(
    "textDocument/hover",
    json!({
      "textDocument": {
        "uri": uri
      },
      "position": { "line": 2, "character": 12 }
    }),
  );
  assert_eq!(
    hover,
    json!({
      "contents": {
        "kind": "markdown",
        "value": "```typescript\n(alias) const b: \"b\"\nimport b\n```",
      },
      "range": {
        "start": { "line": 2, "character": 12 },
        "end": { "line": 2, "character": 13 }
      }
    })
  );
  lsp.shutdown();
}
#[test(timeout = 300)]
fn lsp_import_map_embedded_in_config_file_after_initialize() {
  // Start with an empty config so the "/~/" import fails, then write the
  // embedded import map and verify the server reloads it after a
  // watched-files notification.
  let ctx = TestContextBuilder::new().use_temp_cwd().build();
  let tmp = ctx.temp_dir();
  tmp.write("deno.embedded_import_map.jsonc", "{}");
  tmp.write("lib/b.ts", r#"export const b = "b";"#);
  let mut lsp = ctx.new_lsp_command().build();
  lsp.initialize(|builder| {
    builder.set_config("./deno.embedded_import_map.jsonc");
  });
  let uri = url_to_uri(&tmp.url().join("a.ts").unwrap()).unwrap();
  let diags = lsp.did_open(json!({
    "textDocument": {
      "uri": uri,
      "languageId": "typescript",
      "version": 1,
      "text": "import { b } from \"/~/b.ts\";\n\nconsole.log(b);\n"
    }
  }));
  // With an empty import map the "/~/b.ts" specifier can't resolve.
  assert_eq!(diags.all().len(), 1);
  // update the import map
  tmp.write(
    "deno.embedded_import_map.jsonc",
    r#"{
"imports": {
"/~/": "./lib/"
}
}"#,
  );
  lsp.did_change_watched_files(json!({
    "changes": [{
      "uri": url_to_uri(&tmp.url().join("deno.embedded_import_map.jsonc").unwrap()).unwrap(),
      "type": 2
    }]
  }));
  assert_eq!(json!(lsp.read_diagnostics().all()), json!([]));
  let hover = lsp.write_request(
    "textDocument/hover",
    json!({
      "textDocument": {
        "uri": uri
      },
      "position": { "line": 2, "character": 12 }
    }),
  );
  assert_eq!(
    hover,
    json!({
      "contents": {
        "kind": "markdown",
        "value": "```typescript\n(alias) const b: \"b\"\nimport b\n```",
      },
      "range": {
        "start": { "line": 2, "character": 12 },
        "end": { "line": 2, "character": 13 }
      }
    })
  );
  lsp.shutdown();
}
#[test(timeout = 300)]
fn lsp_import_map_config_file_auto_discovered() {
  // An auto-discovered deno.jsonc supplies the import map; the config then
  // round-trips through a syntax error and back, and resolution must recover.
  let context = TestContextBuilder::new().use_temp_cwd().build();
  let temp_dir = context.temp_dir();
  temp_dir.write("lib/b.ts", r#"export const b = "b";"#);
  let mut client = context.new_lsp_command().build();
  client.initialize_default();
  // add the deno.json
  temp_dir.write("deno.jsonc", r#"{ "imports": { "/~/": "./lib/" } }"#);
  client.did_change_watched_files(json!({
    "changes": [{
      "uri": url_to_uri(&temp_dir.url().join("deno.jsonc").unwrap()).unwrap(),
      "type": 2
    }]
  }));
  client.wait_until_stderr_line(|line| {
    line.contains(" Resolved Deno configuration file:")
  });
  client.read_diagnostics();
  let uri = url_to_uri(&temp_dir.url().join("a.ts").unwrap()).unwrap();
  let diagnostics = client.did_open(json!({
    "textDocument": {
      "uri": uri,
      "languageId": "typescript",
      "version": 1,
      "text": "import { b } from \"/~/b.ts\";\n\nconsole.log(b);\n"
    }
  }));
  assert_eq!(diagnostics.all().len(), 0);
  // The identical hover request/response pair is checked both before and
  // after the config file breaks and is fixed, so factor it into a closure
  // instead of duplicating the expected JSON payload.
  let assert_hover_resolves = |client: &mut LspClient| {
    let res = client.write_request(
      "textDocument/hover",
      json!({
        "textDocument": {
          "uri": uri
        },
        "position": { "line": 2, "character": 12 }
      }),
    );
    assert_eq!(
      res,
      json!({
        "contents": {
          "kind": "markdown",
          "value": "```typescript\n(alias) const b: \"b\"\nimport b\n```",
        },
        "range": {
          "start": { "line": 2, "character": 12 },
          "end": { "line": 2, "character": 13 }
        }
      })
    );
  };
  assert_hover_resolves(&mut client);
  // now cause a syntax error
  temp_dir.write("deno.jsonc", r#",,#,#,,"#);
  client.did_change_watched_files(json!({
    "changes": [{
      "uri": url_to_uri(&temp_dir.url().join("deno.jsonc").unwrap()).unwrap(),
      "type": 2
    }]
  }));
  assert_eq!(client.read_diagnostics().all().len(), 1);
  // now fix it, and things should work again
  temp_dir.write("deno.jsonc", r#"{ "imports": { "/~/": "./lib/" } }"#);
  client.did_change_watched_files(json!({
    "changes": [{
      "uri": url_to_uri(&temp_dir.url().join("deno.jsonc").unwrap()).unwrap(),
      "type": 2
    }]
  }));
  client.wait_until_stderr_line(|line| {
    line.contains(" Resolved Deno configuration file:")
  });
  assert_eq!(json!(client.read_diagnostics().all()), json!([]));
  assert_hover_resolves(&mut client);
  client.shutdown();
}
#[test(timeout = 300)]
fn lsp_import_map_config_file_auto_discovered_symlink() {
  // Auto-discovery through a symlinked deno.json: the editor reports watched
  // file changes against the symlink's canonical *target*, yet the server
  // must still reload the config reachable via the workspace-root symlink.
  let context = TestContextBuilder::new()
    // DO NOT COPY THIS CODE. Very rare case where we want to force the temp
    // directory on the CI to not be a symlinked directory because we are
    // testing a scenario with a symlink to a non-symlink in the same directory
    // tree. Generally you want to ensure your code works in symlinked directories
    // so don't use this unless you have a similar scenario.
    .temp_dir_path(std::env::temp_dir().canonicalize().unwrap())
    .use_temp_cwd()
    .build();
  let temp_dir = context.temp_dir();
  temp_dir.write("lib/b.ts", r#"export const b = "b";"#);
  let mut client = context.new_lsp_command().build();
  client.initialize_default();
  // now create a symlink in the current directory to a subdir/deno.json
  // and ensure the watched files notification still works
  temp_dir.write("subdir/deno.json", r#"{ }"#);
  temp_dir.symlink_file(
    temp_dir.path().join("subdir").join("deno.json"),
    temp_dir.path().join("deno.json"),
  );
  client.did_change_watched_files(json!({
    "changes": [{
      // the client will give a watched file changed event for the symlink's target
      "uri": temp_dir.path().join("subdir/deno.json").canonicalize().uri_file(),
      "type": 2
    }]
  }));
  client.read_diagnostics();
  // this will discover the deno.json in the root
  let search_line = format!(
    " Resolved Deno configuration file: \"{}\"",
    temp_dir.url().join("deno.json").unwrap().as_str()
  );
  client.wait_until_stderr_line(|line| line.contains(&search_line));
  // now open a file which will cause a diagnostic because the import map is empty
  let diagnostics = client.did_open(json!({
    "textDocument": {
      "uri": url_to_uri(&temp_dir.url().join("a.ts").unwrap()).unwrap(),
      "languageId": "typescript",
      "version": 1,
      "text": "import { b } from \"/~/b.ts\";\n\nconsole.log(b);\n"
    }
  }));
  assert_eq!(diagnostics.all().len(), 1);
  // update the import map to have the imports now
  temp_dir.write("subdir/deno.json", r#"{ "imports": { "/~/": "./lib/" } }"#);
  client.did_change_watched_files(json!({
    "changes": [{
      // now still say that the target path has changed
      "uri": temp_dir.path().join("subdir/deno.json").canonicalize().uri_file(),
      "type": 2
    }]
  }));
  // the "/~/b.ts" import resolves now, so the earlier diagnostic disappears
  assert_eq!(client.read_diagnostics().all().len(), 0);
  client.shutdown();
}
#[test(timeout = 300)]
fn lsp_deno_config_setting_no_workspace() {
  // With no workspace folders, a deno.json passed via the `config` setting
  // (living outside the temp cwd) should still drive the lint rules.
  let ctx = TestContextBuilder::new()
    .use_http_server()
    .use_temp_cwd()
    .build();
  let tmp = ctx.temp_dir();
  let file =
    tmp.source_file("file.ts", "const foo_bar = 1;\nconsole.log(foo_bar);\n");
  // The config file intentionally lives in a *separate* temp directory so it
  // cannot be auto-discovered from the workspace.
  let config_dir = test_util::TempDir::new();
  config_dir.write(
    "deno.json",
    json!({
      "lint": {
        "rules": { "include": ["camelcase"] },
      },
    })
    .to_string(),
  );
  let mut lsp = ctx.new_lsp_command().build();
  lsp.initialize(|builder| {
    builder.set_workspace_folders(vec![]);
    builder.set_deno_enable(true);
    builder.set_config(config_dir.url().join("deno.json").unwrap());
  });
  let diags = lsp.did_open_file(&file);
  assert_eq!(
    json!(diags.all()),
    json!([
      {
        "range": {
          "start": { "line": 0, "character": 6 },
          "end": { "line": 0, "character": 13 },
        },
        "severity": 2,
        "code": "camelcase",
        "source": "deno-lint",
        "message": "Identifier 'foo_bar' is not in camel case.\nConsider renaming `foo_bar` to `fooBar`",
      },
    ]),
  );
  lsp.shutdown();
}
#[test(timeout = 300)]
fn lsp_deno_json_imports_comments_cache() {
  // A JSONC config containing comments must still parse; its "imports" entry
  // should map the bare specifier to the cached remote module.
  let ctx = TestContextBuilder::new()
    .use_http_server()
    .use_temp_cwd()
    .build();
  let tmp = ctx.temp_dir();
  tmp.write(
    "deno.jsonc",
    r#"{
// comment
"imports": {
"print_hello": "http://localhost:4545/import_maps/print_hello.ts",
},
}"#,
  );
  tmp.write(
    "file.ts",
    r#"
import { printHello } from "print_hello";
printHello();
"#,
  );
  let mut lsp = ctx.new_lsp_command().build();
  lsp.initialize_default();
  lsp.cache_specifier(tmp.url().join("file.ts").unwrap());
  lsp.read_diagnostics();
  let diags = lsp.did_open(json!({
    "textDocument": {
      "uri": url_to_uri(&tmp.url().join("file.ts").unwrap()).unwrap(),
      "languageId": "typescript",
      "version": 1,
      "text": tmp.read_to_string("file.ts"),
    }
  }));
  assert!(diags.all().is_empty());
  lsp.shutdown();
}
#[test(timeout = 300)]
fn lsp_import_map_node_specifiers() {
  // Mapping the bare "fs" specifier to "node:fs" through the import map
  // should type-check once @types/node has been cached.
  let ctx = TestContextBuilder::for_npm().use_temp_cwd().build();
  let tmp = ctx.temp_dir();
  tmp.write("deno.json", r#"{ "imports": { "fs": "node:fs" } }"#);
  // cache @types/node
  ctx
    .new_command()
    .args("cache npm:@types/node")
    .run()
    .skip_output_check()
    .assert_exit_code(0);
  let mut lsp = ctx.new_lsp_command().build();
  lsp.initialize(|builder| {
    builder.set_config("./deno.json");
  });
  let diags = lsp.did_open(json!({
    "textDocument": {
      "uri": url_to_uri(&tmp.url().join("a.ts").unwrap()).unwrap(),
      "languageId": "typescript",
      "version": 1,
      "text": "import fs from \"fs\";\nconsole.log(fs);"
    }
  }));
  assert!(diags.all().is_empty());
  lsp.shutdown();
}
// Regression test for https://github.com/denoland/deno/issues/19802.
// Disable the `workspace/configuration` capability. Ensure the LSP falls back
// to using `enablePaths` from the `InitializationOptions`.
#[test(timeout = 300)]
fn lsp_workspace_enable_paths_no_workspace_configuration() {
let context = TestContextBuilder::new().use_temp_cwd().build();
let temp_dir = context.temp_dir();
// Two otherwise-identical files: only `main_enabled.ts` is listed in
// `enablePaths`, so only it should get language-server features.
temp_dir.write("main_disabled.ts", "Date.now()");
temp_dir.write("main_enabled.ts", "Date.now()");
let mut client = context.new_lsp_command().build();
client.initialize(|builder| {
// Turn off `workspace/configuration` so the server cannot query the
// client for settings and must use the initialization options instead.
builder.with_capabilities(|capabilities| {
capabilities.workspace.as_mut().unwrap().configuration = Some(false);
});
builder.set_workspace_folders(vec![lsp::WorkspaceFolder {
uri: temp_dir.uri(),
name: "project".to_string(),
}]);
builder.set_root_uri(temp_dir.uri());
builder.set_enable_paths(vec!["./main_enabled.ts".to_string()]);
});
client.did_open(json!({
"textDocument": {
"uri": url_to_uri(&temp_dir.url().join("main_disabled.ts").unwrap()).unwrap(),
"languageId": "typescript",
"version": 1,
"text": temp_dir.read_to_string("main_disabled.ts"),
}
}));
client.did_open(json!({
"textDocument": {
"uri": url_to_uri(&temp_dir.url().join("main_enabled.ts").unwrap()).unwrap(),
"languageId": "typescript",
"version": 1,
"text": temp_dir.read_to_string("main_enabled.ts"),
}
}));
// Hover in the disabled file: expect no response (null).
let res = client.write_request(
"textDocument/hover",
json!({
"textDocument": {
"uri": url_to_uri(&temp_dir.url().join("main_disabled.ts").unwrap()).unwrap(),
},
"position": { "line": 0, "character": 5 }
}),
);
assert_eq!(res, json!(null));
// Hover in the enabled file: expect full hover info for `Date.now`.
let res = client.write_request(
"textDocument/hover",
json!({
"textDocument": {
"uri": url_to_uri(&temp_dir.url().join("main_enabled.ts").unwrap()).unwrap(),
},
"position": { "line": 0, "character": 5 }
}),
);
assert_eq!(
res,
json!({
"contents": {
"kind": "markdown",
"value": "```typescript\n(method) DateConstructor.now(): number\n```\n\nReturns the number of milliseconds elapsed since midnight, January 1, 1970 Universal Coordinated Time (UTC).",
},
"range": {
"start": { "line": 0, "character": 5, },
"end": { "line": 0, "character": 8, }
}
})
);
client.shutdown();
}
#[test(timeout = 300)]
fn lsp_did_refresh_deno_configuration_tree_notification() {
let context = TestContextBuilder::new().use_temp_cwd().build();
let temp_dir = context.temp_dir();
temp_dir.write(
"workspace/deno.json",
json!({
"workspace": [
"member1",
"member2",
],
})
.to_string(),
);
temp_dir.write("workspace/member1/deno.json", json!({}).to_string());
temp_dir.write("workspace/member1/package.json", json!({}).to_string());
temp_dir.write("workspace/member2/package.json", json!({}).to_string());
temp_dir.write("non_workspace1/deno.json", json!({}).to_string());
let mut client = context.new_lsp_command().build();
client.initialize_default();
let mut res = client
.read_notification_with_method::<Value>(
"deno/didRefreshDenoConfigurationTree",
)
.unwrap();
res.as_object_mut().unwrap().remove("denoDirNpmFolderUri");
assert_eq!(
res,
json!({
"data": [
{
"scopeUri": url_to_uri(&temp_dir.url().join("non_workspace1/").unwrap()).unwrap(),
"workspaceRootScopeUri": null,
"denoJson": {
"uri": url_to_uri(&temp_dir.url().join("non_workspace1/deno.json").unwrap()).unwrap(),
},
"packageJson": null,
},
{
"scopeUri": url_to_uri(&temp_dir.url().join("workspace/").unwrap()).unwrap(),
"workspaceRootScopeUri": null,
"denoJson": {
"uri": url_to_uri(&temp_dir.url().join("workspace/deno.json").unwrap()).unwrap(),
},
"packageJson": null,
},
{
"scopeUri": url_to_uri(&temp_dir.url().join("workspace/member1/").unwrap()).unwrap(),
"workspaceRootScopeUri": url_to_uri(&temp_dir.url().join("workspace/").unwrap()).unwrap(),
"denoJson": {
"uri": url_to_uri(&temp_dir.url().join("workspace/member1/deno.json").unwrap()).unwrap(),
},
"packageJson": {
"uri": url_to_uri(&temp_dir.url().join("workspace/member1/package.json").unwrap()).unwrap(),
},
},
{
"scopeUri": url_to_uri(&temp_dir.url().join("workspace/member2/").unwrap()).unwrap(),
"workspaceRootScopeUri": url_to_uri(&temp_dir.url().join("workspace/").unwrap()).unwrap(),
"denoJson": null,
"packageJson": {
"uri": url_to_uri(&temp_dir.url().join("workspace/member2/package.json").unwrap()).unwrap(),
},
},
],
}),
);
temp_dir.write("non_workspace2/deno.json", json!({}).to_string());
client.did_change_watched_files(json!({
"changes": [{
"uri": url_to_uri(&temp_dir.url().join("non_workspace2/deno.json").unwrap()).unwrap(),
"type": 1,
}],
}));
client.handle_refresh_diagnostics_request();
client.read_diagnostics();
let mut res = client
.read_notification_with_method::<Value>(
"deno/didRefreshDenoConfigurationTree",
)
.unwrap();
res.as_object_mut().unwrap().remove("denoDirNpmFolderUri");
assert_eq!(
res,
json!({
"data": [
{
"scopeUri": url_to_uri(&temp_dir.url().join("non_workspace1/").unwrap()).unwrap(),
"workspaceRootScopeUri": null,
"denoJson": {
"uri": url_to_uri(&temp_dir.url().join("non_workspace1/deno.json").unwrap()).unwrap(),
},
"packageJson": null,
},
{
"scopeUri": url_to_uri(&temp_dir.url().join("non_workspace2/").unwrap()).unwrap(),
"workspaceRootScopeUri": null,
"denoJson": {
"uri": url_to_uri(&temp_dir.url().join("non_workspace2/deno.json").unwrap()).unwrap(),
},
"packageJson": null,
},
{
"scopeUri": url_to_uri(&temp_dir.url().join("workspace/").unwrap()).unwrap(),
"workspaceRootScopeUri": null,
"denoJson": {
"uri": url_to_uri(&temp_dir.url().join("workspace/deno.json").unwrap()).unwrap(),
},
"packageJson": null,
},
{
"scopeUri": url_to_uri(&temp_dir.url().join("workspace/member1/").unwrap()).unwrap(),
"workspaceRootScopeUri": url_to_uri(&temp_dir.url().join("workspace/").unwrap()).unwrap(),
"denoJson": {
"uri": url_to_uri(&temp_dir.url().join("workspace/member1/deno.json").unwrap()).unwrap(),
},
"packageJson": {
"uri": url_to_uri(&temp_dir.url().join("workspace/member1/package.json").unwrap()).unwrap(),
},
},
{
"scopeUri": url_to_uri(&temp_dir.url().join("workspace/member2/").unwrap()).unwrap(),
"workspaceRootScopeUri": url_to_uri(&temp_dir.url().join("workspace/").unwrap()).unwrap(),
"denoJson": null,
"packageJson": {
"uri": url_to_uri(&temp_dir.url().join("workspace/member2/package.json").unwrap()).unwrap(),
},
},
],
}),
);
client.change_configuration(json!({
"deno": {
"disablePaths": ["non_workspace1"],
},
}));
let mut res = client
.read_notification_with_method::<Value>(
"deno/didRefreshDenoConfigurationTree",
)
.unwrap();
res.as_object_mut().unwrap().remove("denoDirNpmFolderUri");
assert_eq!(
res,
json!({
"data": [
{
"scopeUri": url_to_uri(&temp_dir.url().join("non_workspace2/").unwrap()).unwrap(),
"workspaceRootScopeUri": null,
"denoJson": {
"uri": url_to_uri(&temp_dir.url().join("non_workspace2/deno.json").unwrap()).unwrap(),
},
"packageJson": null,
},
{
"scopeUri": url_to_uri(&temp_dir.url().join("workspace/").unwrap()).unwrap(),
"workspaceRootScopeUri": null,
"denoJson": {
"uri": url_to_uri(&temp_dir.url().join("workspace/deno.json").unwrap()).unwrap(),
},
"packageJson": null,
},
{
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/integration/eval_tests.rs | tests/integration/eval_tests.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use test_util as util;
use test_util::test;
// Make sure that snapshot flags don't affect runtime.
//
// Runs `deno eval -p "Math.random()"` several times and asserts that at
// least two distinct values are produced (i.e. the RNG state was not frozen
// into the startup snapshot).
#[test]
fn eval_randomness() {
  let mut numbers = Vec::with_capacity(10);
  for _ in 0..10 {
    let output = util::deno_cmd()
      .arg("eval")
      .arg("-p")
      .arg("Math.random()")
      .stdout_piped()
      .spawn()
      .unwrap()
      .wait_with_output()
      .unwrap();
    assert!(output.status.success());
    let stdout_str = util::strip_ansi_codes(
      std::str::from_utf8(&output.stdout).unwrap().trim(),
    );
    numbers.push(stdout_str.to_string());
  }
  // `Vec::dedup` only collapses *consecutive* duplicates, so sort first to
  // make the distinct-value count correct even for non-adjacent repeats.
  numbers.sort_unstable();
  numbers.dedup();
  assert!(
    numbers.len() > 1,
    "expected at least two distinct random values"
  );
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/integration/coverage_tests.rs | tests/integration/coverage_tests.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use test_util as util;
use util::PathRef;
use util::TempDir;
use util::TestContext;
use util::TestContextBuilder;
use util::assert_contains;
use util::assert_starts_with;
use util::env_vars_for_npm_tests;
use util::eprintln;
use util::test;
// Each of the following tests runs `deno test --coverage` on a single
// fixture under tests/testdata/coverage/ and diffs the text and lcov
// reports against checked-in expected output (see `run_coverage_text`).
#[test]
fn branch() {
run_coverage_text("branch", "ts");
}
#[test]
fn complex() {
run_coverage_text("complex", "ts");
}
#[test]
fn final_blankline() {
run_coverage_text("final_blankline", "js");
}
#[test]
fn ignore_file_directive() {
run_coverage_text("ignore_file_directive", "ts");
}
#[test]
fn ignore_next_directive() {
run_coverage_text("ignore_next_directive", "ts");
}
#[test]
fn ignore_range_directive() {
run_coverage_text("ignore_range_directive", "ts");
}
#[test]
fn no_snaps() {
no_snaps_included("no_snaps_included", "ts");
}
// TODO(mmastrac): The exclusion to make this test pass doesn't seem to work on windows.
#[cfg_attr(windows, ignore)]
#[test]
fn no_tests() {
// Same fixture name across three module extensions.
no_tests_included("foo", "mts");
no_tests_included("foo", "ts");
no_tests_included("foo", "js");
}
/// Editing a source file between `deno test --coverage` and
/// `deno coverage` invalidates the transpile cache; the coverage command
/// must fail with a clear "Missing transpiled source code" error rather
/// than report against stale data.
#[test]
fn error_if_invalid_cache() {
let context = TestContextBuilder::new().use_temp_cwd().build();
let temp_dir_path = context.temp_dir().path();
let other_temp_dir = TempDir::new();
let other_tempdir = other_temp_dir.path().join("cov");
let invalid_cache_path = util::testdata_path().join("coverage/invalid_cache");
let mod_before_path = util::testdata_path()
.join(&invalid_cache_path)
.join("mod_before.ts");
let mod_after_path = util::testdata_path()
.join(&invalid_cache_path)
.join("mod_after.ts");
let mod_test_path = util::testdata_path()
.join(&invalid_cache_path)
.join("mod.test.ts");
let mod_temp_path = temp_dir_path.join("mod.ts");
let mod_test_temp_path = temp_dir_path.join("mod.test.ts");
// Write the initial mod.ts file
mod_before_path.copy(&mod_temp_path);
// And the test file
mod_test_path.copy(&mod_test_temp_path);
// Generate coverage
let output = context
.new_command()
.args_vec(vec![
"test".to_string(),
"--quiet".to_string(),
format!("--coverage={}", other_tempdir),
])
.run();
output.assert_exit_code(0);
output.skip_output_check();
// Modify the file between deno test and deno coverage, thus invalidating the cache
mod_after_path.copy(&mod_temp_path);
let output = context
.new_command()
.args_vec(vec!["coverage".to_string(), format!("{}/", other_tempdir)])
.run();
output.assert_exit_code(1);
let out = output.combined_output();
// Expect error
let error = util::strip_ansi_codes(out).to_string();
assert_contains!(error, "error: Missing transpiled source code");
assert_contains!(
error,
"Before generating coverage report, run `deno test --coverage` to ensure consistent state."
);
}
/// Shared driver for the text-report coverage tests: runs
/// `coverage/{test_name}_test.{extension}` under `deno test --coverage`,
/// then checks both the detailed text report and the lcov report against
/// the checked-in expected files.
fn run_coverage_text(test_name: &str, extension: &str) {
  let context = TestContext::default();
  let temp = context.temp_dir();
  let coverage_dir = temp.path().join("cov");

  // Run the fixture's test file, writing raw coverage data to coverage_dir.
  let test_run = context
    .new_command()
    .args_vec(vec![
      String::from("test"),
      String::from("-A"),
      String::from("--quiet"),
      format!("--coverage={}", coverage_dir),
      format!("coverage/{test_name}_test.{extension}"),
    ])
    .run();
  test_run.assert_exit_code(0);
  test_run.skip_output_check();

  // Detailed text report must match the expected file exactly, and nothing
  // (e.g. "Check ...") may appear on stderr.
  let text_report = context
    .new_command()
    .args_vec(vec![
      String::from("coverage"),
      String::from("--detailed"),
      String::from("--quiet"),
      format!("{}/", coverage_dir),
    ])
    .split_output()
    .run();
  assert_eq!(text_report.stderr(), "");
  text_report.assert_stdout_matches_file(
    util::testdata_path().join(format!("coverage/{test_name}_expected.out")),
  );
  text_report.assert_exit_code(0);

  // Same data, rendered as lcov.
  let lcov_report = context
    .new_command()
    .args_vec(vec![
      String::from("coverage"),
      String::from("--quiet"),
      String::from("--lcov"),
      format!("{}/", coverage_dir),
    ])
    .run();
  lcov_report.assert_exit_code(0);
  lcov_report.assert_matches_file(
    util::testdata_path().join(format!("coverage/{test_name}_expected.lcov")),
  );
}
/// Coverage collected over a whole directory of sources
/// (`coverage/multifile/`) must aggregate correctly in both the detailed
/// text report and the lcov report.
#[test]
fn multifile_coverage() {
let context = TestContext::default();
let tempdir = context.temp_dir();
let tempdir = tempdir.path().join("cov");
// The eprintln!/print_output calls below are debugging breadcrumbs kept to
// help diagnose intermittent failures of this test in CI.
eprintln!("before test");
let output = context
.new_command()
.args_vec(vec![
"test".to_string(),
"--quiet".to_string(),
format!("--coverage={}", tempdir),
format!("coverage/multifile/"),
])
.run();
eprintln!("after test");
output.assert_exit_code(0);
eprintln!("output {:#?}", output.print_output());
output.skip_output_check();
let output = context
.new_command()
.args_vec(vec![
"coverage".to_string(),
"--detailed".to_string(),
format!("{}/", tempdir),
])
.split_output()
.run();
// Verify there's no "Check" being printed
eprintln!("output2 {:#?}", output.print_output());
assert!(output.stderr().is_empty());
output.assert_stdout_matches_file(
util::testdata_path().join("coverage/multifile/expected.out"),
);
output.assert_exit_code(0);
let output = context
.new_command()
.args_vec(vec![
"coverage".to_string(),
"--quiet".to_string(),
"--lcov".to_string(),
format!("{}/", tempdir),
])
.run();
output.assert_exit_code(0);
output.assert_matches_file(
util::testdata_path().join("coverage/multifile/expected.lcov"),
);
}
/// Shared driver for the snapshot-exclusion coverage tests: collects
/// coverage for `coverage/no_snaps_included/{test_name}_test.{extension}`,
/// then renders a detailed report restricted (via `--include`) to
/// `no_snaps_included.ts` and diffs it against the expected output.
fn no_snaps_included(test_name: &str, extension: &str) {
  let context = TestContext::default();
  let temp = context.temp_dir();
  let coverage_dir = temp.path().join("cov");

  // Generate coverage data for the fixture test file.
  let test_run = context
    .new_command()
    .args_vec(vec![
      String::from("test"),
      String::from("--quiet"),
      String::from("--allow-read"),
      format!("--coverage={}", coverage_dir),
      String::from("--config"),
      String::from("../config/deno.json"),
      format!("coverage/no_snaps_included/{test_name}_test.{extension}"),
    ])
    .run();
  test_run.assert_exit_code(0);
  test_run.skip_output_check();

  // Report restricted to the single source file; stderr must stay silent
  // (no "Check ..." lines).
  let report = context
    .new_command()
    .args_vec(vec![
      String::from("coverage"),
      String::from("--include=no_snaps_included.ts"),
      String::from("--detailed"),
      format!("{}/", coverage_dir),
    ])
    .split_output()
    .run();
  assert!(report.stderr().is_empty());
  report.assert_stdout_matches_file(
    util::testdata_path().join("coverage/no_snaps_included/expected.out"),
  );
  report.assert_exit_code(0);
}
/// Shared driver for the no-tests-included coverage tests: collects
/// coverage for `coverage/no_tests_included/{test_name}.test.{extension}`,
/// then renders a detailed report that excludes the std path and diffs it
/// against the expected output.
fn no_tests_included(test_name: &str, extension: &str) {
  let context = TestContext::default();
  let temp = context.temp_dir();
  let coverage_dir = temp.path().join("cov");

  // Generate coverage data for the fixture test file.
  let test_run = context
    .new_command()
    .args_vec(vec![
      String::from("test"),
      String::from("--quiet"),
      String::from("--allow-read"),
      format!("--coverage={}", coverage_dir),
      String::from("--config"),
      String::from("../config/deno.json"),
      format!("coverage/no_tests_included/{test_name}.test.{extension}"),
    ])
    .run();
  test_run.assert_exit_code(0);
  test_run.skip_output_check();

  // Report with the std directory excluded; stderr must stay silent
  // (no "Check ..." lines).
  let report = context
    .new_command()
    .args_vec(vec![
      String::from("coverage"),
      format!("--exclude={}", util::std_path().canonicalize()),
      String::from("--detailed"),
      format!("{}/", coverage_dir),
    ])
    .split_output()
    .run();
  assert!(report.stderr().is_empty());
  report.assert_stdout_matches_file(
    util::testdata_path().join("coverage/no_tests_included/expected.out"),
  );
  report.assert_exit_code(0);
}
/// Coverage must not include sources pulled from the npm cache; the
/// detailed report for an npm-using test should match the expected output
/// exactly.
#[test]
fn no_npm_cache_coverage() {
let context = TestContext::with_http_server();
let tempdir = context.temp_dir();
let tempdir = tempdir.path().join("cov");
let output = context
.new_command()
.args_vec(vec![
"test".to_string(),
"--quiet".to_string(),
"--allow-read".to_string(),
format!("--coverage={}", tempdir),
format!("coverage/no_npm_coverage/no_npm_coverage_test.ts"),
])
// npm resolution in tests needs the test registry env vars.
.envs(env_vars_for_npm_tests())
.run();
output.assert_exit_code(0);
output.skip_output_check();
let output = context
.new_command()
.args_vec(vec![
"coverage".to_string(),
"--detailed".to_string(),
format!("{}/", tempdir),
])
.split_output()
.run();
// Verify there's no "Check" being printed
assert!(output.stderr().is_empty());
output.assert_stdout_matches_file(
util::testdata_path().join("coverage/no_npm_coverage/expected.out"),
);
output.assert_exit_code(0);
}
/// Lines that exist only in transpiled output (not in the original source)
/// must not appear in coverage reports; checks both text and lcov formats
/// for the `no_transpiled_lines` fixture.
#[test]
fn no_transpiled_lines() {
let context = TestContext::default();
let tempdir = context.temp_dir();
let tempdir = tempdir.path().join("cov");
let output = context
.new_command()
.args_vec(vec![
"test".to_string(),
"--quiet".to_string(),
format!("--coverage={}", tempdir),
"--config".to_string(),
"../config/deno.json".to_string(),
"coverage/no_transpiled_lines/".to_string(),
])
.run();
output.assert_exit_code(0);
output.skip_output_check();
let output = context
.new_command()
.args_vec(vec![
"coverage".to_string(),
"--include=no_transpiled_lines/index.ts".to_string(),
"--detailed".to_string(),
format!("{}/", tempdir),
])
.run();
output.assert_exit_code(0);
output.assert_matches_file(
util::testdata_path().join("coverage/no_transpiled_lines/expected.out"),
);
let output = context
.new_command()
.args_vec(vec![
"coverage".to_string(),
"--lcov".to_string(),
"--include=no_transpiled_lines/index.ts".to_string(),
format!("{}/", tempdir),
])
.run();
output.assert_exit_code(0);
output.assert_matches_file(
util::testdata_path().join("coverage/no_transpiled_lines/expected.lcov"),
);
}
/// Raw coverage JSON files must only reference user code: every recorded
/// `url` must be a `file:` URL, never an internal (ext:/node:) module.
#[test]
fn no_internal_code() {
let context = TestContext::default();
let tempdir = context.temp_dir();
let tempdir = tempdir.path().join("cov");
let output = context
.new_command()
.args_vec(vec![
"test".to_string(),
"--quiet".to_string(),
format!("--coverage={}", tempdir),
"coverage/no_internal_code_test.ts".to_string(),
])
.run();
output.assert_exit_code(0);
output.skip_output_check();
// Check that coverage files contain no internal urls
let paths = tempdir.read_dir();
for path in paths {
let unwrapped = PathRef::new(path.unwrap().path());
let data = unwrapped.read_to_string();
let value: serde_json::Value = serde_json::from_str(&data).unwrap();
let url = value["url"].as_str().unwrap();
assert_starts_with!(url, "file:");
}
}
/// Same as `no_internal_code`, but for a fixture exercising Node-compat
/// internals (run with `--no-check`): coverage JSON must only contain
/// `file:` URLs.
#[test]
fn no_internal_node_code() {
let context = TestContext::default();
let tempdir = context.temp_dir();
let tempdir = tempdir.path().join("cov");
let output = context
.new_command()
.args_vec(vec![
"test".to_string(),
"--quiet".to_string(),
"--no-check".to_string(),
format!("--coverage={}", tempdir),
"coverage/no_internal_node_code_test.ts".to_string(),
])
.run();
output.assert_exit_code(0);
output.skip_output_check();
// Check that coverage files contain no internal urls
let paths = tempdir.read_dir();
for path in paths {
let unwrapped = PathRef::new(path.unwrap().path());
let data = unwrapped.read_to_string();
let value: serde_json::Value = serde_json::from_str(&data).unwrap();
let url = value["url"].as_str().unwrap();
assert_starts_with!(url, "file:");
}
}
/// Remote (http:) imports must not produce coverage entries: after running
/// a test that imports over HTTP, every coverage JSON `url` is still a
/// `file:` URL.
#[test]
fn no_http_coverage_data() {
// Keep the local test HTTP server alive for the duration of the test.
let _http_server_guard = test_util::http_server();
let context = TestContext::default();
let tempdir = context.temp_dir();
let tempdir = tempdir.path().join("cov");
let output = context
.new_command()
.args_vec(vec![
"test".to_string(),
"--allow-import".to_string(),
"--quiet".to_string(),
"--no-check".to_string(),
format!("--coverage={}", tempdir),
"coverage/no_http_coverage_data_test.ts".to_string(),
])
.run();
output.assert_exit_code(0);
output.skip_output_check();
// Check that coverage files contain no http urls
let paths = tempdir.read_dir();
for path in paths {
let unwrapped = PathRef::new(path.unwrap().path());
let data = unwrapped.read_to_string();
let value: serde_json::Value = serde_json::from_str(&data).unwrap();
let url = value["url"].as_str().unwrap();
assert_starts_with!(url, "file:");
}
}
#[test]
fn test_html_reporter() {
// This test case generates a html coverage report of test cases in /tests/testdata/coverage/multisource
// You can get the same reports in ./cov_html by running the following command:
// ```
// ./target/debug/deno test --coverage=cov_html tests/testdata/coverage/multisource
// ./target/debug/deno coverage --html cov_html
// ```
let context = TestContext::default();
let tempdir = context.temp_dir();
let tempdir = tempdir.path().join("cov");
let output = context
.new_command()
.args_vec(vec![
"test".to_string(),
"--quiet".to_string(),
format!("--coverage={}", tempdir),
"coverage/multisource".to_string(),
])
.run();
output.assert_exit_code(0);
output.skip_output_check();
let output = context
.new_command()
.args_vec(vec![
"coverage".to_string(),
"--html".to_string(),
format!("{}/", tempdir),
])
.run();
output.assert_exit_code(0);
output.assert_matches_text("HTML coverage report has been generated at [WILDCARD]/cov/html/index.html\n");
let index_html = tempdir.join("html").join("index.html").read_to_string();
assert_contains!(index_html, "<h1>All files</h1>");
assert_contains!(index_html, "baz/");
assert_contains!(index_html, "href='baz/index.html'");
assert_contains!(index_html, "foo.ts");
assert_contains!(index_html, "href='foo.ts.html'");
assert_contains!(index_html, "bar.ts");
assert_contains!(index_html, "href='bar.ts.html'");
let foo_ts_html = tempdir.join("html").join("foo.ts.html").read_to_string();
assert_contains!(
foo_ts_html,
"<h1><a href='index.html'>All files</a> / foo.ts</h1>"
);
// Check that line count has correct title attribute
assert_contains!(
foo_ts_html,
"<span class='cline-any cline-yes' title='This line is covered 1 time'>x1</span>"
);
assert_contains!(
foo_ts_html,
"<span class='cline-any cline-yes' title='This line is covered 3 times'>x3</span>"
);
let bar_ts_html = tempdir.join("html").join("bar.ts.html").read_to_string();
assert_contains!(
bar_ts_html,
"<h1><a href='index.html'>All files</a> / bar.ts</h1>"
);
// Check <T> in source code is escaped to <T>
assert_contains!(bar_ts_html, "<T>");
// Check that line anchors are correctly referenced by line number links
assert_contains!(bar_ts_html, "<a href='#L1' id='L1'>1</a>");
let baz_index_html = tempdir
.join("html")
.join("baz")
.join("index.html")
.read_to_string();
assert_contains!(
baz_index_html,
"<h1><a href='../index.html'>All files</a> / baz</h1>"
);
assert_contains!(baz_index_html, "qux.ts");
assert_contains!(baz_index_html, "href='qux.ts.html'");
assert_contains!(baz_index_html, "quux.ts");
assert_contains!(baz_index_html, "href='quux.ts.html'");
let baz_qux_ts_html = tempdir
.join("html")
.join("baz")
.join("qux.ts.html")
.read_to_string();
assert_contains!(
baz_qux_ts_html,
"<h1><a href='../index.html'>All files</a> / <a href='../baz/index.html'>baz</a> / qux.ts</h1>"
);
let baz_quux_ts_html = tempdir
.join("html")
.join("baz")
.join("quux.ts.html")
.read_to_string();
assert_contains!(
baz_quux_ts_html,
"<h1><a href='../index.html'>All files</a> / <a href='../baz/index.html'>baz</a> / quux.ts</h1>"
);
}
/// Checks the default (summary table) coverage reporter for the
/// `multisource` fixture, and that `--ignore` globs remove files from the
/// table and recompute the totals.
#[test]
fn test_summary_reporter() {
let context = TestContext::default();
let tempdir = context.temp_dir();
let tempdir = tempdir.path().join("cov");
let output = context
.new_command()
.args_vec(vec![
"test".to_string(),
"--quiet".to_string(),
format!("--coverage={}", tempdir),
"coverage/multisource".to_string(),
])
.run();
output.assert_exit_code(0);
output.skip_output_check();
{
let output = context
.new_command()
.args_vec(vec!["coverage".to_string(), format!("{}/", tempdir)])
.run();
output.assert_exit_code(0);
// Exact table match, including alignment and the "All files" totals row.
output.assert_matches_text(
"| File | Branch % | Line % |
| ----------- | -------- | ------ |
| bar.ts | 0.0 | 57.1 |
| baz/quux.ts | 0.0 | 28.6 |
| baz/qux.ts | 100.0 | 100.0 |
| foo.ts | 50.0 | 76.9 |
| All files | 40.0 | 61.0 |
",
);
}
// test --ignore flag works
{
let output = context
.new_command()
.args_vec(vec![
"coverage".to_string(),
format!("{}/", tempdir),
"--ignore=**/bar.ts,**/quux.ts".to_string(),
])
.run();
output.assert_exit_code(0);
// Ignored files disappear and the totals are recomputed.
output.assert_matches_text(
"| File | Branch % | Line % |
| ---------- | -------- | ------ |
| baz/qux.ts | 100.0 | 100.0 |
| foo.ts | 50.0 | 76.9 |
| All files | 66.7 | 85.0 |
",
);
}
}
/// Running `deno test --coverage` against a directory containing no test
/// modules must fail with "No test modules found" and must not leave any
/// stray files in the coverage directory.
#[test]
fn test_collect_summary_with_no_matches() {
  let context = TestContext::default();
  let temp_dir = context.temp_dir();
  let coverage_dir = PathRef::new(temp_dir.path().join("cov"));
  let empty_test_dir = temp_dir.path().join("empty_dir");
  empty_test_dir.create_dir_all();

  let output = context
    .new_command()
    .args_vec(vec![
      "test".to_string(),
      "--quiet".to_string(),
      "--allow-read".to_string(),
      format!("--coverage={}", coverage_dir.as_path().display()),
      empty_test_dir.as_path().to_str().unwrap().to_string(),
    ])
    .run();
  output.assert_exit_code(1);
  assert_contains!(output.combined_output(), "error: No test modules found");

  // Anything left behind in the coverage directory other than 'empty_dir'
  // is unexpected.
  let unexpected_contents: Vec<std::path::PathBuf> =
    std::fs::read_dir(coverage_dir.as_path())
      .unwrap()
      .flatten()
      .filter(|entry| entry.file_name() != "empty_dir")
      .map(|entry| entry.path())
      .collect();

  // Report unexpected contents before asserting, to aid debugging.
  if !unexpected_contents.is_empty() {
    eprintln!("Unexpected files or directories in the coverage directory:");
    for path in &unexpected_contents {
      eprintln!("{:?}", path);
    }
  }
  assert!(
    unexpected_contents.is_empty(),
    "Expected the coverage directory to be empty except for 'empty_dir', but found: {:?}",
    unexpected_contents
  );
}
/// Shared driver for the worker coverage tests: collects coverage while
/// running `script` and diffs the detailed report against the `expected`
/// file under testdata.
fn worker_coverage_fn(script: &str, expected: &str) {
  let context = TestContext::with_http_server();
  let temp = context.temp_dir();
  let coverage_dir = temp.path().join("cov");

  // Run the script under `deno test` with coverage collection enabled.
  let test_run = context
    .new_command()
    .args_vec(vec![
      String::from("test"),
      String::from("--quiet"),
      String::from("--allow-read"),
      format!("--coverage={}", coverage_dir),
      script.to_string(),
    ])
    .run();
  test_run.assert_exit_code(0);
  test_run.skip_output_check();

  // Render the detailed report; stderr must stay silent (no "Check ..."
  // lines).
  let report = context
    .new_command()
    .args_vec(vec![
      String::from("coverage"),
      String::from("--detailed"),
      format!("{}/", coverage_dir),
    ])
    .split_output()
    .run();
  assert!(report.stderr().is_empty());
  report.assert_stdout_matches_file(util::testdata_path().join(expected));
  report.assert_exit_code(0);
}
// Each worker coverage test runs one `coverage/worker/mainN.js` fixture and
// compares the detailed report to `coverage/worker/expectedN.out` (see
// `worker_coverage_fn`).
#[test]
fn worker_coverage1() {
worker_coverage_fn(
"coverage/worker/main1.js",
"coverage/worker/expected1.out",
);
}
#[test]
fn worker_coverage2() {
worker_coverage_fn(
"coverage/worker/main2.js",
"coverage/worker/expected2.out",
);
}
#[test]
fn worker_coverage3() {
worker_coverage_fn(
"coverage/worker/main3.js",
"coverage/worker/expected3.out",
);
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/integration/serve_tests.rs | tests/integration/serve_tests.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::cell::RefCell;
use std::collections::HashMap;
use std::io::BufRead;
use std::io::BufReader;
use std::io::Read;
use std::time::Duration;
use pretty_assertions::assert_eq;
use regex::Regex;
use reqwest::RequestBuilder;
use test_util as util;
use test_util::DenoChild;
use test_util::eprintln;
use test_util::test;
use tokio::time::timeout;
/// Test harness pairing a spawned `deno serve` child process with a
/// reqwest HTTP client for talking to it. The child is killed when the
/// harness is dropped (see the `Drop` impl).
struct ServeClient {
// The spawned `deno serve` process.
child: RefCell<DenoChild>,
// HTTP/2 client; `with_child` disables connection pooling so each request
// opens a fresh connection (and may land on a different worker).
client: reqwest::Client,
// Bytes accumulated from the child's stderr while probing for the port.
output_buf: RefCell<Vec<u8>>,
// Lazily discovered "http://127.0.0.1:<port>" base URL (see `endpoint`).
endpoint: RefCell<Option<String>>,
}
impl Drop for ServeClient {
  /// Best-effort cleanup of the child process.
  ///
  /// Deliberately ignores errors instead of unwrapping: `kill`/`wait` can
  /// fail when the child was already killed and reaped (e.g. via
  /// `ServeClient::kill` or `ServeClient::output`, which consume `self`
  /// but still run this drop), and a panic inside `drop` during unwinding
  /// would abort the whole test process.
  fn drop(&mut self) {
    let mut child = self.child.borrow_mut();
    let _ = child.kill();
    let _ = child.wait();
  }
}
/// Builder for `ServeClient`: the command being assembled plus the
/// (required) entry-point file, supplied via `entry_point`.
struct ServeClientBuilder(util::TestCommandBuilder, Option<String>);
impl ServeClientBuilder {
fn build(self) -> ServeClient {
let Some(entry_point) = self.1 else {
panic!("entry point required");
};
let cmd = self.0.arg(entry_point);
let child = cmd.spawn().unwrap();
ServeClient::with_child(child)
}
fn map(
self,
f: impl FnOnce(util::TestCommandBuilder) -> util::TestCommandBuilder,
) -> Self {
Self(f(self.0), self.1)
}
fn entry_point(self, file: impl AsRef<str>) -> Self {
Self(self.0, Some(file.as_ref().into()))
}
fn worker_count(self, n: Option<u64>) -> Self {
self.map(|t| {
let t = t.arg("--parallel");
if let Some(n) = n {
t.env("DENO_JOBS", n.to_string())
} else {
t
}
})
}
fn new() -> Self {
Self(
util::deno_cmd()
.env("NO_COLOR", "1")
.current_dir(util::testdata_path())
.arg("serve")
.arg("--port")
.arg("0")
.stdout_piped()
.stderr_piped(),
None,
)
}
}
impl ServeClient {
// Entry point: start configuring a `deno serve` invocation.
fn builder() -> ServeClientBuilder {
ServeClientBuilder::new()
}
// Wraps an already-spawned child with a fresh HTTP/2 client trusting the
// test root CA.
fn with_child(child: DenoChild) -> Self {
Self {
child: RefCell::new(child),
output_buf: Default::default(),
endpoint: Default::default(),
client: reqwest::Client::builder()
.add_root_certificate(
reqwest::Certificate::from_pem(include_bytes!(
"../testdata/tls/RootCA.crt"
))
.unwrap(),
)
// disable connection pooling so we create a new connection per request
// which allows us to distribute requests across workers
.pool_max_idle_per_host(0)
.pool_idle_timeout(Duration::from_nanos(1))
.http2_prior_knowledge()
.build()
.unwrap(),
}
}
// Kills and reaps the child, consuming the client.
fn kill(self) {
let mut child = self.child.borrow_mut();
child.kill().unwrap();
child.wait().unwrap();
}
// Kills the child, then drains and returns its remaining stderr (appended
// to whatever `endpoint` already buffered) as a UTF-8 string.
fn output(self) -> String {
let mut child = self.child.borrow_mut();
child.kill().unwrap();
let mut stderr = child.stderr.take().unwrap();
child.wait().unwrap();
let mut output_buf = self.output_buf.borrow_mut();
stderr.read_to_end(&mut output_buf).unwrap();
String::from_utf8(std::mem::take(&mut *output_buf)).unwrap()
}
// Builds a GET request against the server's root endpoint (discovering
// the port first if needed).
fn get(&self) -> RequestBuilder {
let endpoint = self.endpoint();
self.client.get(&*endpoint)
}
// Returns the server's base URL, discovering the listening port on first
// call by polling the child's stderr for the "Listening on ..." line
// (cached in `self.endpoint` afterwards).
fn endpoint(&self) -> String {
if let Some(e) = self.endpoint.borrow().as_ref() {
return e.to_string();
};
let mut buffer = self.output_buf.borrow_mut();
let mut temp_buf = [0u8; 64];
let mut child = self.child.borrow_mut();
let stderr = child.stderr.as_mut().unwrap();
let port_regex =
regex::bytes::Regex::new(r"Listening on https?:[^:]+:(\d+)/").unwrap();
let start = std::time::Instant::now();
// try to find the port number in the output
// it may not be the first line, so we need to read the output in a loop
let port = loop {
// Give up (with the buffered output for diagnosis) after 5 seconds.
if start.elapsed() > Duration::from_secs(5) {
panic!(
"timed out waiting for serve to start. serve output:\n{}",
String::from_utf8_lossy(&buffer)
);
}
let read = stderr.read(&mut temp_buf).unwrap();
buffer.extend_from_slice(&temp_buf[..read]);
if let Some(p) = port_regex
.captures(&buffer)
.and_then(|c| c.get(1))
.map(|v| std::str::from_utf8(v.as_bytes()).unwrap().to_owned())
{
break p;
}
// this is technically blocking, but it's just a test and
// I don't want to switch RefCell to Mutex just for this
std::thread::sleep(Duration::from_millis(10));
};
// NOTE(review): this prints the whole 64-byte scratch buffer, which may
// include stale bytes past the last read — debug output only.
eprintln!("stderr: {}", String::from_utf8_lossy(&temp_buf));
self
.endpoint
.replace(Some(format!("http://127.0.0.1:{port}")));
return self.endpoint.borrow().clone().unwrap();
}
}
/// `deno serve --port 0` must bind an ephemeral port and serve the handler
/// from the entry-point file.
#[test]
async fn deno_serve_port_0() {
let client = ServeClient::builder()
.entry_point("./serve/port_0.ts")
.build();
let res = client.get().send().await.unwrap();
assert_eq!(200, res.status());
let body = res.text().await.unwrap();
assert_eq!(body, "deno serve --port 0 works!");
client.kill();
}
/// A `fetch()` handler declared with no parameters must still be callable;
/// cleanup happens via `ServeClient`'s `Drop` impl.
#[test]
async fn deno_serve_no_args() {
let client = ServeClient::builder()
.entry_point("./serve/no_args.ts")
.build();
let res = client.get().send().await.unwrap();
assert_eq!(200, res.status());
let body = res.text().await.unwrap();
assert_eq!(body, "deno serve with no args in fetch() works!");
}
/// `deno serve --parallel` with DENO_JOBS=4: all four workers must start,
/// and requests should be distributed across more than one worker.
#[test]
async fn deno_serve_parallel() {
let client = ServeClient::builder()
.entry_point("./serve/parallel.ts")
.worker_count(Some(4))
.build();
let mut serve_counts = HashMap::<u32, u32>::new();
// Give the workers a moment to finish starting before hammering them.
tokio::time::sleep(Duration::from_millis(1000)).await;
let serve_regex =
Regex::new(r"\[serve\-worker\-(\d+)\s*\] serving request").unwrap();
// 100 requests; pooling is disabled in the client so each request opens a
// new connection and may reach a different worker.
for _ in 0..100 {
let response = timeout(Duration::from_secs(2), client.get().send())
.await
.unwrap()
.unwrap();
assert_eq!(200, response.status());
let body = response.text().await.unwrap();
assert_eq!(body, "deno serve parallel");
tokio::time::sleep(Duration::from_millis(1)).await;
}
let output = client.output();
let listening_regex =
Regex::new(r"Listening on http[\w:/\.]+ with (\d+) threads").unwrap();
eprintln!("serve output:\n{output}");
// The startup banner must report exactly 4 threads.
assert_eq!(
listening_regex
.captures(&output)
.unwrap()
.get(1)
.unwrap()
.as_str()
.trim(),
"4"
);
// make sure all workers have at least started
let mut started = [false; 4];
let start_regex =
Regex::new(r"\[serve\-worker\-(\d+)\s*\] starting serve").unwrap();
for capture in start_regex.captures_iter(&output) {
if let Some(worker_number) =
capture.get(1).and_then(|m| m.as_str().parse::<u32>().ok())
{
started[worker_number as usize] = true;
}
}
assert!(started.iter().all(|&b| b));
// Tally how many requests each worker logged.
for capture in serve_regex.captures_iter(&output) {
if let Some(worker_number) =
capture.get(1).and_then(|m| m.as_str().parse::<u32>().ok())
{
*serve_counts.entry(worker_number).or_default() += 1;
}
}
#[cfg(not(target_vendor = "apple"))] // FIXME: flaky on macOS, it tends to not distribute requests evenly
assert!(
serve_counts.values().filter(|&&n| n > 2).count() >= 2,
"bad {serve_counts:?}"
);
}
#[test]
async fn deno_run_serve_with_tcp_from_env() {
let mut child = util::deno_cmd()
.current_dir(util::testdata_path())
.arg("run")
.arg("--allow-net")
.arg("./serve/run_serve.ts")
.env("DENO_SERVE_ADDRESS", "tcp:127.0.0.1:0")
.stderr_piped()
.spawn()
.unwrap();
let stderr = BufReader::new(child.stderr.as_mut().unwrap());
let msg = stderr.lines().next().unwrap().unwrap();
// Deno.serve() listens on 0.0.0.0 by default. This checks DENO_SERVE_ADDRESS
// is not ignored by ensuring it's listening on 127.0.0.1.
let port_regex = Regex::new(r"http:\/\/127\.0\.0\.1:(\d+)").unwrap();
let port = port_regex.captures(&msg).unwrap().get(1).unwrap().as_str();
let client = reqwest::Client::builder().build().unwrap();
let res = client
.get(format!("http://127.0.0.1:{port}"))
.send()
.await
.unwrap();
assert_eq!(200, res.status());
let body = res.text().await.unwrap();
assert_eq!(body, "Deno.serve() works!");
child.kill().unwrap();
child.wait().unwrap();
}
#[test]
#[cfg(unix)]
async fn deno_run_serve_with_unix_socket_from_env() {
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;
use tokio::net::UnixStream;
let dir = tempfile::TempDir::new().unwrap();
let sock = dir.path().join("listen.sock");
let mut child = util::deno_cmd()
.current_dir(util::testdata_path())
.arg("run")
.arg(format!("--allow-read={}", sock.display()))
.arg(format!("--allow-write={}", sock.display()))
.arg("./serve/run_serve.ts")
.env("DENO_SERVE_ADDRESS", format!("unix:{}", sock.display()))
.stderr_piped()
.spawn()
.unwrap();
let stderr = BufReader::new(child.stderr.as_mut().unwrap());
stderr.lines().next().unwrap().unwrap();
// reqwest does not support connecting to unix sockets yet, so here we send the http
// payload directly
let mut conn = UnixStream::connect(dir.path().join("listen.sock"))
.await
.unwrap();
conn.write_all(b"GET / HTTP/1.0\r\n\r\n").await.unwrap();
let mut response = String::new();
conn.read_to_string(&mut response).await.unwrap();
assert!(response.ends_with("\r\nDeno.serve() works!"));
child.kill().unwrap();
child.wait().unwrap();
}
#[test]
#[cfg(unix)]
async fn deno_run_serve_with_duplicate_env_addr() {
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;
use tokio::net::UnixStream;
let dir = tempfile::TempDir::new().unwrap();
let sock = dir.path().join("listen.sock");
let mut child = util::deno_cmd()
.current_dir(util::testdata_path())
.arg("run")
.arg("--allow-net")
.arg(format!("--allow-read={}", sock.display()))
.arg(format!("--allow-write={}", sock.display()))
.arg("./serve/run_serve.ts")
.env(
"DENO_SERVE_ADDRESS",
format!("duplicate,unix:{}", sock.display()),
)
.stderr_piped()
.spawn()
.unwrap();
let stderr = BufReader::new(child.stderr.as_mut().unwrap());
let msg = stderr.lines().next().unwrap().unwrap();
let port_regex = Regex::new(r"https?:[^:]+:(\d+)").unwrap();
let port = port_regex
.captures(&msg)
.unwrap_or_else(|| panic!("Could not find regex in text:\n{}", msg))
.get(1)
.unwrap()
.as_str();
{
let client = reqwest::Client::builder().build().unwrap();
let res = client
.get(format!("http://127.0.0.1:{port}"))
.send()
.await
.unwrap();
assert_eq!(200, res.status());
let body = res.text().await.unwrap();
assert_eq!(body, "Deno.serve() works!");
}
{
// reqwest does not support connecting to unix sockets yet, so here we send the http
// payload directly
let mut conn = UnixStream::connect(dir.path().join("listen.sock"))
.await
.unwrap();
conn.write_all(b"GET / HTTP/1.0\r\n\r\n").await.unwrap();
let mut response = String::new();
conn.read_to_string(&mut response).await.unwrap();
assert!(response.ends_with("\r\nDeno.serve() works!"));
}
child.kill().unwrap();
child.wait().unwrap();
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/integration/test_tests.rs | tests/integration/test_tests.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use test_util::TestContext;
use test_util::TestContextBuilder;
use test_util::assert_contains;
use test_util::assert_not_contains;
use test_util::assertions::assert_wildcard_match;
use test_util::test;
use test_util::with_pty;
#[test]
fn junit_path() {
let context = TestContextBuilder::new().use_temp_cwd().build();
let temp_dir = context.temp_dir();
temp_dir.write("test.js", "Deno.test('does test', () => {});");
let output = context
.new_command()
.args("test --junit-path=sub_dir/output.xml test.js")
.run();
output.skip_output_check();
output.assert_exit_code(0);
temp_dir
.path()
.join("sub_dir/output.xml")
.assert_matches_text("<?xml [WILDCARD]");
}
#[test(flaky)]
// todo(#18480): re-enable
#[ignore]
fn sigint_with_hanging_test() {
with_pty(
&[
"test",
"--quiet",
"--no-check",
"test/sigint_with_hanging_test.ts",
],
|mut console| {
std::thread::sleep(std::time::Duration::from_secs(1));
console.write_line("\x03");
let text = console.read_until("hanging_test.ts:10:15");
assert_wildcard_match(
include_str!("../testdata/test/sigint_with_hanging_test.out"),
&text,
);
},
);
}
#[test]
fn test_with_glob_config() {
let context = TestContextBuilder::new().cwd("test").build();
let cmd_output = context
.new_command()
.args("test --config deno.glob.json")
.run();
cmd_output.assert_exit_code(0);
let output = cmd_output.combined_output();
assert_contains!(output, "glob/nested/fizz/fizz.ts");
assert_contains!(output, "glob/pages/[id].ts");
assert_contains!(output, "glob/nested/fizz/bar.ts");
assert_contains!(output, "glob/nested/foo/foo.ts");
assert_contains!(output, "glob/data/test1.js");
assert_contains!(output, "glob/nested/foo/bar.ts");
assert_contains!(output, "glob/nested/foo/fizz.ts");
assert_contains!(output, "glob/nested/fizz/foo.ts");
assert_contains!(output, "glob/data/test1.ts");
}
#[test]
fn test_with_glob_config_and_flags() {
let context = TestContextBuilder::new().cwd("test").build();
let cmd_output = context
.new_command()
.args("test --config deno.glob.json --ignore=glob/nested/**/bar.ts")
.run();
cmd_output.assert_exit_code(0);
let output = cmd_output.combined_output();
assert_contains!(output, "glob/nested/fizz/fizz.ts");
assert_contains!(output, "glob/pages/[id].ts");
assert_contains!(output, "glob/nested/fizz/bazz.ts");
assert_contains!(output, "glob/nested/foo/foo.ts");
assert_contains!(output, "glob/data/test1.js");
assert_contains!(output, "glob/nested/foo/bazz.ts");
assert_contains!(output, "glob/nested/foo/fizz.ts");
assert_contains!(output, "glob/nested/fizz/foo.ts");
assert_contains!(output, "glob/data/test1.ts");
let cmd_output = context
.new_command()
.args("test --config deno.glob.json glob/data/test1.?s")
.run();
cmd_output.assert_exit_code(0);
let output = cmd_output.combined_output();
assert_contains!(output, "glob/data/test1.js");
assert_contains!(output, "glob/data/test1.ts");
}
#[test]
fn conditionally_loads_type_graph() {
let context = TestContext::default();
let output = context
.new_command()
.args("test --reload -L debug run/type_directives_js_main.js")
.run();
output.assert_matches_text("[WILDCARD] - FileFetcher::fetch_no_follow - specifier: file:///[WILDCARD]/subdir/type_reference.d.ts[WILDCARD]");
let output = context
.new_command()
.args("test --reload -L debug --no-check run/type_directives_js_main.js")
.run();
assert_not_contains!(output.combined_output(), "type_reference.d.ts");
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/tests/integration/publish_tests.rs | tests/integration/publish_tests.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::process::Command;
use serde_json::json;
use test_util::TestContextBuilder;
use test_util::assert_contains;
use test_util::assert_not_contains;
use test_util::env_vars_for_jsr_provenance_tests;
use test_util::env_vars_for_jsr_tests;
use test_util::env_vars_for_jsr_tests_with_git_check;
use test_util::eprintln;
use test_util::test;
#[test]
fn publish_non_exported_files_using_import_map() {
let context = publish_context_builder().build();
let temp_dir = context.temp_dir().path();
temp_dir.join("deno.json").write_json(&json!({
"name": "@foo/bar",
"version": "1.0.0",
"exports": "./mod.ts",
"imports": {
"@denotest/add": "jsr:@denotest/add@1"
}
}));
temp_dir.join("LICENSE").write("");
// file not in the graph
let other_ts = temp_dir.join("_other.ts");
other_ts
.write("import { add } from '@denotest/add'; console.log(add(1, 3));");
let mod_ts = temp_dir.join("mod.ts");
mod_ts.write("import { add } from '@denotest/add'; console.log(add(1, 2));");
let output = context
.new_command()
.args("publish --log-level=debug --token 'sadfasdf'")
.run();
output.assert_exit_code(0);
let lines = output.combined_output().split('\n').collect::<Vec<_>>();
eprintln!("{}", output.combined_output());
assert!(
lines
.iter()
.any(|l| l.contains("Unfurling") && l.ends_with("mod.ts"))
);
assert!(
lines
.iter()
.any(|l| l.contains("Unfurling") && l.ends_with("other.ts"))
);
}
#[test]
fn publish_warning_not_in_graph() {
let context = publish_context_builder().build();
let temp_dir = context.temp_dir().path();
temp_dir.join("deno.json").write_json(&json!({
"name": "@foo/bar",
"version": "1.0.0",
"exports": "./mod.ts",
}));
temp_dir.join("LICENSE").write("");
// file not in the graph that uses a non-analyzable dynamic import (cause a diagnostic)
let other_ts = temp_dir.join("_other.ts");
other_ts
.write("const nonAnalyzable = './_other.ts'; await import(nonAnalyzable);");
let mod_ts = temp_dir.join("mod.ts");
mod_ts.write(
"export function test(a: number, b: number): number { return a + b; }",
);
context
.new_command()
.args("publish --token 'sadfasdf'")
.run()
.assert_matches_text(
"[WILDCARD]unable to analyze dynamic import[WILDCARD]",
);
}
#[test]
fn provenance() {
TestContextBuilder::new()
.use_http_server()
.envs(env_vars_for_jsr_provenance_tests())
.cwd("publish/successful")
.build()
.new_command()
.args("publish")
.run()
.assert_exit_code(0)
.assert_matches_file("publish/successful_provenance.out");
}
#[test]
fn ignores_gitignore() {
let context = publish_context_builder().build();
let temp_dir = context.temp_dir().path();
temp_dir.join("deno.json").write_json(&json!({
"name": "@foo/bar",
"version": "1.0.0",
"exports": "./main.ts"
}));
temp_dir.join("LICENSE").write("");
temp_dir.join("main.ts").write("import './sub_dir/b.ts';");
let gitignore = temp_dir.join(".gitignore");
gitignore.write("ignored.ts\nsub_dir/ignored.wasm");
let sub_dir = temp_dir.join("sub_dir");
sub_dir.create_dir_all();
sub_dir.join("ignored.wasm").write("");
sub_dir.join("b.ts").write("export default {}");
temp_dir.join("ignored.ts").write("");
let output = context
.new_command()
.arg("publish")
.arg("--dry-run")
.arg("--token")
.arg("sadfasdf")
.run();
output.assert_exit_code(0);
let output = output.combined_output();
assert_contains!(output, "b.ts");
assert_contains!(output, "main.ts");
assert_not_contains!(output, "ignored.ts");
assert_not_contains!(output, "ignored.wasm");
assert_not_contains!(output, ".gitignore");
}
#[test]
fn ignores_directories() {
let context = publish_context_builder().build();
let temp_dir = context.temp_dir().path();
temp_dir.join("deno.json").write_json(&json!({
"name": "@foo/bar",
"version": "1.0.0",
"exclude": [ "ignore" ],
"publish": {
"exclude": [ "ignore2" ]
},
"exports": "./main_included.ts"
}));
let ignored_dirs = vec![
temp_dir.join(".git"),
temp_dir.join("node_modules"),
temp_dir.join("ignore"),
temp_dir.join("ignore2"),
];
for ignored_dir in ignored_dirs {
ignored_dir.create_dir_all();
ignored_dir.join("ignored.ts").write("");
}
let sub_dir = temp_dir.join("sub_dir");
sub_dir.create_dir_all();
sub_dir.join("sub_included.ts").write("");
temp_dir.join("main_included.ts").write("");
temp_dir.join("LICENSE").write("");
let output = context
.new_command()
.arg("publish")
.arg("--log-level=debug")
.arg("--token")
.arg("sadfasdf")
.run();
output.assert_exit_code(0);
let output = output.combined_output();
assert_contains!(output, "sub_included.ts");
assert_contains!(output, "main_included.ts");
assert_not_contains!(output, "ignored.ts");
}
#[test]
fn not_include_gitignored_file_unless_exact_match_in_include() {
let context = publish_context_builder().build();
let temp_dir = context.temp_dir().path();
temp_dir.join("deno.json").write_json(&json!({
"name": "@foo/bar",
"version": "1.0.0",
"exports": "./main.ts",
"publish": {
// won't match ignored.ts because it needs to be
// unexcluded via a negated glob in exclude
"include": [
"deno.json",
"*.ts",
"exact_include.ts",
"sub"
]
}
}));
temp_dir.join("LICENSE").write("");
temp_dir
.join(".gitignore")
.write("ignored.ts\nexact_include.ts\nsub/\nsub/ignored\n/sub_ignored\n");
temp_dir.join("main.ts").write("");
temp_dir.join("ignored.ts").write("");
temp_dir.join("exact_include.ts").write("");
let sub_dir = temp_dir.join("sub");
sub_dir.create_dir_all();
sub_dir.join("sub_included.ts").write("");
sub_dir.join("ignored.ts").write(""); // this one is gitignored
sub_dir.join("ignored").create_dir_all();
sub_dir.join("ignored").join("ignored_also.ts").write("");
let sub_ignored_dir = temp_dir.join("sub_ignored");
sub_ignored_dir.create_dir_all();
sub_ignored_dir.join("sub_ignored.ts").write("");
let output = context.new_command().arg("publish").arg("--dry-run").run();
output.assert_exit_code(0);
let output = output.combined_output();
assert_contains!(output, "main.ts");
// will match this exact match
assert_contains!(output, "exact_include.ts");
// will include this because the sub directory is included
assert_contains!(output, "sub_included.ts");
// it's gitignored
assert_not_contains!(output, "ignored.ts");
assert_not_contains!(output, "ignored_also.ts");
assert_not_contains!(output, "sub_ignored.ts");
}
#[test]
fn gitignore_everything_excluded_override() {
let context = publish_context_builder().build();
let temp_dir = context.temp_dir().path();
temp_dir.join(".gitignore").write("*\n");
temp_dir.join("deno.json").write_json(&json!({
"name": "@foo/bar",
"version": "1.0.0",
"exports": "./root_main.ts",
"publish": {
// should opt out of .gitignore even though everything
// is .gitignored
"exclude": ["!**"]
}
}));
temp_dir.join("LICENSE").write("");
temp_dir.join("root_main.ts").write("");
let sub_dir = temp_dir.join("sub");
sub_dir.create_dir_all();
sub_dir.join("sub_main.ts").write("");
let output = context.new_command().arg("publish").arg("--dry-run").run();
output.assert_exit_code(0);
let output = output.combined_output();
assert_contains!(output, "root_main.ts");
assert_contains!(output, "sub_main.ts");
}
#[test]
fn includes_directories_with_gitignore_when_unexcluded() {
let context = publish_context_builder().build();
let temp_dir = context.temp_dir().path();
temp_dir.join("deno.json").write_json(&json!({
"name": "@foo/bar",
"version": "1.0.0",
"exports": "./main.ts",
"publish": {
"include": [ "deno.json", "*.ts" ],
"exclude": [ "!ignored.ts" ]
}
}));
temp_dir.join("LICENSE").write("");
temp_dir.join(".gitignore").write("ignored.ts");
temp_dir.join("main.ts").write("");
temp_dir.join("ignored.ts").write("");
let output = context.new_command().arg("publish").arg("--dry-run").run();
output.assert_exit_code(0);
let output = output.combined_output();
assert_contains!(output, "main.ts");
assert_contains!(output, "ignored.ts");
}
#[test]
fn includes_unexcluded_sub_dir() {
let context = publish_context_builder().build();
let temp_dir = context.temp_dir().path();
temp_dir.join("deno.json").write_json(&json!({
"name": "@foo/bar",
"version": "1.0.0",
"exports": "./included1.ts",
"publish": {
"exclude": [
"ignored",
"!ignored/unexcluded",
]
}
}));
temp_dir.join("LICENSE").write("");
temp_dir.join("included1.ts").write("");
temp_dir.join("ignored/unexcluded").create_dir_all();
temp_dir.join("ignored/ignored.ts").write("");
temp_dir.join("ignored/unexcluded/included2.ts").write("");
let output = context.new_command().arg("publish").arg("--dry-run").run();
output.assert_exit_code(0);
let output = output.combined_output();
assert_contains!(output, "included1.ts");
assert_contains!(output, "included2.ts");
assert_not_contains!(output, "ignored.ts");
}
#[test]
fn includes_directories() {
let context = publish_context_builder().build();
let temp_dir = context.temp_dir().path();
temp_dir.join("deno.json").write_json(&json!({
"name": "@foo/bar",
"version": "1.0.0",
"exports": "./main.ts",
"publish": {
"include": [ "deno.json", "main.ts" ]
}
}));
temp_dir.join("LICENSE").write("");
temp_dir.join("main.ts").write("");
temp_dir.join("ignored.ts").write("");
let output = context
.new_command()
.arg("publish")
.arg("--token")
.arg("sadfasdf")
.run();
output.assert_exit_code(0);
let output = output.combined_output();
assert_contains!(output, "main.ts");
assert_not_contains!(output, "ignored.ts");
}
#[test]
fn not_includes_gitignored_dotenv() {
let context = publish_context_builder().build();
let temp_dir = context.temp_dir().path();
temp_dir.join("deno.json").write_json(&json!({
"name": "@foo/bar",
"version": "1.0.0",
"exports": "./main.ts",
}));
temp_dir.join("LICENSE").write("");
temp_dir.join("main.ts").write("");
temp_dir.join(".env").write("FOO=BAR");
temp_dir.join(".gitignore").write(".env");
let output = context.new_command().arg("publish").arg("--dry-run").run();
output.assert_exit_code(0);
let output = output.combined_output();
assert_contains!(output, "main.ts");
assert_not_contains!(output, ".env");
}
#[test]
fn not_includes_vendor_dir_only_when_vendor_true() {
let context = publish_context_builder().build();
let temp_dir = context.temp_dir().path();
temp_dir.join("deno.json").write_json(&json!({
"name": "@foo/bar",
"version": "1.0.0",
"exports": "./main.ts",
}));
temp_dir.join("LICENSE").write("");
temp_dir.join("main.ts").write("");
let vendor_folder = temp_dir.join("vendor");
vendor_folder.create_dir_all();
vendor_folder.join("vendor.ts").write("");
let publish_cmd = context.new_command().args("publish --dry-run");
{
let output = publish_cmd.run();
output.assert_exit_code(0);
let output = output.combined_output();
assert_contains!(output, "main.ts");
assert_contains!(output, "vendor.ts");
}
// with vendor
{
temp_dir.join("deno.json").write_json(&json!({
"name": "@foo/bar",
"version": "1.0.0",
"exports": "./main.ts",
"vendor": true,
}));
let output = publish_cmd.run();
output.assert_exit_code(0);
let output = output.combined_output();
assert_contains!(output, "main.ts");
assert_not_contains!(output, "vendor.ts");
}
}
#[test]
fn allow_dirty() {
let context = publish_context_builder_with_git_checks().build();
let temp_dir = context.temp_dir().path();
temp_dir.join("deno.json").write_json(&json!({
"name": "@foo/bar",
"version": "1.0.0",
"exports": "./main.ts",
}));
temp_dir.join("LICENSE").write("");
temp_dir.join("main.ts").write("");
let cmd = Command::new("git")
.arg("init")
.arg(temp_dir.as_path())
.output()
.unwrap();
assert!(cmd.status.success());
let output = context
.new_command()
.arg("publish")
.arg("--token")
.arg("sadfasdf")
.run();
output.assert_exit_code(1);
output.assert_matches_text(r#"Check [WILDLINE]
Checking for slow types in the public API...
Uncommitted changes:
?? LICENSE
?? deno.json
?? main.ts
error: Aborting due to uncommitted changes. Check in source code or run with --allow-dirty
"#);
let output = context
.new_command()
.arg("publish")
.arg("--allow-dirty")
.arg("--token")
.arg("sadfasdf")
.run();
output.assert_exit_code(0);
let output = output.combined_output();
assert_contains!(output, "Successfully published");
}
#[test]
fn allow_dirty_not_in_repo() {
let context = publish_context_builder_with_git_checks().build();
let temp_dir = context.temp_dir().path();
temp_dir.join("deno.json").write_json(&json!({
"name": "@foo/bar",
"version": "1.0.0",
"exports": "./main.ts",
}));
temp_dir.join("LICENSE").write("");
temp_dir.join("main.ts").write("");
// At this point there are untracked files, but we're not in Git repo,
// so we should be able to publish successfully.
let output = context
.new_command()
.arg("publish")
.arg("--token")
.arg("sadfasdf")
.run();
output.assert_exit_code(0);
let output = output.combined_output();
assert_contains!(output, "Successfully published");
}
#[test]
fn allow_dirty_dry_run() {
let context = publish_context_builder_with_git_checks().build();
let temp_dir = context.temp_dir().path();
temp_dir.join("deno.json").write_json(&json!({
"name": "@foo/bar",
"version": "1.0.0",
"exports": "./main.ts",
}));
temp_dir.join("LICENSE").write("");
temp_dir.join("main.ts").write("");
let cmd = Command::new("git")
.arg("init")
.arg(temp_dir.as_path())
.output()
.unwrap();
assert!(cmd.status.success());
let output = context
.new_command()
.arg("publish")
.arg("--dry-run")
.arg("--token")
.arg("sadfasdf")
.run();
output.assert_exit_code(1);
let output = output.combined_output();
assert_contains!(
output,
"Aborting due to uncommitted changes. Check in source code or run with --allow-dirty"
);
}
fn publish_context_builder() -> TestContextBuilder {
TestContextBuilder::new()
.use_http_server()
.envs(env_vars_for_jsr_tests())
.use_temp_cwd()
}
fn publish_context_builder_with_git_checks() -> TestContextBuilder {
TestContextBuilder::new()
.use_http_server()
.envs(env_vars_for_jsr_tests_with_git_check())
.use_temp_cwd()
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.