repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-working-with-hashmaps.rs | activities/src/bin/demo-working-with-hashmaps.rs | fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-generics-and-structures.rs | activities/src/bin/demo-generics-and-structures.rs | struct Dimensions {
width: f64,
height: f64,
depth: f64,
}
trait Convey {
fn weight(&self) -> f64;
fn dimensions(&self) -> Dimensions;
}
struct CarPart {
width: f64,
height: f64,
depth: f64,
weight: f64,
part_number: String,
}
impl Default for CarPart {
fn default() -> Self {
Self {
width: 5.0,
height: 1.0,
depth: 2.0,
weight: 3.0,
part_number: "abc".to_owned(),
}
}
}
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a5.rs | activities/src/bin/a5.rs | // Topic: Looping using the loop statement
//
// Program requirements:
// * Display "1" through "4" in the terminal
//
// Notes:
// * Use a mutable integer variable
// * Use a loop statement
// * Print the variable within the loop statement
// * Use break to exit the loop
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-abstract-network-request.rs | activities/src/bin/demo-abstract-network-request.rs | #![allow(dead_code)]
use anyhow::Result;
use serde::Deserialize;
#[derive(Debug, Clone, Deserialize)]
struct Person {
name: String,
country: String,
}
#[derive(Debug, Clone, Deserialize)]
struct PersonResponse {
status: String,
code: u16,
total: u64,
data: Vec<Person>,
}
fn get_person(client: &reqwest::blocking::Client) -> Result<PersonResponse> {
let response: PersonResponse = client
.get("https://fakerapi.it/api/v1/custom?_quantity=1&name=name&country=country")
.send()?
.json()?;
Ok(response)
}
#[derive(Debug, Default)]
struct App {
client: reqwest::blocking::Client,
}
impl App {
pub fn fetch_person(&self) -> Result<PersonResponse> {
get_person(&self.client)
}
}
fn main() -> Result<()> {
Ok(())
}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-using-iterators.rs | activities/src/bin/demo-using-iterators.rs | fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-enums.rs | activities/src/bin/demo-enums.rs | fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a3b.rs | activities/src/bin/a3b.rs | // Topic: Flow control using if..else if..else
//
// Program requirements:
// * Display ">5", "<5", or "=5" based on the value of a variable
// is > 5, < 5, or == 5, respectively
//
// Notes:
// * Use a variable set to any integer value
// * Use an if..else if..else block to determine which message to display
// * Use the println macro to display messages to the terminal
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-passing-closures-to-functions.rs | activities/src/bin/demo-passing-closures-to-functions.rs | fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-gathering-user-input.rs | activities/src/bin/demo-gathering-user-input.rs | fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/m6.rs | activities/src/bin/m6.rs | // Topic: Macro practice
//
// Summary:
// Create a macro that measures how long a function takes to execute.
//
// Requirements:
// * Write a single macro that executes a function:
// * Prior to executing the function, print out "Call: ", followed
// by the function name
// * Measure how long the function takes to executes
// * Print out (in nanoseconds) how long the function takes to execute
// * Measure each sample function with the macro
//
// Notes:
// * `std::time::Instant` can be used to calculate elapsed time
// * Use `stringify!` to get a string representation of the function name
fn sample_fn_1() {
use std::time::Duration;
std::thread::sleep(Duration::from_millis(2));
}
fn sample_fn_2(n: u64) {
let mut n = n;
while n > 0 {
use std::time::Duration;
std::thread::sleep(Duration::from_micros(n));
n -= 1;
}
}
fn sample_fn_3(lhs: usize, rhs: usize) -> usize {
lhs + rhs
}
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-generating-documentation.rs | activities/src/bin/demo-generating-documentation.rs | fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-channels.rs | activities/src/bin/demo-channels.rs | use crossbeam_channel::unbounded;
use std::thread;
enum ThreadMsg {
PrintData(String),
Sum(i64, i64),
Quit,
}
fn main() {
let handle = thread::spawn(move || {});
handle.join();
}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-while-let.rs | activities/src/bin/demo-while-let.rs | fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-generics-and-functions.rs | activities/src/bin/demo-generics-and-functions.rs | fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-option.rs | activities/src/bin/demo-option.rs | fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a17.rs | activities/src/bin/a17.rs | // Topic: Browsing standard library documentation
//
// Requirements:
// * Print a string in lowercase and uppercase
//
// Notes:
// * Utilize standard library functionality to
// transform the string to lowercase and uppercase
// * Use 'rustup doc' in a terminal to open the standard library docs
// * Navigate to the API documentation section
// * Search for functionality to transform a string (or str)
// to uppercase and lowercase
// * Try searching for: to_uppercase, to_lowercase
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a3a.rs | activities/src/bin/a3a.rs | // Topic: Flow control using if..else
//
// Program requirements:
// * Displays a message based on the value of a boolean variable
// * When the variable is set to true, display "hello"
// * When the variable is set to false, display "goodbye"
//
// Notes:
// * Use a variable set to either true or false
// * Use an if..else block to determine which message to display
// * Use the println macro to display messages to the terminal
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a4b.rs | activities/src/bin/a4b.rs | // Topic: Decision making with match
//
// Program requirements:
// * Display "one", "two", "three", or "other" based on whether
// the value of a variable is 1, 2, 3, or some other number,
// respectively
//
// Notes:
// * Use a variable set to any integer
// * Use a match expression to determine which message to display
// * Use an underscore (_) to match on any value
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-inline-modules.rs | activities/src/bin/demo-inline-modules.rs | fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/mc-01.rs | activities/src/bin/mc-01.rs | // Topic: Maintainable code via traits
//
// Summary:
// Recently there was a power outage and all of the messages stored in the message queue
// were lost. You have been tasked with adding functionality to save and load the queue. Review
// the code and then implement the requirements as detailed below.
//
// Requirements:
// - Create a trait named `MessageQueueStorage` that allows the entire queue to be saved and loaded
// - The trait should have 2 methods:
// - `save(&self, queue: &MessageQueue) -> Result<(), MessageQueueStorageError>;`
// - `load(&self) -> Result<MessageQueue, MessageQueueStorageError>;`
// - Create a struct named `FileStore` and then implement the `MessageQueueStorage` trait on it
// - The implementation should save the entire queue to a single file and also load it from a
// single file
// - Implement a `new` method which allows specifying the file path
// - Use the provided `FileStoreError` type for errors that occur in your implementation and then
// convert it to `MessageQueueStorageError` in the trait method
// - This can be done automatically by using the question mark operator
// - Run `cargo test --bin mc-01` to check your work
//
// Tips:
// - You'll need to serialize and deserialize the message queue
// - Serialize: read each entry in the queue and then save them to a file
// - Deserialize: read each entry from the file and then create a new queue
//
// - The storage format is left unspecified. Here are a few options:
// - Comma-separated values (CSV) format:
// - Format each message by `id,content`
// - JSON format:
// - add `#[derive(Serialize, Deserialize)]` to the message queue
// - use the `serde_json` crate to perform the serialize and deserialize operation
use color_eyre::eyre::eyre;
use std::collections::VecDeque;
/// A message in the queue.
///
/// ***********************
/// Do not edit the message
/// ***********************
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct Message {
pub id: u32,
pub content: String,
}
impl Message {
/// Create a new message.
pub fn new<S: Into<String>>(id: u32, content: S) -> Self {
Self {
id,
content: content.into(),
}
}
}
/// An error that may occur while saving and loading the queue using a storage backend.
///
/// ***************************************************************************
/// Do not edit this error type. It is part of the `MessageQueueStorage` trait.
/// ***************************************************************************
#[derive(Debug, thiserror::Error)]
#[error("message queue storage error")]
struct MessageQueueStorageError {
// this allows putting any errors as a source
source: color_eyre::Report,
}
/// Errors that may occur while working with the `FileStore`.
///
/// ***************************************************
/// Change this enum as needed for your implementation.
/// ***************************************************
#[derive(Debug, thiserror::Error)]
enum FileStoreError {
#[error("IO error")]
IO(#[from] std::io::Error),
// add more variants if needed
}
/// Allows conversion of error type using question mark operator.
///
/// *****************************
/// You can convert a `FileStoreError` to a `MessageQueueStorageError` using `map_err`:
///
/// fn foo() -> Result<(), MessageQueueStorageError> {
/// do_fallible_thing().map_err(MessageQueueStorageError::from)
/// }
///
/// You can also use the question mark operator:
///
/// fn foo() -> Result<(), MessageQueueStorageError> {
/// let result = do_fallible_thing()?;
/// Ok(result)
/// }
///
/// or
///
/// fn foo() -> Result<(), MessageQueueStorageError> {
/// Ok(do_fallible_thing()?);
/// }
/// *****************************
impl From<FileStoreError> for MessageQueueStorageError {
fn from(value: FileStoreError) -> Self {
Self {
source: eyre!(value),
}
}
}
/// A message queue.
///
/// *****************************
/// Do not edit the message queue
/// *****************************
#[derive(Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct MessageQueue {
messages: VecDeque<Message>,
next_id: u32,
}
impl MessageQueue {
/// Add a new message to the queue.
pub fn enqueue<M: Into<String>>(&mut self, message: M) {
let message = Message {
id: self.next_id,
content: message.into(),
};
self.messages.push_back(message);
self.next_id += 1;
}
/// Remove and return the first message in the queue.
pub fn dequeue(&mut self) -> Option<Message> {
self.messages.pop_front()
}
/// Iterate over all messages in the queue.
pub fn iter(&self) -> std::collections::vec_deque::Iter<'_, Message> {
self.messages.iter()
}
}
/********************************************
* Add your code here:
* - `MessageQueueStorage` trait
* - `FileStore` struct
* - implementation blocks
********************************************/
/// *****************************************************************
/// use `cargo test --bin mc-01` to check your work.
/// *****************************************************************
/// use `cargo run --bin mc-01` to experiment using the main function
/// *****************************************************************
fn main() -> color_eyre::Result<()> {
// show pretty error output
color_eyre::install().unwrap();
let mut queue = MessageQueue::default();
queue.enqueue("first message");
queue.enqueue("second message");
Ok(())
// save/load here
}
#[cfg(test)]
mod tests {
use super::*;
const TEST_FILE_NAME: &str = ".mc-01-test";
fn cleanup() {
let _ = std::fs::remove_file(TEST_FILE_NAME);
}
#[test]
fn queue_saves_and_loads_correctly() {
color_eyre::install().unwrap();
let test = || -> Result<(), color_eyre::Report> {
let mut queue = MessageQueue::default();
queue.enqueue("a");
queue.enqueue("b");
queue.dequeue();
queue.enqueue("c");
let storage = FileStore::new(".mc-01-test");
storage.save(&queue)?;
let loaded_queue = storage.load()?;
cleanup();
assert_eq!(loaded_queue, queue);
Ok(())
};
let results = test();
cleanup();
results.expect("test failed");
}
}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-const.rs | activities/src/bin/demo-const.rs | fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-numeric-types.rs | activities/src/bin/demo-numeric-types.rs | fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a30.rs | activities/src/bin/a30.rs | // Topic: Generics & Structures
//
// Requirements:
// * Create a Vehicle structure that is generic over traits Body and Color
// * Create structures for vehicle bodies and vehicle colors and implement the
// Body and Color traits for these structures
// * Implement a 'new' function for Vehicle that allows it to have any body
// and any color
// * Create at least two different vehicles in the main function and print their
// info
//
// Notes:
// * Examples of car bodies can be Truck, Car, Scooter
// * Examples of colors could be red, white, black
// * It is not necessary to have data fields or function implementations
// for the vehicle bodies/colors
trait Body {}
trait Color {}
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a21.rs | activities/src/bin/a21.rs | // Topic: Map combinator
//
// Requirements:
// * Given a user name, create and print out a User struct if the user exists
//
// Notes:
// * Use the existing find_user function to locate a user
// * Use the map function to create the User
// * Print out the User struct if found, or a "not found" message if not
#[derive(Debug)]
struct User {
user_id: i32,
name: String,
}
/// Locates a user id based on the name.
fn find_user(name: &str) -> Option<i32> {
let name = name.to_lowercase();
match name.as_str() {
"sam" => Some(1),
"matt" => Some(5),
"katie" => Some(9),
_ => None,
}
}
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-basic-closures.rs | activities/src/bin/demo-basic-closures.rs | fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a36.rs | activities/src/bin/a36.rs | // Topic: Arrays & Slices
//
// Requirements:
// * Print pairs of numbers and their sums as they are streamed from a data source
// * If only one number is received, then print "Unpaired value: V",
// where V is the value
//
// Notes:
// * A simulated data stream is already configured in the code
// * See the stdlib docs for the "chunks" method on "slice" for more info
fn data() -> &'static [u64] {
&[5, 5, 4, 4, 3, 3, 1]
}
fn main() {
// `stream` is an iterator of &[u64]
let mut stream = data().chunks(2);
}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-deriving-functionality.rs | activities/src/bin/demo-deriving-functionality.rs | fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a16.rs | activities/src/bin/a16.rs | // Topic: Option
//
// Requirements:
// * Print out the details of a student's locker assignment
// * Lockers use numbers and are optional for students
//
// Notes:
// * Use a struct containing the student's name and locker assignment
// * The locker assignment should use an Option<i32>
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a26.rs | activities/src/bin/a26.rs | // Topic: External crates
//
// Requirements:
// * Display the current date and time
//
// Notes:
// * Use the `chrono` crate to work with time
// * (OPTIONAL) Read the documentation section `Formatting and Parsing`
// for examples on how to create custom time formats
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a18b.rs | activities/src/bin/a18b.rs | // Topic: Result & the question mark operator
//
// Summary:
// This small program simulates unlocking a door using digital keycards
// backed by a database. Many errors can occur when working with a database,
// making the question mark operator the perfect thing to use to keep
// the code managable.
//
// Requirements:
// * Write the body of the `authorize` function. The steps to authorize a user
// are:
// 1. Connect to the database
// 2. Find the employee with the `find_employee` database function
// 3. Get a keycard with the `get_keycard` database function
// 4. Determine if the keycard's `access_level` is sufficient, using the
// `required_access_level` function implemented on `ProtectedLocation`.
// * Higher `access_level` values grant more access to `ProtectedLocations`.
// 1000 can access 1000 and lower. 800 can access 500 but not 1000, ...
// * Run the program after writing your `authorize` function. Expected output:
// Ok(Allow)
// Ok(Deny)
// Err("Catherine doesn't have a keycard")
// * Use the question mark operator within the `authorize` function.
//
// Notes:
// * Only the `authorize` function should be changed. Everything else can remain
// unmodified.
#[derive(Clone, Copy, Debug)]
enum ProtectedLocation {
All,
Office,
Warehouse,
}
impl ProtectedLocation {
fn required_access_level(&self) -> u16 {
match self {
Self::All => 1000,
Self::Office => 800,
Self::Warehouse => 500,
}
}
}
#[derive(Debug)]
struct Database;
impl Database {
fn connect() -> Result<Self, String> {
// In a production application, a database connection error is likely to occur here.
Ok(Database)
}
fn find_employee(&self, name: &str) -> Result<Employee, String> {
match name {
"Anita" => Ok(Employee {
name: "Anita".to_string(),
}),
"Brody" => Ok(Employee {
name: "Brody".to_string(),
}),
"Catherine" => Ok(Employee {
name: "Catherine".to_string(),
}),
_ => Err(String::from("employee not found")),
}
}
fn get_keycard(&self, employee: &Employee) -> Result<KeyCard, String> {
match employee.name.as_str() {
"Anita" => Ok(KeyCard { access_level: 1000 }),
"Brody" => Ok(KeyCard { access_level: 500 }),
other => Err(format!("{other} doesn't have a keycard")),
}
}
}
#[derive(Clone, Debug)]
struct Employee {
name: String,
}
#[derive(Debug)]
struct KeyCard {
access_level: u16,
}
#[derive(Clone, Copy, Debug)]
enum AuthorizationStatus {
Allow,
Deny,
}
fn authorize(
employee_name: &str,
location: ProtectedLocation,
) -> Result<AuthorizationStatus, String> {
// put your code here
}
fn main() {
// Anita is trying to access the Warehouse, which requires access level 500.
// Her keycard has access level 1000, which should be allowed.
let anita_authorized = authorize("Anita", ProtectedLocation::Warehouse);
// Brody is trying to access the Office, which requires access level 800.
// His keycard has access level 500, which should be denied.
let brody_authorized = authorize("Brody", ProtectedLocation::Office);
// Catherine is trying to access the Warehouse, which requires access level 500.
// She doesn't have a keycard, so this should be an error.
let catherine_authorized = authorize("Catherine", ProtectedLocation::Warehouse);
println!("{anita_authorized:?}");
println!("{brody_authorized:?}");
println!("{catherine_authorized:?}");
}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-implementing-functionality.rs | activities/src/bin/demo-implementing-functionality.rs | fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-ownership.rs | activities/src/bin/demo-ownership.rs | fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a10.rs | activities/src/bin/a10.rs | // Topic: Working with expressions
//
// Requirements:
// * Print "it's big" if a variable is > 100
// * Print "it's small" if a variable is <= 100
//
// Notes:
// * Use a boolean variable set to the result of
// an if..else expression to store whether the value
// is > 100 or <= 100
// * Use a function to print the messages
// * Use a match expression to determine which message
// to print
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a7.rs | activities/src/bin/a7.rs | // Topic: Working with an enum
//
// Program requirements:
// * Prints the name of a color to the terminal
//
// Notes:
// * Use an enum with color names as variants
// * Use a function to print the color name
// * The function must use the enum as a parameter
// * Use a match expression to determine which color
// name to print
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/m5.rs | activities/src/bin/m5.rs | // Topic: Macro practice
//
// Summary:
// Create a macro that can be used to generate new test cases for
// the function provided.
//
// Requirements:
// * Write a macro to generate tests for `sample_fn`
// * Create at least 6 test cases using the macro
// * Test the minimum and maximum values for each match arm
// * All test functions must be created by invoking the macro
//
// Notes:
// * Tuples can be used to specify both the input and expected output
// * The macro can be invoked multiple times; repetitions are optional
#[derive(Debug, PartialEq)]
enum Size {
Small,
Medium,
Large,
}
fn sample_fn(n: u8) -> Size {
use Size::*;
match n {
0..=53 => Small,
54..=154 => Medium,
155.. => Large
}
}
fn main() {
// use `cargo test --bin m5` to check your work
}
#[cfg(test)]
mod test { }
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-map-combinator.rs | activities/src/bin/demo-map-combinator.rs | fn maybe_num() -> Option<i32> {
Some(1)
}
fn maybe_word() -> Option<String> {
None
}
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a34.rs | activities/src/bin/a34.rs | // Topic: Typestates
//
// Summary:
// An airline wants to reduce the amount of lost luggage by
// ensuring luggage is properly tracked.
//
// Requirements:
// * Implement a luggage tracking system using the typestate pattern
// * Each piece of luggage has a tracking id
// * Luggage goes through multiple states at the airport:
// * Check-in (passenger gives luggage to airport)
// * OnLoading (luggage is loaded onto correct plane)
// * Offloading (luggage is taken off plane at destination)
// * AwaitingPickup (luggage is at destination waiting for passenger pickup)
// * EndCustody (luggage was picked up by passenger)
// Notes:
// * Optionally use generics for each state
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-new-type-pattern.rs | activities/src/bin/demo-new-type-pattern.rs | fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/demo-implementing-intoiterator.rs | activities/src/bin/demo-implementing-intoiterator.rs | use std::collections::HashMap;
#[derive(Debug, Hash, Eq, PartialEq)]
enum Fruit {
Apple,
Banana,
Orange,
}
struct FruitStand {
fruit: HashMap<Fruit, u32>,
}
fn main() {
let mut fruit = HashMap::new();
fruit.insert(Fruit::Banana, 5);
fruit.insert(Fruit::Apple, 2);
fruit.insert(Fruit::Orange, 6);
let fruit = fruit;
let mut store = FruitStand { fruit };
}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a39.rs | activities/src/bin/a39.rs | // Topic: Channels
//
// Summary:
// Using the existing code, create a program that simulates an internet-of-things
// remote control light bulb. The color of the light can be changed remotely.
// Use threads and channels to communicate what color the light bulb should display.
//
// Requirements:
// * Create a separate thread representing the light bulb
// * Use a channel to communicate with the thread
// * Display a color change message using the println! macro
// * The light bulb must also be able to turn on and off
// * Display whether the light is on or off on each color change
// * Turn off the light when disconnecting from it
//
// Notes:
// * Remember to add `crossbeam-channel` to your Cargo.toml file
// * Use the `colored` crate if you want to get fancy and display actual colors
// * The docs.rs site can be used to read documentation for third-party crates
// * Disconnection can be accomplished by dropping the sender, or
// by telling the thread to self-terminate
// * Use `cargo test --bin a39` to test your program to ensure all cases are covered
use crossbeam_channel::{unbounded, Receiver};
use std::thread::{self, JoinHandle};
enum LightMsg {
// Add additional variants needed to complete the exercise
ChangeColor(u8, u8, u8),
Disconnect,
}
enum LightStatus {
Off,
On,
}
fn spawn_light_thread(receiver: Receiver<LightMsg>) -> JoinHandle<LightStatus> {
// Add code here to spawn a thread to control the light bulb
}
fn main() {}
#[cfg(test)]
mod test {
    use super::*;
    use crossbeam_channel::unbounded;
    // Both tests join the bulb thread and require it to report
    // LightStatus::Off on termination.
    #[test]
    fn light_off_when_disconnect() {
        let (s, r) = unbounded();
        let light = spawn_light_thread(r);
        s.send(LightMsg::Disconnect).expect("channel disconnected");
        let light_status = light.join().expect("failed to join light thread");
        if let LightStatus::On = light_status {
            panic!("light should be off after disconnection");
        }
    }
    #[test]
    fn light_off_when_dropped() {
        let (s, r) = unbounded();
        let light = spawn_light_thread(r);
        // Dropping the sender disconnects the channel; the bulb thread's
        // recv() then errors and the thread must shut the light off.
        drop(s);
        let light_status = light.join().expect("failed to join light thread");
        if let LightStatus::On = light_status {
            panic!("light should be off after dropping sender");
        }
    }
}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a25.rs | activities/src/bin/a25.rs | // Topic: Traits
//
// Requirements:
// * Calculate the perimeter of a square and triangle:
// * The perimeter of a square is the length of any side*4.
// * The perimeter of a triangle is a+b+c where each variable
// represents the length of a side.
// * Print out the perimeter of the shapes
//
// Notes:
// * Use a trait to declare a perimeter calculation function
// * Use a single function to print out the perimeter of the shapes
// * The function must utilize impl trait as a function parameter
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a24.rs | activities/src/bin/a24.rs | // Topic: Iterator
//
// Requirements:
// * Triple the value of each item in a vector.
// * Filter the data to only include values > 10.
// * Print out each element using a for loop.
//
// Notes:
// * Use an iterator chain to accomplish the task.
fn main() {
    // Starter data for the iterator-chain exercise (currently unused).
    let data = vec![1, 2, 3, 4, 5];
}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a28.rs | activities/src/bin/a28.rs | // Topic: New type pattern
//
// Requirements:
// * Display the selected color of shoes, a shirt, and pants
// * Create and display at least one of each type of clothes and color
//
// Notes:
// * Create a new type for each clothing item that wraps the Color enum
// * Each new type should implement a `new` function
// * Create a function for each type of clothes (shoes, shirt, pants)
// that accepts the new type specific to that type of clothing
/// Colors available for the clothing newtypes in this exercise.
enum Color {
    Black,
    Blue,
    Brown,
    /// A free-form, user-supplied color name.
    Custom(String),
    Gray,
    Green,
    Purple,
    Red,
    White,
    Yellow,
}
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a2.rs | activities/src/bin/a2.rs | // Topic: Basic arithmetic
//
// Program requirements:
// * Displays the result of the sum of two numbers
//
// Notes:
// * Use a function to add two numbers together
// * Use a function to display the result
// * Use the "{:?}" token in the println macro to display the result
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a6.rs | activities/src/bin/a6.rs | // Topic: Looping using the while statement
//
// Program requirements:
// * Counts down from 5 to 1, displays the countdown
// in the terminal, then prints "done!" when complete.
//
// Notes:
// * Use a mutable integer variable
// * Use a while statement
// * Print the variable within the while loop
// * Do not use break to exit the loop
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
jayson-lennon/ztm-rust | https://github.com/jayson-lennon/ztm-rust/blob/cb0ac4768346c7270ba3655b32bef022b4803460/activities/src/bin/a38.rs | activities/src/bin/a38.rs | // Topic: Multithreading
//
// Requirements:
// * Run the provided functions in threads
// * Retrieve the data from the threads to print the message
// "Hello, threads!"
//
// Notes:
// * Use the join function to wait for threads to finish
/// Returns the first message fragment after a simulated one-second delay.
fn msg_hello() -> &'static str {
    use std::time::Duration;
    // The delay makes the benefit of running these on threads visible.
    std::thread::sleep(Duration::from_millis(1000));
    "Hello, "
}
/// Returns the middle message fragment after a simulated one-second delay.
fn msg_thread() -> &'static str {
    use std::time::Duration;
    std::thread::sleep(Duration::from_millis(1000));
    "threads"
}
/// Returns the final message fragment after a simulated one-second delay.
fn msg_excited() -> &'static str {
    use std::time::Duration;
    std::thread::sleep(Duration::from_millis(1000));
    "!"
}
fn main() {}
| rust | MIT | cb0ac4768346c7270ba3655b32bef022b4803460 | 2026-01-04T20:24:50.396322Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/vsock_proxy/src/lib.rs | vsock_proxy/src/lib.rs | // Copyright 2019-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
pub mod dns;
pub mod proxy;
/// Address-family restriction applied when resolving remote hosts.
///
/// `Debug` is derived so the variant can appear in logs/errors, and `Eq`
/// accompanies `PartialEq` since equality here is total (clippy
/// `derive_partial_eq_without_eq`). Both additions are backward-compatible.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum IpAddrType {
    /// Only allows IP4 addresses
    IPAddrV4Only,
    /// Only allows IP6 addresses
    IPAddrV6Only,
    /// Allows both IP4 and IP6 addresses
    IPAddrMixed,
}
/// The most common result type provided by VsockProxy operations.
/// The error side is a plain human-readable `String` message.
pub type VsockProxyResult<T> = Result<T, String>;
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/vsock_proxy/src/dns.rs | vsock_proxy/src/dns.rs | // Copyright 2019-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(warnings)]
use std::net::IpAddr;
use chrono::{DateTime, Duration, Utc};
use hickory_resolver::Resolver;
use idna::domain_to_ascii;
use crate::{IpAddrType, VsockProxyResult};
/// `DnsResolutionInfo` represents DNS resolution information, including the resolved
/// IP address, TTL value and last resolution time.
///
/// `Copy` is derived so cached entries can be handed out by value cheaply.
#[derive(Copy, Clone, Debug)]
pub struct DnsResolutionInfo {
    /// The IP address that the hostname was resolved to.
    ip_addr: IpAddr,
    /// The configured duration after which the DNS resolution should be refreshed.
    ttl: Duration,
    /// The timestamp representing the last time the DNS resolution was performed.
    last_dns_resolution_time: DateTime<Utc>,
}
impl DnsResolutionInfo {
    /// Returns true once the TTL has elapsed since the last resolution,
    /// i.e. the cached address should be refreshed.
    pub fn is_expired(&self) -> bool {
        Utc::now() > self.last_dns_resolution_time + self.ttl
    }
    /// Records a fresh resolution, timestamping it with the current time.
    fn new(new_ip_addr: IpAddr, new_ttl: Duration) -> Self {
        DnsResolutionInfo {
            ip_addr: new_ip_addr,
            ttl: new_ttl,
            last_dns_resolution_time: Utc::now(),
        }
    }
    /// The resolved IP address.
    pub fn ip_addr(&self) -> IpAddr {
        self.ip_addr
    }
    /// The TTL this resolution was created with.
    pub fn ttl(&self) -> Duration {
        self.ttl
    }
}
/// Resolve a DNS name (IDNA format) into multiple IP addresses (v4 or v6),
/// each paired with its record TTL. Errors if the lookup yields nothing or
/// nothing of the requested address family.
pub fn resolve(addr: &str, ip_addr_type: IpAddrType) -> VsockProxyResult<Vec<DnsResolutionInfo>> {
    // IDNA parsing
    let addr = domain_to_ascii(addr).map_err(|_| "Could not parse domain name")?;
    // Initialize a DNS resolver using the system's configured nameservers.
    let resolver = Resolver::from_system_conf()
        .map_err(|_| "Error while initializing DNS resolver!".to_string())?;
    // DNS lookup
    // It results in a vector of IPs (V4 and V6)
    let rresults: Vec<DnsResolutionInfo> = resolver
        .lookup_ip(addr)
        .map_err(|_| "DNS lookup failed!")?
        .as_lookup()
        .records()
        .iter()
        .filter_map(|record| {
            // Only address records carry an IP; other record types are skipped.
            if let Some(rdata) = record.data() {
                if let Some(ip_addr) = rdata.ip_addr() {
                    let ttl = Duration::seconds(record.ttl() as i64);
                    return Some(DnsResolutionInfo::new(ip_addr, ttl));
                }
            }
            None
        })
        .collect();
    if rresults.is_empty() {
        return Err("DNS lookup returned no IP addresses!".into());
    }
    // No family restriction: return every resolved address as-is.
    if IpAddrType::IPAddrMixed == ip_addr_type {
        return Ok(rresults);
    }
    // Partition the resolution results into groups that use IPv4 or IPv6 addresses.
    let (rresults_with_ipv4, rresults_with_ipv6): (Vec<_>, Vec<_>) = rresults
        .into_iter()
        .partition(|result| result.ip_addr().is_ipv4());
    if IpAddrType::IPAddrV4Only == ip_addr_type && !rresults_with_ipv4.is_empty() {
        Ok(rresults_with_ipv4)
    } else if IpAddrType::IPAddrV6Only == ip_addr_type && !rresults_with_ipv6.is_empty() {
        Ok(rresults_with_ipv6)
    } else {
        Err("No accepted IP was found.".to_string())
    }
}
/// Resolves a DNS name (IDNA format) and returns only the first result,
/// together with its TTL value.
pub fn resolve_single(addr: &str, ip_addr_type: IpAddrType) -> VsockProxyResult<DnsResolutionInfo> {
    // Delegate the full lookup, then keep just the first entry. `resolve`
    // already errors on an empty result set, but guard here as well rather
    // than indexing.
    match resolve(addr, ip_addr_type)?.into_iter().next() {
        Some(first) => Ok(first),
        None => Err(format!("Unable to resolve the DNS name: {addr}")),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use ctor::ctor;
    use std::env;
    use std::sync::Once;
    // Domains default to public test hosts but are swapped for local ones
    // unless TEST_NONLOCAL_DOMAINS is set. The writes happen exactly once in
    // the ctor-run init() (guarded by `Once`) before any test reads them,
    // which is why `static mut` is tolerated here.
    static TEST_INIT: Once = Once::new();
    static mut INVALID_TEST_DOMAIN: &str = "invalid-domain";
    static mut IPV4_ONLY_TEST_DOMAIN: &str = "v4.ipv6test.app";
    static mut IPV6_ONLY_TEST_DOMAIN: &str = "v6.ipv6test.app";
    static mut DUAL_IP_TEST_DOMAIN: &str = "ipv6test.app";
    #[test]
    #[ctor]
    fn init() {
        // *** To use nonlocal domain names, set TEST_NONLOCAL_DOMAINS variable. ***
        // *** TEST_NONLOCAL_DOMAINS=1 cargo test ***
        TEST_INIT.call_once(|| {
            if env::var_os("TEST_NONLOCAL_DOMAINS").is_none() {
                eprintln!("[warn] dns: using 'localhost' for testing.");
                unsafe {
                    IPV4_ONLY_TEST_DOMAIN = "localhost";
                    IPV6_ONLY_TEST_DOMAIN = "::1";
                    DUAL_IP_TEST_DOMAIN = "localhost";
                }
            }
        });
    }
    #[test]
    fn test_resolve_valid_domain() {
        let domain = unsafe { IPV4_ONLY_TEST_DOMAIN };
        let rresults = resolve(domain, IpAddrType::IPAddrMixed).unwrap();
        assert!(!rresults.is_empty());
    }
    #[test]
    fn test_resolve_valid_dual_ip_domain() {
        let domain = unsafe { DUAL_IP_TEST_DOMAIN };
        let rresults = resolve(domain, IpAddrType::IPAddrMixed).unwrap();
        assert!(!rresults.is_empty());
    }
    #[test]
    fn test_resolve_invalid_domain() {
        let domain = unsafe { INVALID_TEST_DOMAIN };
        let rresults = resolve(domain, IpAddrType::IPAddrMixed);
        assert!(rresults.is_err() && rresults.err().unwrap().eq("DNS lookup failed!"));
    }
    #[test]
    fn test_resolve_ipv4_only() {
        let domain = unsafe { IPV4_ONLY_TEST_DOMAIN };
        let rresults = resolve(domain, IpAddrType::IPAddrV4Only).unwrap();
        assert!(rresults.iter().all(|item| item.ip_addr().is_ipv4()));
    }
    #[test]
    fn test_resolve_ipv6_only() {
        let domain = unsafe { IPV6_ONLY_TEST_DOMAIN };
        let rresults = resolve(domain, IpAddrType::IPAddrV6Only).unwrap();
        assert!(rresults.iter().all(|item| item.ip_addr().is_ipv6()));
    }
    #[test]
    fn test_resolve_no_accepted_ip() {
        // Asking an IPv4-only host for IPv6 results must yield the
        // "No accepted IP was found." error from resolve().
        let domain = unsafe { IPV4_ONLY_TEST_DOMAIN };
        let rresults = resolve(domain, IpAddrType::IPAddrV6Only);
        assert!(rresults.is_err() && rresults.err().unwrap().eq("No accepted IP was found."));
    }
    #[test]
    fn test_resolve_single_address() {
        let domain = unsafe { IPV4_ONLY_TEST_DOMAIN };
        let rresult = resolve_single(domain, IpAddrType::IPAddrMixed).unwrap();
        assert!(rresult.ip_addr().is_ipv4());
        assert!(rresult.ttl != Duration::seconds(0));
    }
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/vsock_proxy/src/main.rs | vsock_proxy/src/main.rs | // Copyright 2019-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(warnings)]
/// Simple proxy for translating vsock traffic to TCP traffic
/// Example of usage:
/// vsock-proxy 8000 127.0.0.1 9000
///
use clap::{Arg, ArgAction, Command};
use env_logger::init;
use log::info;
use vsock_proxy::{
proxy::{check_allowlist, Proxy},
IpAddrType, VsockProxyResult,
};
/// CLI entry point: parses arguments, validates the target against the
/// allowlist, then accepts vsock connections forever, proxying each one to
/// the configured TCP endpoint.
fn main() -> VsockProxyResult<()> {
    init();
    // CLI surface: optional -4/-6 family restriction, worker count, config
    // path, plus three required positionals (local_port remote_addr remote_port).
    let matches = Command::new("Vsock-TCP proxy")
        .about("Vsock-TCP proxy")
        .version(env!("CARGO_PKG_VERSION"))
        .arg(
            Arg::new("ipv4")
                .short('4')
                .long("ipv4")
                .help("Force the proxy to use IPv4 addresses only.")
                .action(ArgAction::SetTrue),
        )
        .arg(
            Arg::new("ipv6")
                .short('6')
                .long("ipv6")
                .help("Force the proxy to use IPv6 addresses only.")
                .action(ArgAction::SetTrue)
                .conflicts_with("ipv4"),
        )
        .arg(
            Arg::new("workers")
                .short('w')
                .long("num_workers")
                .help("Set the maximum number of simultaneous\nconnections supported.")
                .default_value("4"),
        )
        .arg(
            Arg::new("local_port")
                .help("Local Vsock port to listen for incoming connections.")
                .required(true),
        )
        .arg(
            Arg::new("remote_addr")
                .help("Address of the server to be proxyed.")
                .required(true),
        )
        .arg(
            Arg::new("remote_port")
                .help("Remote TCP port of the server to be proxyed.")
                .required(true),
        )
        .arg(
            Arg::new("config_file")
                .long("config")
                .help("YAML file containing the services that\ncan be forwarded.\n")
                .default_value("/etc/nitro_enclaves/vsock-proxy.yaml"),
        )
        .get_matches();
    let local_port = matches
        .get_one::<String>("local_port")
        // This argument is required, so clap ensures it's available
        .unwrap();
    let local_port = local_port
        .parse::<u32>()
        .map_err(|_| "Local port is not valid")?;
    let ipv4_only = matches.get_flag("ipv4");
    let ipv6_only = matches.get_flag("ipv6");
    // -4 and -6 are mutually exclusive (clap `conflicts_with`), so the
    // (true, true) combination cannot occur; anything else means "mixed".
    let ip_addr_type: IpAddrType = match (ipv4_only, ipv6_only) {
        (true, false) => IpAddrType::IPAddrV4Only,
        (false, true) => IpAddrType::IPAddrV6Only,
        _ => IpAddrType::IPAddrMixed,
    };
    let remote_addr = matches
        .get_one::<String>("remote_addr")
        // This argument is required, so clap ensures it's available
        .unwrap();
    let remote_port = matches
        .get_one::<String>("remote_port")
        // This argument is required, so clap ensures it's available
        .unwrap();
    let remote_port = remote_port
        .parse::<u16>()
        .map_err(|_| "Remote port is not valid")?;
    let num_workers = matches
        .get_one::<String>("workers")
        // This argument has a default value, so it is available
        .unwrap();
    let num_workers = num_workers
        .parse::<usize>()
        .map_err(|_| "Number of workers is not valid")?;
    if num_workers == 0 {
        return Err("Number of workers must not be 0".to_string());
    }
    info!("Checking allowlist configuration");
    let config_file = matches.get_one::<String>("config_file").map(String::as_str);
    let remote_host = remote_addr.to_string();
    check_allowlist(&remote_host, remote_port, config_file, ip_addr_type)
        .map_err(|err| format!("Error at checking the allowlist: {err}"))?;
    let mut proxy = Proxy::new(
        local_port,
        remote_host,
        remote_port,
        num_workers,
        ip_addr_type,
    )
    .map_err(|err| format!("Could not create proxy: {err}"))?;
    let listener = proxy
        .sock_listen()
        .map_err(|err| format!("Could not listen for connections: {err}"))?;
    info!("Proxy is now in listening state");
    // Serve forever: each accepted connection is dispatched to the worker
    // pool inside sock_accept; an accept error terminates the proxy.
    loop {
        proxy
            .sock_accept(&listener)
            .map_err(|err| format!("Could not accept connection: {err}"))?;
    }
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/vsock_proxy/src/proxy.rs | vsock_proxy/src/proxy.rs | // Copyright 2019-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(warnings)]
/// Contains code for Proxy, a library used for translating vsock traffic to
/// TCP traffic
use log::{info, warn};
use nix::sys::select::{select, FdSet};
use nix::sys::socket::SockType;
use std::fs::File;
use std::io::{Read, Write};
use std::net::{IpAddr, SocketAddr, TcpStream};
use std::os::unix::io::AsRawFd;
use threadpool::ThreadPool;
use vsock::{VsockAddr, VsockListener};
use yaml_rust2::YamlLoader;
use crate::dns::DnsResolutionInfo;
use crate::{dns, IpAddrType, VsockProxyResult};
const BUFF_SIZE: usize = 8192;
pub const VSOCK_PROXY_CID: u32 = 3;
pub const VSOCK_PROXY_PORT: u32 = 8000;
/// Checks if the forwarded server is allowed, providing its IP on success.
///
/// An allowlist entry matches when its port equals `remote_port` and either
/// its `address` string equals `remote_host`, or one of the IPs it resolves
/// to equals the IP `remote_host` resolves to.
pub fn check_allowlist(
    remote_host: &str,
    remote_port: u16,
    config_file: Option<&str>,
    ip_addr_type: IpAddrType,
) -> VsockProxyResult<IpAddr> {
    if let Some(config_file) = config_file {
        let mut f = File::open(config_file).map_err(|_| "Could not open the file")?;
        let mut content = String::new();
        f.read_to_string(&mut content)
            .map_err(|_| "Could not read the file")?;
        let docs = YamlLoader::load_from_str(&content).map_err(|_| "Bad yaml format")?;
        let services = docs[0]["allowlist"]
            .as_vec()
            .ok_or("No allowlist field")?;
        // Obtain the remote server's IP address.
        let dns_result = dns::resolve_single(remote_host, ip_addr_type)?;
        let remote_addr = dns_result.ip_addr();
        for raw_service in services {
            let addr = raw_service["address"].as_str().ok_or("No address field")?;
            let port = raw_service["port"]
                .as_i64()
                .ok_or("No port field or invalid type")?;
            // Reject out-of-range values instead of truncating with `as u16`,
            // which would let e.g. an entry with port 65651 spuriously match
            // remote_port 115. An out-of-range port can never match, so skip.
            let port = match u16::try_from(port) {
                Ok(port) => port,
                Err(_) => continue,
            };
            // Start by matching against ports.
            if port != remote_port {
                continue;
            }
            // Attempt to match directly against the allowlisted hostname first.
            if addr == remote_host {
                info!("Matched with host name \"{}\" and port \"{}\"", addr, port);
                return Ok(remote_addr);
            }
            // If hostname matching failed, attempt to match against IPs.
            let rresults = dns::resolve(addr, ip_addr_type);
            if let Some(matched_addr) = rresults
                .into_iter()
                .flatten()
                .find(|rresult| rresult.ip_addr() == remote_addr)
                .map(|_| remote_addr)
            {
                info!(
                    "Matched with host IP \"{}\" and port \"{}\"",
                    matched_addr, port
                );
                return Ok(matched_addr);
            }
        }
        warn!("Unable to resolve allow listed host: {:?}.", remote_host);
    }
    Err("The given address and port are not allowed".to_string())
}
/// Configuration parameters for port listening and remote destination
pub struct Proxy {
    /// Local vsock port the proxy listens on.
    local_port: u32,
    /// Hostname (or IP string) of the TCP server being fronted.
    remote_host: String,
    /// TCP port of the remote server.
    remote_port: u16,
    /// Cached DNS resolution for `remote_host`; `None` until first lookup.
    dns_resolution_info: Option<DnsResolutionInfo>,
    /// Worker pool that services accepted client connections.
    pool: ThreadPool,
    /// Only `SockType::Stream` is currently implemented.
    sock_type: SockType,
    /// Restricts which address families DNS resolution may return.
    ip_addr_type: IpAddrType,
}
impl Proxy {
    /// Builds a proxy with a worker pool of `num_workers` threads; DNS for
    /// `remote_host` is resolved lazily on the first accepted connection.
    pub fn new(
        local_port: u32,
        remote_host: String,
        remote_port: u16,
        num_workers: usize,
        ip_addr_type: IpAddrType,
    ) -> VsockProxyResult<Self> {
        let pool = ThreadPool::new(num_workers);
        let sock_type = SockType::Stream;
        let dns_resolution_info: Option<DnsResolutionInfo> = None;
        Ok(Proxy {
            local_port,
            remote_host,
            remote_port,
            dns_resolution_info,
            pool,
            sock_type,
            ip_addr_type,
        })
    }
    /// Creates a listening socket
    /// Returns the file descriptor for it or the appropriate error
    pub fn sock_listen(&self) -> VsockProxyResult<VsockListener> {
        let sockaddr = VsockAddr::new(VSOCK_PROXY_CID, self.local_port);
        let listener = VsockListener::bind(&sockaddr)
            .map_err(|_| format!("Could not bind to {sockaddr:?}"))?;
        info!("Bound to {:?}", sockaddr);
        Ok(listener)
    }
    /// Accepts an incoming connection coming on listener and handles it on a
    /// different thread
    /// Returns the handle for the new thread or the appropriate error
    pub fn sock_accept(&mut self, listener: &VsockListener) -> VsockProxyResult<()> {
        let (mut client, client_addr) = listener
            .accept()
            .map_err(|_| "Could not accept connection")?;
        info!("Accepted connection on {:?}", client_addr);
        // Re-resolve the remote host only when there is no cached DNS entry
        // yet or the cached entry's TTL has lapsed.
        let dns_needs_resolution = self
            .dns_resolution_info
            .map_or(true, |info| info.is_expired());
        let remote_addr = if dns_needs_resolution {
            info!("Resolving hostname: {}.", self.remote_host);
            let dns_resolution = dns::resolve_single(&self.remote_host, self.ip_addr_type)?;
            info!(
                "Using IP \"{:?}\" for the given server \"{}\". (TTL: {} secs)",
                dns_resolution.ip_addr(),
                self.remote_host,
                dns_resolution.ttl().num_seconds()
            );
            self.dns_resolution_info = Some(dns_resolution);
            dns_resolution.ip_addr()
        } else {
            self.dns_resolution_info
                .ok_or("DNS resolution failed!")?
                .ip_addr()
        };
        let sockaddr = SocketAddr::new(remote_addr, self.remote_port);
        let sock_type = self.sock_type;
        // The proxied connection runs on the worker pool: connect to the
        // remote TCP server, then shuttle bytes both ways until one side
        // disconnects.
        self.pool.execute(move || {
            let mut server = match sock_type {
                SockType::Stream => TcpStream::connect(sockaddr)
                    .map_err(|_| format!("Could not connect to {sockaddr:?}")),
                _ => Err("Socket type not implemented".to_string()),
            }
            .expect("Could not create connection");
            info!("Connected client from {:?} to {:?}", client_addr, sockaddr);
            let client_socket = client.as_raw_fd();
            let server_socket = server.as_raw_fd();
            let mut disconnected = false;
            while !disconnected {
                let mut set = FdSet::new();
                set.insert(client_socket);
                set.insert(server_socket);
                // Block until at least one of the two sockets is readable.
                select(None, Some(&mut set), None, None, None).expect("select");
                if set.contains(client_socket) {
                    disconnected = transfer(&mut client, &mut server);
                }
                if set.contains(server_socket) {
                    disconnected = transfer(&mut server, &mut client);
                }
            }
            info!("Client on {:?} disconnected", client_addr);
        });
        Ok(())
    }
}
}
/// Transfers a chunk of at most `BUFF_SIZE` (8 KiB) bytes from src to dst.
/// (The previous doc said "4KB", which contradicted the actual buffer size.)
/// If no error occurs, returns true if the source disconnects and false otherwise.
fn transfer(src: &mut dyn Read, dst: &mut dyn Write) -> bool {
    let mut buffer = [0u8; BUFF_SIZE];
    let nbytes = match src.read(&mut buffer) {
        Ok(n) => n,
        // EINTR is transient per std::io::ErrorKind::Interrupted — don't
        // report a disconnect; the caller's select loop will retry.
        Err(ref err) if err.kind() == std::io::ErrorKind::Interrupted => return false,
        // Any other read error is treated as a disconnect, as before.
        Err(_) => 0,
    };
    if nbytes == 0 {
        return true;
    }
    dst.write_all(&buffer[..nbytes]).is_err()
}
#[cfg(test)]
mod tests {
    use rand;
    use std::fs;
    use std::fs::File;
    use std::io::Write;
    use std::process::Command;
    use super::*;
    /// Test transfer function with more data than buffer
    #[test]
    fn test_transfer() {
        // Random payload twice the transfer buffer so at least two
        // transfer() calls are needed to drain the source.
        let data: Vec<u8> = (0..2 * BUFF_SIZE).map(|_| rand::random::<u8>()).collect();
        // NOTE(review): uses a fixed relative "tmp" dir and the external
        // `cmp` binary — assumes a Unix-like environment and could collide
        // if tests run concurrently from the same working directory.
        let _ret = fs::create_dir("tmp");
        let mut src = File::create("tmp/src").unwrap();
        let mut dst = File::create("tmp/dst").unwrap();
        let _ret = src.write_all(&data);
        let mut src = File::open("tmp/src").unwrap();
        while !transfer(&mut src, &mut dst) {}
        let status = Command::new("cmp")
            .arg("tmp/src")
            .arg("tmp/dst")
            .status()
            .expect("command");
        let _ret = fs::remove_dir_all("tmp");
        assert!(status.success());
    }
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/vsock_proxy/tests/connection_test.rs | vsock_proxy/tests/connection_test.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(warnings)]
use std::io::{Read, Write};
use std::net::TcpListener;
use std::net::{IpAddr, Ipv4Addr};
use std::str;
use std::sync::mpsc;
use std::thread;
use tempfile::NamedTempFile;
use vsock::{VsockAddr, VsockStream};
use vsock_proxy::{proxy::Proxy, IpAddrType};
/// Opens a client vsock connection to the proxy CID on the given port,
/// panicking if the connection cannot be established.
fn vsock_connect(port: u32) -> VsockStream {
    let sockaddr = VsockAddr::new(vsock_proxy::proxy::VSOCK_PROXY_CID, port);
    VsockStream::connect(&sockaddr).expect("Could not connect")
}
/// Test connection with both client and server sending each other messages
#[test]
fn test_tcp_connection() {
    // Proxy will translate from port 8000 vsock to localhost port 9000 TCP
    let addr = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).to_string();
    // NOTE(review): this temp allowlist file is written but never handed to
    // the proxy — Proxy::new takes no config path; confirm whether it is
    // vestigial.
    let mut file = NamedTempFile::new().unwrap();
    file.write_all(
        b"allowlist:\n\
        - {address: 127.0.0.1, port: 9000}",
    )
    .unwrap();
    let mut proxy = Proxy::new(
        vsock_proxy::proxy::VSOCK_PROXY_PORT,
        addr,
        9000,
        2,
        IpAddrType::IPAddrMixed,
    )
    .unwrap();
    let (tx, rx) = mpsc::channel();
    // Create a listening TCP server on port 9000
    let server_handle = thread::spawn(move || {
        let server = TcpListener::bind("127.0.0.1:9000").expect("server bind");
        // Signal the main thread that the server socket is bound before the
        // proxy starts forwarding to it.
        tx.send(true).expect("server send event");
        let (mut stream, _) = server.accept().expect("server accept");
        // Read request
        let mut buf = [0; 13];
        stream.read_exact(&mut buf).expect("server read");
        let msg = str::from_utf8(&buf).expect("from_utf8");
        assert_eq!(msg, "client2server");
        // Write response
        stream.write_all(b"server2client").expect("server write");
    });
    let _ret = rx.recv().expect("main recv event");
    let (tx, rx) = mpsc::channel();
    // Start proxy in a different thread
    let ret = proxy.sock_listen();
    let listener = ret.expect("proxy listen");
    let proxy_handle = thread::spawn(move || {
        tx.send(true).expect("proxy send event");
        proxy.sock_accept(&listener).expect("proxy accept");
    });
    let _ret = rx.recv().expect("main recv event");
    // Start client that connects to proxy on port 8000 vsock
    let client_handle = thread::spawn(move || {
        let mut stream = vsock_connect(vsock_proxy::proxy::VSOCK_PROXY_PORT);
        // Write request
        stream.write_all(b"client2server").expect("client write");
        // Read response
        let mut buf = [0; 13];
        stream.read_exact(&mut buf).expect("client read");
        let msg = str::from_utf8(&buf).expect("from_utf8");
        assert_eq!(msg, "server2client");
    });
    server_handle.join().expect("Server panicked");
    proxy_handle.join().expect("Proxy panicked");
    client_handle.join().expect("Client panicked");
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/eif_loader/src/lib.rs | eif_loader/src/lib.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(warnings)]
use libc::c_int;
use nix::poll::poll;
use nix::poll::{PollFd, PollFlags};
use std::io::Read;
use std::io::Write;
use std::os::unix::io::AsRawFd;
use vsock::VsockListener;
/// Timeout of 1 second in milliseconds
pub const TIMEOUT_SECOND_MS: i32 = 1000;
/// Timeout of 1 minute 30 seconds in milliseconds
pub const TIMEOUT_MINUTE_MS: i32 = 90 * TIMEOUT_SECOND_MS;
const HEART_BEAT: u8 = 0xB7;
#[derive(Debug, PartialEq, Eq)]
/// Internal errors while sending an Eif file
pub enum EifLoaderError {
    /// poll() on the listening socket failed.
    SocketPollingError,
    /// Accepting the enclave's vsock connection failed.
    VsockAcceptingError,
    /// Binding the vsock listener failed.
    VsockBindingError,
    /// The heartbeat exchange failed or carried an unexpected byte.
    VsockReceivingError,
    /// The enclave did not connect within the poll timeout.
    VsockTimeoutError,
}
/// Waits for the enclave to signal that it has booted.
///
/// Polls `listener` for up to `poll_timeout_ms` milliseconds, accepts the
/// enclave's vsock connection, reads a single heartbeat byte (HEART_BEAT =
/// 0xB7), validates it and echoes it back as an acknowledgement.
pub fn enclave_ready(
    listener: VsockListener,
    poll_timeout_ms: c_int,
) -> Result<(), EifLoaderError> {
    let mut poll_fds = [PollFd::new(listener.as_raw_fd(), PollFlags::POLLIN)];
    let result = poll(&mut poll_fds, poll_timeout_ms);
    // poll() returns the number of ready descriptors: 0 means timeout;
    // anything other than exactly 1 (we registered a single fd) is an error.
    if result == Ok(0) {
        return Err(EifLoaderError::VsockTimeoutError);
    } else if result != Ok(1) {
        return Err(EifLoaderError::SocketPollingError);
    }
    let mut stream = listener
        .accept()
        .map_err(|_err| EifLoaderError::VsockAcceptingError)?;
    // Wait until the other end is closed
    let mut buf = [0u8];
    let bytes = stream
        .0
        .read(&mut buf)
        .map_err(|_err| EifLoaderError::VsockReceivingError)?;
    if bytes != 1 || buf[0] != HEART_BEAT {
        return Err(EifLoaderError::VsockReceivingError);
    }
    // Acknowledge by echoing the heartbeat byte back to the enclave.
    stream
        .0
        .write_all(&buf)
        .map_err(|_err| EifLoaderError::VsockReceivingError)?;
    Ok(())
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/src/lib.rs | src/lib.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(missing_docs)]
#![deny(warnings)]
#![allow(clippy::too_many_arguments)]
//! This crate provides the functionality for the Nitro CLI process.
/// The common module (shared between the CLI and enclave process).
pub mod common;
/// The enclave process module.
pub mod enclave_proc;
/// The module covering the communication between a CLI instance and enclave processes.
pub mod enclave_proc_comm;
/// The CLI-specific utilities module.
pub mod utils;
use aws_nitro_enclaves_image_format::defs::eif_hasher::EifHasher;
use aws_nitro_enclaves_image_format::utils::eif_reader::EifReader;
use aws_nitro_enclaves_image_format::utils::eif_signer::EifSigner;
use aws_nitro_enclaves_image_format::utils::SignKeyData;
use aws_nitro_enclaves_image_format::{generate_build_info, utils::get_pcrs};
use log::{debug, info};
use sha2::{Digest, Sha384};
use std::collections::BTreeMap;
use std::convert::TryFrom;
use std::fs::{File, OpenOptions};
use std::io::{self, Read, Write};
use std::os::unix::net::UnixStream;
use std::path::{Path, PathBuf};
use common::commands_parser::{BuildEnclavesArgs, EmptyArgs, RunEnclavesArgs, SignEifArgs};
use common::json_output::{
EifDescribeInfo, EnclaveBuildInfo, EnclaveTerminateInfo, MetadataDescribeInfo,
};
use common::{enclave_proc_command_send_single, get_sockets_dir_path};
use common::{EnclaveProcessCommandType, NitroCliErrorEnum, NitroCliFailure, NitroCliResult};
use enclave_proc_comm::{
enclave_proc_command_send_all, enclave_proc_handle_outputs, enclave_process_handle_all_replies,
};
use utils::{Console, PcrType};
/// Hypervisor CID as defined by <http://man7.org/linux/man-pages/man7/vsock.7.html>.
pub const VMADDR_CID_HYPERVISOR: u32 = 0;
/// An offset applied to an enclave's CID in order to determine its console port.
pub const CID_TO_CONSOLE_PORT_OFFSET: u32 = 10000;
/// Default blobs path to be used if the corresponding environment variable is not set.
const DEFAULT_BLOBS_PATH: &str = "/usr/share/nitro_enclaves/blobs/";
/// Builds an enclave image file (EIF) from the provided build arguments.
pub fn build_enclaves(args: BuildEnclavesArgs) -> NitroCliResult<()> {
    debug!("build_enclaves");
    eprintln!("Start building the Enclave Image...");
    // Delegate the heavy lifting; only success matters here — the returned
    // file handle and measurements are discarded.
    build_from_docker(
        &args.docker_uri,
        &args.docker_dir,
        &args.output,
        &args.signing_certificate,
        &args.private_key,
        &args.img_name,
        &args.img_version,
        &args.metadata,
    )
    .map(|_artifacts| ())
    .map_err(|e| e.add_subaction("Failed to build EIF from docker".to_string()))
}
/// Build an enclave image file from a Docker image.
///
/// Reads the kernel command line and kernel blob from the blobs directory,
/// builds (when `docker_dir` is given) or pulls the Docker image, and writes
/// the EIF to `output_path`. On success returns the opened output file
/// together with the map of boot measurements (PCRs), which is also printed
/// as pretty JSON on stdout.
pub fn build_from_docker(
    docker_uri: &str,
    docker_dir: &Option<String>,
    output_path: &str,
    signing_certificate: &Option<String>,
    private_key: &Option<String>,
    img_name: &Option<String>,
    img_version: &Option<String>,
    metadata_path: &Option<String>,
) -> NitroCliResult<(File, BTreeMap<String, String>)> {
    let blobs_path =
        blobs_path().map_err(|e| e.add_subaction("Failed to retrieve blobs path".to_string()))?;
    let cmdline_file_path = format!("{blobs_path}/cmdline");
    let mut cmdline_file = File::open(cmdline_file_path.clone()).map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Could not open kernel command line file: {e:?}"),
            NitroCliErrorEnum::FileOperationFailure
        )
        .add_info(vec![&cmdline_file_path, "Open"])
    })?;
    let mut cmdline = String::new();
    cmdline_file.read_to_string(&mut cmdline).map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Failed to read kernel command line: {e:?}"),
            NitroCliErrorEnum::FileOperationFailure
        )
        .add_info(vec![&cmdline_file_path, "Read"])
    })?;
    // Create/truncate the EIF output; opened read-write because the handle
    // is returned to the caller.
    let mut file_output = OpenOptions::new()
        .read(true)
        .create(true)
        .write(true)
        .truncate(true)
        .open(output_path)
        .map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Could not create output file: {e:?}"),
                NitroCliErrorEnum::FileOperationFailure
            )
            .add_info(vec![output_path, "Open"])
        })?;
    // Pick the kernel blob name matching the host architecture.
    let kernel_image_name = match std::env::consts::ARCH {
        "aarch64" => "Image",
        "x86_64" => "bzImage",
        _ => "undefined",
    };
    let kernel_path = format!("{blobs_path}/{kernel_image_name}");
    let build_info = generate_build_info!(&format!("{kernel_path}.config")).map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Could not generate build info: {e:?}"),
            NitroCliErrorEnum::EifBuildingError
        )
    })?;
    let mut docker2eif = enclave_build::Docker2Eif::new(
        docker_uri.to_string(),
        format!("{blobs_path}/init"),
        format!("{blobs_path}/nsm.ko"),
        kernel_path,
        cmdline.trim().to_string(),
        format!("{blobs_path}/linuxkit"),
        &mut file_output,
        artifacts_path()?,
        signing_certificate,
        private_key,
        img_name.clone(),
        img_version.clone(),
        metadata_path.clone(),
        build_info,
    )
    .map_err(|err| {
        new_nitro_cli_failure!(
            &format!("Failed to create EIF image: {err:?}"),
            NitroCliErrorEnum::EifBuildingError
        )
    })?;
    // A local docker dir means "build the image first"; otherwise pull it.
    if let Some(docker_dir) = docker_dir {
        docker2eif
            .build_docker_image(docker_dir.clone())
            .map_err(|err| {
                new_nitro_cli_failure!(
                    &format!("Failed to build docker image: {err:?}"),
                    NitroCliErrorEnum::DockerImageBuildError
                )
            })?;
    } else {
        docker2eif.pull_docker_image().map_err(|err| {
            new_nitro_cli_failure!(
                &format!("Failed to pull docker image: {err:?}"),
                NitroCliErrorEnum::DockerImagePullError
            )
        })?;
    }
    let measurements = docker2eif.create().map_err(|err| {
        new_nitro_cli_failure!(
            &format!("Failed to create EIF image: {err:?}"),
            NitroCliErrorEnum::EifBuildingError
        )
    })?;
    eprintln!("Enclave Image successfully created.");
    let info = EnclaveBuildInfo::new(measurements.clone());
    // Print the PCR measurements as pretty JSON on stdout.
    println!(
        "{}",
        serde_json::to_string_pretty(&info).map_err(|err| new_nitro_cli_failure!(
            &format!("Failed to display EnclaveBuild data: {err:?}"),
            NitroCliErrorEnum::SerdeError
        ))?
    );
    Ok((file_output, measurements))
}
/// Creates new enclave name
///
/// Requests the names of all running instances and checks the
/// occurrence of the chosen name for the new enclave.
pub fn new_enclave_name(run_args: RunEnclavesArgs, names: Vec<String>) -> NitroCliResult<String> {
    let enclave_name = match run_args.enclave_name {
        Some(enclave_name) => enclave_name,
        None => {
            // Derive a default name from the EIF file name, e.g.
            // path/to/eif/hello.eif -> hello. If the ".eif" extension is
            // missing, the whole file name is used.
            run_args
                .eif_path
                .rsplit('/')
                .next()
                .unwrap_or_default()
                .trim_end_matches(".eif")
                .to_string()
        }
    };

    // If duplicates are found, append an increasing index to the name,
    // e.g. testName -> testName_1 -> testName_2 ...
    let mut idx = 0;
    let mut result_name = enclave_name.clone();
    while names.contains(&result_name) {
        idx += 1;
        result_name = format!("{enclave_name}_{idx}");
    }
    Ok(result_name)
}
/// Returns information related to the given EIF
///
/// Calculates PCRs 0, 1, 2, 8 at each call in addition to metadata,
/// EIF details, identification provided by the user at build.
pub fn describe_eif(eif_path: String) -> NitroCliResult<EifDescribeInfo> {
    // Open and parse the EIF so its header, sections and hashers are available.
    let mut eif_reader = EifReader::from_eif(eif_path).map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Failed to initialize EIF reader: {e:?}"),
            NitroCliErrorEnum::EifParsingError
        )
    })?;

    // Recompute the PCR measurements from the section hashers; the last
    // argument tells `get_pcrs` whether a signature section exists (which
    // is what makes PCR8 available below).
    let measurements = get_pcrs(
        &mut eif_reader.image_hasher,
        &mut eif_reader.bootstrap_hasher,
        &mut eif_reader.app_hasher,
        &mut eif_reader.cert_hasher,
        Sha384::new(),
        eif_reader.signature_section.is_some(),
    )
    .map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Failed to get PCR values: {e:?}"),
            NitroCliErrorEnum::EifParsingError
        )
    })?;

    // Build-time identification is optional: it is only filled in when the
    // EIF carries a metadata section.
    let mut describe_meta: Option<MetadataDescribeInfo> = None;
    let mut img_name: Option<String> = None;
    let mut img_version: Option<String> = None;
    if let Some(meta) = eif_reader.get_metadata() {
        img_name = Some(meta.img_name.clone());
        img_version = Some(meta.img_version.clone());
        describe_meta = Some(MetadataDescribeInfo::new(meta));
    }

    // Assemble the describe payload; signing-related fields are filled in
    // below only when the image is signed.
    let mut info = EifDescribeInfo {
        version: eif_reader.get_header().version,
        build_info: EnclaveBuildInfo::new(measurements.clone()),
        is_signed: false,
        cert_info: None,
        crc_check: eif_reader.check_crc(),
        sign_check: None,
        img_name,
        img_version,
        metadata: describe_meta,
    };

    // Check if signature section is present (PCR8 is only reported for
    // signed images); if so, add the signing-certificate details.
    if measurements.contains_key("PCR8") {
        let cert_info = eif_reader
            .get_certificate_info(measurements)
            .map_err(|err| {
                new_nitro_cli_failure!(
                    &format!("Failed to get certificate sigining info: {err:?}"),
                    NitroCliErrorEnum::EifParsingError
                )
            })?;
        info.is_signed = true;
        info.cert_info = Some(cert_info);
        info.sign_check = eif_reader.sign_check;
    }

    // Print the gathered information as pretty JSON and also return it.
    println!(
        "{}",
        serde_json::to_string_pretty(&info)
            .map_err(|err| {
                new_nitro_cli_failure!(
                    &format!("Failed to display EIF describe data: {err:?}"),
                    NitroCliErrorEnum::SerdeError
                )
            })?
            .as_str(),
    );
    Ok(info)
}
/// Signs EIF with the given key and certificate. If EIF already has a signature, it will be replaced.
pub fn sign_eif(args: SignEifArgs) -> NitroCliResult<()> {
    // Load the signing material; both the private key and the certificate
    // must be supplied for signing info to exist.
    let sign_info = match (&args.private_key, &args.signing_certificate) {
        (Some(key), Some(cert)) => SignKeyData::new(key, Path::new(&cert)).map_or_else(
            |e| {
                // Report the problem and fall through with no signing data;
                // `EifSigner::new` below turns the `None` into an error.
                eprintln!("Could not read signing info: {e:?}");
                None
            },
            Some,
        ),
        _ => None,
    };
    let signer = EifSigner::new(sign_info).ok_or_else(|| {
        new_nitro_cli_failure!(
            format!("Failed to create EifSigner"),
            NitroCliErrorEnum::EIFSigningError
        )
    })?;
    signer.sign_image(&args.eif_path).map_err(|e| {
        new_nitro_cli_failure!(
            format!("Failed to sign image: {}", e),
            NitroCliErrorEnum::EIFSigningError
        )
    })?;
    eprintln!("Enclave Image successfully signed.");

    // Re-open the (now signed) EIF and print its refreshed measurements.
    let mut eif_reader = EifReader::from_eif(args.eif_path).map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Failed to initialize EIF reader: {e:?}"),
            NitroCliErrorEnum::EifParsingError
        )
    })?;
    eif_reader
        .get_measurements()
        .map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to get PCR values: {e:?}"),
                NitroCliErrorEnum::EifParsingError
            )
        })
        .and_then(|measurements| {
            let info = EnclaveBuildInfo::new(measurements);
            let printed_info = serde_json::to_string_pretty(&info).map_err(|err| {
                new_nitro_cli_failure!(
                    &format!("Failed to display EnclaveBuild data: {err:?}"),
                    NitroCliErrorEnum::SerdeError
                )
            })?;
            println!("{printed_info}");
            Ok(())
        })
}
/// Returns the directory that holds the enclave-image build blobs.
///
/// The location is taken from the `NITRO_CLI_BLOBS` environment variable,
/// falling back to `DEFAULT_BLOBS_PATH` when the variable is unset. The
/// blobs stored there are:
/// - *bzImage*: A kernel image if the local arch is x86_64 or
/// - *Image* : A kernel image if the local arch is aarch64
/// - *init*: The initial init process that is bootstraping the environment.
/// - *linuxkit*: A slightly modified version of linuxkit.
/// - *cmdline*: A file containing the kernel commandline.
fn blobs_path() -> NitroCliResult<String> {
    // TODO Improve error message with a suggestion to the user
    // consider using the default path used by rpm install
    let path = match std::env::var("NITRO_CLI_BLOBS") {
        Ok(configured) => configured,
        Err(_) => DEFAULT_BLOBS_PATH.to_string(),
    };
    Ok(path)
}
/// Returns the value of the `NITRO_CLI_ARTIFACTS` environment variable.
///
/// This variable configures the path where the build artifacts should be saved.
/// When unset, `$HOME/.nitro_cli/` is used instead; if neither variable is
/// available an `ArtifactsPathNotSet` error is returned. The chosen directory
/// is created if it does not already exist.
fn artifacts_path() -> NitroCliResult<String> {
    // Pick the artifacts directory: explicit override first, then $HOME.
    let artifacts = if let Ok(artifacts) = std::env::var("NITRO_CLI_ARTIFACTS") {
        artifacts
    } else if let Ok(home) = std::env::var("HOME") {
        format!("{home}/.nitro_cli/")
    } else {
        return Err(new_nitro_cli_failure!(
            "Could not find a folder for the CLI artifacts, set either HOME or NITRO_CLI_ARTIFACTS",
            NitroCliErrorEnum::ArtifactsPathNotSet
        ));
    };

    // Single creation path (the two branches previously duplicated this
    // call); `create_dir_all` borrows the path, so no clone is needed.
    std::fs::create_dir_all(&artifacts).map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Could not create artifacts path {artifacts}: {e:?}"),
            NitroCliErrorEnum::FileOperationFailure
        )
        .add_info(vec![&artifacts, "Create"])
    })?;
    Ok(artifacts)
}
/// Wrapper over the console connection function.
///
/// Announces the connection attempt on stdout and then delegates to
/// [`enclave_console`].
pub fn console_enclaves(
    enclave_cid: u64,
    disconnect_timeout_sec: Option<u64>,
) -> NitroCliResult<()> {
    debug!("console_enclaves");
    println!("Connecting to the console for enclave {enclave_cid}...");
    enclave_console(enclave_cid, disconnect_timeout_sec)
}
/// Connects to the enclave console and prints it continuously.
///
/// The console is reached over vsock at CID `VMADDR_CID_HYPERVISOR`, port
/// `enclave_cid + CID_TO_CONSOLE_PORT_OFFSET`; its output is streamed to
/// stdout until the connection closes or, when provided,
/// `disconnect_timeout_sec` elapses.
pub fn enclave_console(
    enclave_cid: u64,
    disconnect_timeout_sec: Option<u64>,
) -> NitroCliResult<()> {
    let console = Console::new(
        VMADDR_CID_HYPERVISOR,
        // The enclave CID arrives as u64 but vsock ports are u32, hence the
        // checked conversion before adding the console port offset.
        u32::try_from(enclave_cid).map_err(|err| {
            new_nitro_cli_failure!(
                &format!("Failed to parse enclave CID: {err:?}"),
                NitroCliErrorEnum::IntegerParsingError
            )
        })? + CID_TO_CONSOLE_PORT_OFFSET,
    )
    .map_err(|e| e.add_subaction("Connect to enclave console".to_string()))?;
    println!("Successfully connected to the console.");
    // Stream console output to stdout until EOF or the optional timeout.
    console
        .read_to(io::stdout().by_ref(), disconnect_timeout_sec)
        .map_err(|e| e.add_subaction("Connect to enclave console".to_string()))?;
    Ok(())
}
/// Terminates all enclave instances belonging to the current user (or all
/// instances, if the current user has `root` permissions).
pub fn terminate_all_enclaves() -> NitroCliResult<()> {
    let sockets_dir = get_sockets_dir_path();
    let mut replies: Vec<UnixStream> = vec![];

    // Each running enclave process listens on a Unix socket inside the
    // sockets directory; enumerate all of them.
    let sockets = std::fs::read_dir(sockets_dir.as_path()).map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Error while accessing sockets directory: {e:?}"),
            NitroCliErrorEnum::FileOperationFailure
        )
        .add_info(vec![
            sockets_dir
                .as_path()
                .to_str()
                .unwrap_or("Invalid unicode directory name"),
            "Read",
        ])
    })?;

    // Unreadable directory entries are counted; sockets that cannot be
    // connected to are collected and later removed as stale.
    let mut err_socket_files: usize = 0;
    let mut failed_connections: Vec<PathBuf> = Vec::new();
    for socket in sockets {
        let entry = match socket {
            Ok(value) => value,
            Err(_) => {
                err_socket_files += 1;
                continue;
            }
        };

        // Send a `terminate-enclave` command through each socket,
        // irrespective of the enclave process owner. The security policy
        // inside the enclave process is responsible with checking the
        // command's permissions.
        let mut stream = match UnixStream::connect(entry.path()) {
            Ok(value) => value,
            Err(_) => {
                failed_connections.push(entry.path());
                continue;
            }
        };

        if enclave_proc_command_send_single::<EmptyArgs>(
            EnclaveProcessCommandType::Terminate,
            None,
            &mut stream,
        )
        .is_err()
        {
            failed_connections.push(entry.path());
        } else {
            replies.push(stream);
        }
    }

    // Remove stale socket files.
    for stale_socket in &failed_connections {
        info!("Deleting stale socket: {:?}", stale_socket);
        // Best-effort removal: a failure to unlink a stale socket is not fatal.
        let _ = std::fs::remove_file(stale_socket);
    }

    // Collect termination replies; exit codes 0 and EACCES are accepted
    // (EACCES presumably covers enclave processes owned by other users —
    // TODO confirm against `enclave_process_handle_all_replies`).
    enclave_process_handle_all_replies::<EnclaveTerminateInfo>(
        &mut replies,
        failed_connections.len() + err_socket_files,
        true,
        vec![0, libc::EACCES],
    )
    .map_err(|e| e.add_subaction("Failed to handle all enclave processes replies".to_string()))
    .map(|_| ())
}
/// Queries all enclaves for their name.
///
/// Broadcasts a `GetEnclaveName` command to every running enclave process
/// and collects the returned names.
pub fn get_all_enclave_names() -> NitroCliResult<Vec<String>> {
    let (comms, _) =
        enclave_proc_command_send_all::<EmptyArgs>(EnclaveProcessCommandType::GetEnclaveName, None)
            .map_err(|e| {
                e.add_subaction(
                    "Failed to send GetEnclaveName command to all enclave processes".to_string(),
                )
                .set_action("Get Enclave Names".to_string())
            })?;

    let mut connections: Vec<UnixStream> = Vec::new();
    connections.extend(comms);

    let names: Vec<String> = enclave_proc_handle_outputs::<String>(&mut connections)
        .iter()
        .map(|reply| reply.0.clone())
        .collect();
    Ok(names)
}
/// Sends the name to all the running enclaves and expects a response
/// with the ID of the one that uniquely matched.
pub fn get_id_by_name(name: String) -> NitroCliResult<String> {
    let (comms, _) = enclave_proc_command_send_all::<String>(
        EnclaveProcessCommandType::GetIDbyName,
        Some(&name),
    )
    .map_err(|e| {
        e.add_subaction("Failed to send GetIDbyName command to all enclave processes".to_string())
            .set_action("Get Enclave Names".to_string())
    })?;

    let mut connections: Vec<UnixStream> = Vec::new();
    connections.extend(comms);

    let mut matches: Vec<String> = enclave_proc_handle_outputs::<String>(&mut connections)
        .iter()
        .map(|reply| reply.0.clone())
        .collect();

    // Exactly one enclave must answer: zero matches means the name is
    // unknown, several matches mean the name is ambiguous.
    match matches.len() {
        1 => Ok(matches.remove(0)),
        0 => Err(new_nitro_cli_failure!(
            "No enclave matched the given name.".to_string(),
            NitroCliErrorEnum::EnclaveNamingError
        )),
        _ => Err(new_nitro_cli_failure!(
            "Conflicting enclave names have been found.".to_string(),
            NitroCliErrorEnum::EnclaveNamingError
        )),
    }
}
/// For the given file, return the PCR value
///
/// Based on the pcr_type, calculate the PCR hash of the input. The default
/// type takes the bytes of the input file and adds them to the hasher.
/// The certificate type performs additional serialization before hashing.
pub fn get_file_pcr(path: String, pcr_type: PcrType) -> NitroCliResult<BTreeMap<String, String>> {
    // Output key; stays the generic "PCR" unless the input maps to a
    // specific register (certificates map to PCR8).
    let mut key = "PCR".to_string();

    // Initialize hasher
    let mut hasher = EifHasher::new_without_cache(Sha384::new()).map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Could not create hasher: {e:?}"),
            NitroCliErrorEnum::HasherError
        )
    })?;

    // Read the whole input file into memory.
    let mut file = File::open(path).map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Failed to open file: {e:?}"),
            NitroCliErrorEnum::FileOperationFailure
        )
    })?;
    let mut buf = Vec::new();
    file.read_to_end(&mut buf).map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Failed to read file: {e:?}"),
            NitroCliErrorEnum::FileOperationFailure
        )
    })?;

    // Treat the input buffer by PCR type
    match pcr_type {
        // Default: hash the raw file bytes unchanged.
        PcrType::DefaultType => {}
        // Signing certificate: parse the PEM and re-serialize it to DER
        // before hashing; the result is reported as PCR8.
        PcrType::SigningCertificate => {
            key = "PCR8".to_string();
            let cert = openssl::x509::X509::from_pem(&buf[..]).map_err(|e| {
                new_nitro_cli_failure!(
                    &format!("Failed to deserialize .pem: {e:?}"),
                    NitroCliErrorEnum::HasherError
                )
            })?;
            buf = cert.to_der().map_err(|e| {
                new_nitro_cli_failure!(
                    &format!("Failed to serialize certificate: {e:?}"),
                    NitroCliErrorEnum::HasherError
                )
            })?;
        }
    }

    hasher.write_all(&buf).map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Could not write to hasher: {e:?}"),
            NitroCliErrorEnum::HasherError
        )
    })?;

    // Finalize via the TPM-extend scheme and hex-encode the digest.
    let hash = hex::encode(hasher.tpm_extend_finalize_reset().map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Could not get result for hasher: {e:?}"),
            NitroCliErrorEnum::HasherError
        )
    })?);

    let mut result = BTreeMap::new();
    result.insert(key, hash);

    // Print the result as pretty JSON in addition to returning it.
    println!(
        "{}",
        serde_json::to_string_pretty(&result)
            .map_err(|err| {
                new_nitro_cli_failure!(
                    &format!("Failed to display PCR(s): {err:?}"),
                    NitroCliErrorEnum::SerdeError
                )
            })?
            .as_str(),
    );
    Ok(result)
}
/// Macro defining the arguments configuration for a *Nitro CLI* application.
#[macro_export]
macro_rules! create_app {
() => {
Command::new("Nitro CLI")
.about("CLI for enclave lifetime management")
.arg_required_else_help(true)
.subcommand(
Command::new("run-enclave")
.about("Starts a new enclave")
.arg(
Arg::new("cpu-ids")
.long("cpu-ids")
.help("List of cpu-ids that will be provided to the enclave")
.num_args(1..)
.required_unless_present_any(["cpu-count", "config"])
.conflicts_with_all(["cpu-count", "config"]),
)
.arg(
Arg::new("cpu-count")
.long("cpu-count")
.help("Number of cpus")
.required_unless_present_any(["cpu-ids", "config"])
.conflicts_with_all(["cpu-ids", "config"]),
)
.arg(
Arg::new("memory")
.long("memory")
.help(
"Memory to allocate for the enclave in MB. Depending on the available \
pages, more might be allocated."
)
.required_unless_present("config")
.conflicts_with("config"),
)
.arg(
Arg::new("eif-path")
.long("eif-path")
.help("Path pointing to a prebuilt Eif image")
.required_unless_present("config")
.conflicts_with("config"),
)
.arg(
Arg::new("enclave-cid")
.long("enclave-cid")
.help("CID to be used for the newly started enclave")
.conflicts_with("config"),
)
.arg(
Arg::new("debug-mode")
.long("debug-mode")
.action(clap::ArgAction::SetTrue)
.help(
"Starts enclave in debug-mode. This makes the console of the enclave \
available over vsock at CID: VMADDR_CID_HYPERVISOR (0), port: \
enclave_cid + 10000. \n The stream could be accessed with the console \
sub-command"
)
.conflicts_with("config"),
)
.arg(
Arg::new("attach-console")
.long("attach-console")
.action(clap::ArgAction::SetTrue)
.help(
"Attach the enclave console immediately after starting the enclave. \
(implies debug-mode)"
)
)
.arg(
Arg::new("enclave-name")
.long("enclave-name")
.help("Custom name assigned to the enclave by the user")
.conflicts_with("config"),
)
.arg(
Arg::new("config")
.long("config")
.value_name("json-config")
.help("Config is used to read enclave settings from JSON file"),
),
)
.subcommand(
Command::new("terminate-enclave")
.about("Terminates an enclave")
.arg(
Arg::new("enclave-id")
.long("enclave-id")
.help("Enclave ID, used to uniquely identify an enclave")
.required_unless_present_any(["all", "enclave-name"])
.conflicts_with_all(["all", "enclave-name"]),
)
.arg(
Arg::new("all")
.long("all")
.action(clap::ArgAction::SetTrue)
.help("Terminate all running enclave instances belonging to the current user")
.required_unless_present_any(["enclave-id", "enclave-name"])
.conflicts_with_all(["enclave-id", "enclave-name"]),
)
.arg(
Arg::new("enclave-name")
.long("enclave-name")
.help("Enclave name, used to uniquely identify an enclave")
.required_unless_present_any(["enclave-id", "all"])
.conflicts_with_all(["enclave-id", "all"]),
),
)
.subcommand(
Command::new("build-enclave")
.about("Builds an enclave image and saves it to a file")
.arg(
Arg::new("docker-uri")
.long("docker-uri")
.help(
"Uri pointing to an existing docker container or to be created \
locally when docker-dir is present"
)
.required(true),
)
.arg(
Arg::new("docker-dir")
.long("docker-dir")
.help("Local path to a directory containing a Dockerfile"),
)
.arg(
Arg::new("output-file")
.long("output-file")
.help("Location where the Enclave Image should be saved")
.required(true),
)
.arg(
Arg::new("signing-certificate")
.long("signing-certificate")
.help("Local path to developer's X509 signing certificate.")
.requires("private-key"),
)
.arg(
Arg::new("private-key")
.long("private-key")
.help("KMS key ARN or local path to developer's Eliptic Curve private key.")
.requires("signing-certificate"),
)
.arg(
Arg::new("image_name")
.long("name")
.help("Name for enclave image"),
)
.arg(
Arg::new("image_version")
.long("version")
.help("Version of the enclave image"),
)
.arg(
Arg::new("metadata")
.long("metadata")
.help("Path to JSON containing the custom metadata provided by the user."),
),
)
.subcommand(
Command::new("describe-eif")
.about("Returns information about the EIF found at a given path.")
.arg(
Arg::new("eif-path")
.long("eif-path")
.help("Path to the EIF to describe.")
.required(true),
),
)
.subcommand(
Command::new("describe-enclaves")
.about("Returns a list of the running enclaves")
.arg(
Arg::new("metadata")
.long("metadata")
.help("Adds EIF metadata of the current enclaves to the command output.")
.action(clap::ArgAction::SetTrue)
),
)
.subcommand(
Command::new("console")
.about("Connect to the console of an enclave")
.arg(
Arg::new("enclave-id")
.long("enclave-id")
.help("Enclave ID, used to uniquely identify an enclave")
.required_unless_present("enclave-name")
.conflicts_with("enclave-name"),
)
.arg(
Arg::new("disconnect-timeout")
.long("disconnect-timeout")
.help("The time in seconds after the console disconnects from the enclave"),
)
.arg(
Arg::new("enclave-name")
.long("enclave-name")
.help("Enclave name, used to uniquely identify an enclave")
.required_unless_present("enclave-id")
.conflicts_with("enclave-id"),
),
)
.subcommand(
Command::new("pcr")
.about("Return the PCR hash value of the given input")
.arg(
Arg::new("signing-certificate")
.long("signing-certificate")
.help("Takes the path to the '.pem' signing certificate and returns PCR8. Can be used to identify the certificate used to sign an EIF")
.required_unless_present("input")
.conflicts_with("input"),
)
.arg(
Arg::new("input")
.long("input")
.help("Given a path to a file, returns the PCR hash of the bytes it contains")
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | true |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/src/utils.rs | src/utils.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(missing_docs)]
#![deny(warnings)]
use libc::{c_void, close};
use nix::poll::poll;
use nix::poll::{PollFd, PollFlags};
use nix::sys::socket::{connect, socket};
use nix::sys::socket::{AddressFamily, SockFlag, SockType, VsockAddr};
use nix::sys::time::{TimeVal, TimeValLike};
use nix::unistd::read;
use std::io::Write;
use std::mem::size_of;
use std::os::unix::io::IntoRawFd;
use std::os::unix::io::RawFd;
use std::thread::sleep;
use std::time::{Duration, SystemTime};
use vmm_sys_util::epoll::{ControlOperation, Epoll, EpollEvent, EventSet};
use vmm_sys_util::timerfd::TimerFd;
use crate::common::{NitroCliErrorEnum, NitroCliFailure, NitroCliResult};
use crate::new_nitro_cli_failure;
/// The size of the buffers used for reading console data, in bytes.
const BUFFER_SIZE: usize = 1024;
/// The console connection time-out, in milliseconds (20 seconds).
const CONSOLE_CONNECT_TIMEOUT: i64 = 20000;
/// The `poll` time-out, in milliseconds (10 seconds).
const POLL_TIMEOUT: i32 = 10000;
/// The socket connection time-out flag
/// (`SO_VM_SOCKETS_CONNECT_TIMEOUT`, set at the `AF_VSOCK` level).
const SO_VM_SOCKETS_CONNECT_TIMEOUT: i32 = 6;
/// The amount of time to wait between consecutive console reads, in milliseconds.
const TIMEOUT: u64 = 100;
/// Defines the types of PCRs that can be measured by `pcr` command
pub enum PcrType {
    /// Used for files containing the bytes for hashing: the file content is
    /// hashed as-is.
    DefaultType,
    /// Used for `.pem` files that we want to hash. Additional serializing
    /// (PEM -> DER) is performed before hashing, producing PCR8.
    SigningCertificate,
}
/// The structure representing the console of an enclave.
///
/// Owns the raw vsock socket descriptor; the descriptor is closed
/// automatically when the `Console` is dropped.
pub struct Console {
    /// The file descriptor used for connecting to the enclave's console.
    fd: RawFd,
}
impl Drop for Console {
    fn drop(&mut self) {
        // SAFETY: `fd` was obtained from a successful `socket()` call in the
        // `Console` constructors and is owned exclusively by this struct, so
        // closing it here is sound. The return value of `close` is
        // intentionally ignored: there is no recovery path inside `drop`.
        unsafe { close(self.fd) };
    }
}
impl Console {
/// Create a new blocking `Console` connection from a given enclave CID and a vsock port.
pub fn new(cid: u32, port: u32) -> NitroCliResult<Self> {
let socket_fd = socket(
AddressFamily::Vsock,
SockType::Stream,
SockFlag::empty(),
None,
)
.map_err(|err| {
new_nitro_cli_failure!(
&format!("Failed to create blocking console socket: {err:?}"),
NitroCliErrorEnum::SocketError
)
})?;
let sockaddr = VsockAddr::new(cid, port);
vsock_set_connect_timeout(socket_fd, CONSOLE_CONNECT_TIMEOUT).map_err(|err| {
err.add_subaction("Failed to set console connect timeout".to_string())
})?;
connect(socket_fd, &sockaddr).map_err(|err| {
new_nitro_cli_failure!(
&format!("Failed to connect to the console: {err:?}"),
NitroCliErrorEnum::EnclaveConsoleConnectionFailure
)
})?;
Ok(Console { fd: socket_fd })
}
/// Create a new non-blocking `Console` connection from a given enclave CID and a vsock port.
pub fn new_nonblocking(cid: u32, port: u32) -> NitroCliResult<Self> {
// create new non blocking socket
let socket_fd = socket(
AddressFamily::Vsock,
SockType::Stream,
SockFlag::SOCK_NONBLOCK,
None,
)
.map_err(|err| {
new_nitro_cli_failure!(
&format!("Failed to create nonblocking console socket: {err:?}"),
NitroCliErrorEnum::SocketError
)
})?;
vsock_set_connect_timeout(socket_fd, CONSOLE_CONNECT_TIMEOUT).map_err(|err| {
err.add_subaction("Failed to set console connect timeout".to_string())
})?;
let sockaddr = VsockAddr::new(cid, port);
let result = connect(socket_fd, &sockaddr);
match result {
Ok(_) => println!("Connected to the console"),
Err(error) => match error {
// If the connection is not ready, wait until socket_fd is ready for writing.
nix::errno::Errno::EINPROGRESS => {
let poll_fd = PollFd::new(socket_fd, PollFlags::POLLOUT);
let mut poll_fds = [poll_fd];
match poll(&mut poll_fds, POLL_TIMEOUT) {
Ok(1) => println!("Connected to the console"),
_ => {
return Err(new_nitro_cli_failure!(
"Failed to connect to the console",
NitroCliErrorEnum::SocketError
))
}
}
}
_ => {
return Err(new_nitro_cli_failure!(
"Failed to connect to the console",
NitroCliErrorEnum::SocketError
))
}
},
};
Ok(Console { fd: socket_fd })
}
/// Read a chunk of raw data from the console and output it.
pub fn read_to(
&self,
output: &mut dyn Write,
disconnect_timeout_sec: Option<u64>,
) -> NitroCliResult<()> {
// Initialize variables
let epoll = Epoll::new().map_err(|e| {
new_nitro_cli_failure!(
&format!("Failed to create epoll: {e:?}"),
NitroCliErrorEnum::EpollError
)
})?;
// Add console fd to epoll
epoll
.ctl(
ControlOperation::Add,
self.fd,
EpollEvent::new(EventSet::IN, self.fd as u64),
)
.map_err(|e| {
new_nitro_cli_failure!(
&format!("Failed to add fd to epoll: {e:?}"),
NitroCliErrorEnum::EpollError
)
})?;
// If the function call provides a disconnect timeout, create a timerfd,
// arm it and then add it to epoll
if let Some(disconnect_timeout) = disconnect_timeout_sec {
// Create timerfd
let mut timerfd = TimerFd::new().map_err(|e| {
new_nitro_cli_failure!(
&format!("Failed to initialize timerfd: {e:?}"),
NitroCliErrorEnum::EpollError
)
})?;
// Arm timerfd with disconnect_timeout seconds
timerfd
.reset(Duration::from_secs(disconnect_timeout), None)
.map_err(|e| {
new_nitro_cli_failure!(
&format!("Failed to arm timerfd: {e:?}"),
NitroCliErrorEnum::EpollError
)
})?;
// Add timerfd fd to epoll
let timerfd_fd = timerfd.into_raw_fd();
epoll
.ctl(
ControlOperation::Add,
timerfd_fd,
EpollEvent::new(EventSet::IN, timerfd_fd as u64),
)
.map_err(|e| {
new_nitro_cli_failure!(
&format!("Failed to add fd to epoll: {e:?}"),
NitroCliErrorEnum::EpollError
)
})?;
}
// Allow only one epoll event to happen at a given time
let mut events = [EpollEvent::default(); 1];
loop {
// Wait for kernel notification that one of the fds is available
let num_events = epoll.wait(-1, &mut events).map_err(|e| {
new_nitro_cli_failure!(
&format!("Failed to wait epoll: {e:?}"),
NitroCliErrorEnum::EpollError
)
})?;
// Check if any event triggered, because an interrupt could unblock the wait
// without any of the requested events to occur
if num_events == 1 {
match events[0].fd() {
// Check if console fd triggered
fd if fd == self.fd => {
let mut buffer = [0u8; BUFFER_SIZE];
let size = read(self.fd, &mut buffer).map_err(|e| {
new_nitro_cli_failure!(
&format!("Failed to read data from the console: {e:?}"),
NitroCliErrorEnum::EnclaveConsoleReadError
)
})?;
if size == 0 {
break;
}
if size > 0 {
output.write(&buffer[..size]).map_err(|e| {
new_nitro_cli_failure!(
&format!(
"Failed to write data from the \
console to the given stream: {e:?}"
),
NitroCliErrorEnum::EnclaveConsoleWriteOutputError
)
})?;
}
}
// Check if timerfd triggered
_ => break,
}
}
}
Ok(())
}
/// Read a chunk of raw data to a buffer.
pub fn read_to_buffer(&self, buf: &mut Vec<u8>, duration: Duration) -> NitroCliResult<()> {
let sys_time = SystemTime::now();
loop {
let mut buffer = [0u8; BUFFER_SIZE];
let result = read(self.fd, &mut buffer);
if let Ok(size) = result {
if size > 0 {
let mut buf_vec = buffer.to_vec();
buf_vec.truncate(size);
(*buf).append(&mut buf_vec);
}
}
sleep(Duration::from_millis(TIMEOUT));
let time_elapsed = sys_time.elapsed().map_err(|err| {
new_nitro_cli_failure!(
&format!("System time moved backwards: {err:?}"),
NitroCliErrorEnum::ClockSkewError
)
})?;
if time_elapsed >= duration {
break;
}
}
Ok(())
}
}
/// Set a timeout on a vsock connection.
///
/// Applies `SO_VM_SOCKETS_CONNECT_TIMEOUT` (at the `AF_VSOCK` level) to the
/// given socket descriptor, with the timeout expressed in milliseconds.
fn vsock_set_connect_timeout(fd: RawFd, millis: i64) -> NitroCliResult<()> {
    let timeval = TimeVal::milliseconds(millis);
    // SAFETY: `timeval` outlives the call and the supplied length matches the
    // size of the pointed-to value.
    let ret = unsafe {
        libc::setsockopt(
            fd,
            libc::AF_VSOCK,
            SO_VM_SOCKETS_CONNECT_TIMEOUT,
            &timeval as *const _ as *const c_void,
            size_of::<TimeVal>() as u32,
        )
    };
    if ret == 0 {
        Ok(())
    } else {
        Err(new_nitro_cli_failure!(
            &format!("Failed to configure SO_VM_SOCKETS_CONNECT_TIMEOUT: {ret:?}"),
            NitroCliErrorEnum::SocketConnectTimeoutError
        ))
    }
}
/// Computes the ceil of `lhs / rhs`. Used for reporting the lower
/// limit of enclave memory based on the EIF file size.
///
/// Returns `u64::MAX` when `rhs` is 0 instead of panicking on division
/// by zero.
pub fn ceil_div(lhs: u64, rhs: u64) -> u64 {
    if rhs == 0 {
        return u64::MAX;
    }
    // `div_ceil` replaces the hand-rolled quotient/remainder adjustment and
    // cannot overflow (unlike the `(lhs + rhs - 1) / rhs` idiom).
    lhs.div_ceil(rhs)
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/src/main.rs | src/main.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(missing_docs)]
#![deny(warnings)]
//! This is the entry point for the Nitro CLI process.
extern crate lazy_static;
use clap::{Arg, Command};
use log::info;
use std::os::unix::net::UnixStream;
use nitro_cli::common::commands_parser::{
BuildEnclavesArgs, ConsoleArgs, DescribeEnclavesArgs, EmptyArgs, ExplainArgs, PcrArgs,
RunEnclavesArgs, SignEifArgs, TerminateEnclavesArgs,
};
use nitro_cli::common::document_errors::explain_error;
use nitro_cli::common::json_output::{EnclaveDescribeInfo, EnclaveRunInfo, EnclaveTerminateInfo};
use nitro_cli::common::{
enclave_proc_command_send_single, logger, NitroCliErrorEnum, NitroCliFailure, NitroCliResult,
};
use nitro_cli::common::{EnclaveProcessCommandType, ExitGracefully};
use nitro_cli::enclave_proc::resource_manager::NE_ENCLAVE_DEBUG_MODE;
use nitro_cli::enclave_proc_comm::{
enclave_proc_command_send_all, enclave_proc_connect_to_single, enclave_proc_get_cid,
enclave_proc_get_flags, enclave_proc_spawn, enclave_process_handle_all_replies,
};
use nitro_cli::{
build_enclaves, console_enclaves, create_app, describe_eif, get_all_enclave_names,
get_file_pcr, new_enclave_name, new_nitro_cli_failure, sign_eif, terminate_all_enclaves,
};
// Human-readable action names attached to `NitroCliFailure`s (via
// `set_action`) so error reports identify which CLI operation failed.
const RUN_ENCLAVE_STR: &str = "Run Enclave";
const DESCRIBE_ENCLAVE_STR: &str = "Describe Enclave";
const DESCRIBE_EIF_STR: &str = "Describe EIF";
const TERMINATE_ENCLAVE_STR: &str = "Terminate Enclave";
const TERMINATE_ALL_ENCLAVES_STR: &str = "Terminate All Enclaves";
const BUILD_ENCLAVE_STR: &str = "Build Enclave";
const ENCLAVE_CONSOLE_STR: &str = "Enclave Console";
const EXPLAIN_ERR_STR: &str = "Explain Error";
const NEW_NAME_STR: &str = "New Enclave Name";
const FILE_PCR_STR: &str = "File PCR";
const SIGN_EIF_STR: &str = "Sign EIF";
/// *Nitro CLI* application entry point.
fn main() {
let version_str = env!("CARGO_PKG_VERSION");
// Command-line specification for the Nitro CLI.
let mut app = create_app!();
app = app.version(version_str);
let args = app.get_matches();
let logger = logger::init_logger()
.map_err(|e| e.set_action("Logger initialization".to_string()))
.ok_or_exit_with_errno(None);
let mut replies: Vec<UnixStream> = vec![];
logger
.update_logger_id(format!("nitro-cli:{}", std::process::id()).as_str())
.map_err(|e| e.set_action("Update CLI Process Logger ID".to_string()))
.ok_or_exit_with_errno(None);
info!("Start Nitro CLI");
match args.subcommand() {
Some(("run-enclave", args)) => {
let mut run_args = RunEnclavesArgs::new_with(args)
.map_err(|err| {
err.add_subaction("Failed to construct RunEnclave arguments".to_string())
.set_action(RUN_ENCLAVE_STR.to_string())
})
.ok_or_exit_with_errno(None);
let mut comm = enclave_proc_spawn(&logger)
.map_err(|err| {
err.add_subaction("Failed to spawn enclave process".to_string())
.set_action(RUN_ENCLAVE_STR.to_string())
})
.ok_or_exit_with_errno(None);
let names = get_all_enclave_names()
.map_err(|e| {
e.add_subaction("Failed to handle all enclave process replies".to_string())
.set_action("Get Enclaves Name".to_string())
})
.ok_or_exit_with_errno(None);
run_args.enclave_name = Some(
new_enclave_name(run_args.clone(), names)
.map_err(|err| {
err.add_subaction("Failed to assign a new enclave name".to_string())
.set_action(NEW_NAME_STR.to_string())
})
.ok_or_exit_with_errno(None),
);
enclave_proc_command_send_single(
EnclaveProcessCommandType::Run,
Some(&run_args),
&mut comm,
)
.map_err(|e| {
e.add_subaction("Failed to send single command".to_string())
.set_action(RUN_ENCLAVE_STR.to_string())
})
.ok_or_exit_with_errno(None);
info!("Sent command: Run");
replies.push(comm);
let run_info = enclave_process_handle_all_replies::<EnclaveRunInfo>(
&mut replies,
0,
false,
vec![0],
)
.map_err(|e| {
e.add_subaction("Failed to handle all enclave process replies".to_string())
.set_action(RUN_ENCLAVE_STR.to_string())
})
.ok_or_exit_with_errno(None);
let enclave_cid = run_info
.first()
.map(|run_info| run_info.enclave_cid)
.ok_or_else(|| {
new_nitro_cli_failure!(
"Enclave CID was not reported",
NitroCliErrorEnum::EnclaveConsoleConnectionFailure
)
})
.ok_or_exit_with_errno(None);
if run_args.attach_console {
console_enclaves(enclave_cid, None)
.map_err(|e| {
e.add_subaction("Failed to connect to enclave console".to_string())
.set_action(ENCLAVE_CONSOLE_STR.to_string())
})
.ok_or_exit_with_errno(None);
}
}
Some(("terminate-enclave", args)) => {
if args.get_flag("all") {
terminate_all_enclaves()
.map_err(|e| {
e.add_subaction("Failed to terminate all running enclaves".to_string())
.set_action(TERMINATE_ALL_ENCLAVES_STR.to_string())
})
.ok_or_exit_with_errno(None);
} else {
let terminate_args = TerminateEnclavesArgs::new_with(args)
.map_err(|err| {
err.add_subaction(
"Failed to construct TerminateEnclave arguments".to_string(),
)
.set_action(TERMINATE_ENCLAVE_STR.to_string())
})
.ok_or_exit_with_errno(None);
let mut comm = enclave_proc_connect_to_single(&terminate_args.enclave_id)
.map_err(|e| {
e.add_subaction("Failed to connect to enclave process".to_string())
.set_action(TERMINATE_ENCLAVE_STR.to_string())
})
.ok_or_exit_with_errno(None);
// TODO: Replicate output of old CLI on invalid enclave IDs.
enclave_proc_command_send_single::<EmptyArgs>(
EnclaveProcessCommandType::Terminate,
None,
&mut comm,
)
.map_err(|e| {
e.add_subaction("Failed to send single command".to_string())
.set_action(TERMINATE_ENCLAVE_STR.to_string())
})
.ok_or_exit_with_errno(None);
info!("Sent command: Terminate");
replies.push(comm);
enclave_process_handle_all_replies::<EnclaveTerminateInfo>(
&mut replies,
0,
false,
vec![0],
)
.map_err(|e| {
e.add_subaction("Failed to handle all enclave process replies".to_string())
.set_action(TERMINATE_ENCLAVE_STR.to_string())
})
.ok_or_exit_with_errno(None);
}
}
Some(("describe-enclaves", args)) => {
let describe_args = DescribeEnclavesArgs::new_with(args);
let (comms, comm_errors) = enclave_proc_command_send_all::<DescribeEnclavesArgs>(
EnclaveProcessCommandType::Describe,
Some(&describe_args),
)
.map_err(|e| {
e.add_subaction(
"Failed to send DescribeEnclave command to all enclave processes".to_string(),
)
.set_action(DESCRIBE_ENCLAVE_STR.to_string())
})
.ok_or_exit_with_errno(None);
info!("Sent command: Describe");
replies.extend(comms);
enclave_process_handle_all_replies::<EnclaveDescribeInfo>(
&mut replies,
comm_errors,
true,
vec![0],
)
.map_err(|e| {
e.add_subaction("Failed to handle all enclave process replies".to_string())
.set_action(DESCRIBE_ENCLAVE_STR.to_string())
})
.ok_or_exit_with_errno(None);
}
Some(("build-enclave", args)) => {
let build_args = BuildEnclavesArgs::new_with(args)
.map_err(|e| {
e.add_subaction("Failed to construct BuildEnclave arguments".to_string())
.set_action(BUILD_ENCLAVE_STR.to_string())
})
.ok_or_exit_with_errno(None);
build_enclaves(build_args)
.map_err(|e| {
e.add_subaction("Failed to build enclave".to_string())
.set_action(BUILD_ENCLAVE_STR.to_string())
})
.ok_or_exit_with_errno(None);
}
Some(("describe-eif", args)) => {
let eif_path = args
.get_one::<String>("eif-path")
.map(String::from)
.unwrap();
describe_eif(eif_path)
.map_err(|e| {
e.add_subaction("Failed to describe EIF".to_string())
.set_action(DESCRIBE_EIF_STR.to_string())
})
.ok_or_exit_with_errno(None);
}
Some(("console", args)) => {
let console_args = ConsoleArgs::new_with(args)
.map_err(|e| {
e.add_subaction("Failed to construct Console arguments".to_string())
.set_action(ENCLAVE_CONSOLE_STR.to_string())
})
.ok_or_exit_with_errno(None);
let enclave_cid = enclave_proc_get_cid(&console_args.enclave_id)
.map_err(|e| {
e.add_subaction("Failed to retrieve enclave CID".to_string())
.set_action(ENCLAVE_CONSOLE_STR.to_string())
})
.ok_or_exit_with_errno(None);
let enclave_flags = enclave_proc_get_flags(&console_args.enclave_id)
.map_err(|e| {
e.add_subaction("Failed to retrieve enclave flags".to_string())
.set_action(ENCLAVE_CONSOLE_STR.to_string())
})
.ok_or_exit_with_errno(None);
if enclave_flags & NE_ENCLAVE_DEBUG_MODE == 0 {
let _result : NitroCliResult<()> = Err(new_nitro_cli_failure!(
"The enclave was not started with the debug flag set, include '--debug-mode' in the run-enclave command",
NitroCliErrorEnum::EnclaveConsoleConnectionFailure
))
.map_err(|e| {
e.add_subaction("Failed to connect to enclave console".to_string())
.set_action(ENCLAVE_CONSOLE_STR.to_string())
})
.ok_or_exit_with_errno(None);
}
console_enclaves(enclave_cid, console_args.disconnect_timeout_sec)
.map_err(|e| {
e.add_subaction("Failed to connect to enclave console".to_string())
.set_action(ENCLAVE_CONSOLE_STR.to_string())
})
.ok_or_exit_with_errno(None);
}
Some(("pcr", args)) => {
let pcr_args = PcrArgs::new_with(args)
.map_err(|e| {
e.add_subaction("Failed to construct PCR arguments".to_string())
.set_action(FILE_PCR_STR.to_string())
})
.ok_or_exit_with_errno(None);
get_file_pcr(pcr_args.path, pcr_args.pcr_type)
.map_err(|e| {
e.add_subaction("Failed to get the PCR hash of the file contents".to_string())
.set_action(FILE_PCR_STR.to_string())
})
.ok_or_exit_with_errno(None);
}
Some(("explain", args)) => {
let explain_args = ExplainArgs::new_with(args)
.map_err(|e| {
e.add_subaction("Failed to construct Explain arguments".to_string())
.set_action(EXPLAIN_ERR_STR.to_string())
})
.ok_or_exit_with_errno(None);
explain_error(explain_args.error_code_str);
}
Some(("sign-eif", args)) => {
let sign_args = SignEifArgs::new_with(args)
.map_err(|e| {
e.add_subaction("Failed to construct SignEIF arguments".to_string())
.set_action(SIGN_EIF_STR.to_string())
})
.ok_or_exit_with_errno(None);
sign_eif(sign_args)
.map_err(|e| {
e.add_subaction("Failed to sign EIF".to_string())
.set_action(SIGN_EIF_STR.to_string())
})
.ok_or_exit_with_errno(None);
}
Some((&_, _)) | None => (),
}
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/src/enclave_proc_comm.rs | src/enclave_proc_comm.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(missing_docs)]
#![deny(warnings)]
use log::{debug, info};
use nix::sys::epoll;
use nix::sys::epoll::{EpollEvent, EpollFlags, EpollOp};
use nix::unistd::*;
use serde::de::DeserializeOwned;
use serde::Serialize;
use std::borrow::BorrowMut;
use std::fs;
use std::io::ErrorKind;
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use std::os::unix::net::UnixStream;
use crate::common::commands_parser::EmptyArgs;
use crate::common::logger::EnclaveProcLogWriter;
use crate::common::{
enclave_proc_command_send_single, get_socket_path, get_sockets_dir_path, notify_error,
read_u64_le, receive_from_stream,
};
use crate::common::{
EnclaveProcessCommandType, EnclaveProcessReply, NitroCliErrorEnum, NitroCliFailure,
NitroCliResult,
};
use crate::common::{ENCLAVE_PROC_WAIT_TIMEOUT_MSEC, MSG_ENCLAVE_CONFIRM};
use crate::enclave_proc::enclave_process_run;
use crate::new_nitro_cli_failure;
/// Spawn an enclave process and wait until it has detached and has
/// taken ownership of its communication socket.
///
/// Returns the CLI-side end of a `UnixStream` pair; the other end is
/// inherited by the spawned enclave process.
pub fn enclave_proc_spawn(logger: &EnclaveProcLogWriter) -> NitroCliResult<UnixStream> {
    let (cli_socket, enclave_proc_socket) = UnixStream::pair().map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Could not create a socket pair: {e:?}"),
            NitroCliErrorEnum::SocketPairCreationFailure
        )
    })?;
    // Prevent the descriptor from being closed when calling exec().
    // Clearing FD_CLOEXEC keeps the socket usable in the detached process.
    let enclave_proc_fd = enclave_proc_socket.as_raw_fd();
    unsafe {
        let flags = libc::fcntl(enclave_proc_fd, libc::F_GETFD);
        libc::fcntl(enclave_proc_fd, libc::F_SETFD, flags & !libc::FD_CLOEXEC);
    }
    // Spawn an intermediate child process. This will fork again in order to
    // create the detached enclave process.
    // Safety: enclave_proc_spawn is called early on and nitro-cli is not, at this point,
    // multi-threaded, which should prevent the issues around forking. However,
    // the safe way to do this would be to use a safe alternative such as Command to
    // re-execute this same process with another set of parameters.
    let fork_status = unsafe { fork() };
    if let Ok(ForkResult::Child) = fork_status {
        // This is our intermediate child process.
        // NOTE(review): enclave_process_run presumably never returns to this
        // caller (the child detaches) — confirm before relying on it.
        enclave_process_run(enclave_proc_socket, logger);
    } else {
        // In the parent only a fork failure needs handling; on success we fall
        // through and return the CLI end of the socket pair.
        fork_status.map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to create intermediate process: {e:?}"),
                NitroCliErrorEnum::ProcessSpawnFailure
            )
        })?;
    }
    // The enclave process will open a socket named "<enclave_id>.sock", but this
    // will only become available after the enclave has been successfully launched.
    // Until then, we can only use the pre-initialized socket pair to communicate
    // with the new process.
    Ok(cli_socket)
}
/// Connect to every running enclave process, returning one stream per process.
///
/// Scans the sockets directory for `<enclave_id>.sock` entries. Sockets that
/// refuse the connection for any reason other than insufficient permissions
/// are treated as stale and removed.
pub fn enclave_proc_connect_to_all() -> NitroCliResult<Vec<UnixStream>> {
    let entries = fs::read_dir(get_sockets_dir_path()).map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Failed to access sockets directory: {e:?}"),
            NitroCliErrorEnum::ReadFromDiskFailure
        )
    })?;

    let mut connections = Vec::new();
    for entry in entries.flatten() {
        let path = entry.path();
        if path.is_dir() {
            continue;
        }
        // Entries with non-UTF-8 names cannot be enclave sockets.
        let path_str = match path.to_str() {
            Some(s) => s,
            None => continue,
        };
        // Enclave process sockets are named "<enclave_id>.sock".
        if !path_str.ends_with(".sock") {
            continue;
        }
        // At this point we have found a potential socket.
        match UnixStream::connect(path_str) {
            Ok(conn) => {
                // We have connected to an enclave process.
                info!("Connected to: {}", path_str);
                connections.push(conn);
            }
            Err(e) if e.kind() == ErrorKind::PermissionDenied => {
                // Don't touch the socket if connection failed due to insufficient permissions.
                info!("Connection to '{}' failed: {}", path_str, e);
            }
            Err(_) => {
                // For all other connection errors, assume the socket is stale and delete it.
                info!("Deleting stale socket: {}", path_str);
                let _ = fs::remove_file(path_str).map_err(|e| {
                    new_nitro_cli_failure!(
                        &format!("Failed to delete socket: {e:?}"),
                        NitroCliErrorEnum::FileOperationFailure
                    )
                    .add_info(vec![path_str, "Remove"])
                });
            }
        }
    }

    Ok(connections)
}
/// Open a connection to the socket owned by one specific enclave process.
pub fn enclave_proc_connect_to_single(enclave_id: &str) -> NitroCliResult<UnixStream> {
    let socket_path = get_socket_path(enclave_id).map_err(|e| {
        e.add_subaction("Connect to specific enclave process".to_string())
            .set_error_code(NitroCliErrorEnum::SocketError)
    })?;

    match UnixStream::connect(socket_path) {
        Ok(stream) => Ok(stream),
        Err(e) => Err(new_nitro_cli_failure!(
            &format!("Failed to connect to specific enclave process: {e:?}"),
            NitroCliErrorEnum::SocketError
        )),
    }
}
/// Broadcast a command to all available enclave processes.
pub fn enclave_proc_command_send_all<T>(
cmd: EnclaveProcessCommandType,
args: Option<&T>,
) -> NitroCliResult<(Vec<UnixStream>, usize)>
where
T: Serialize,
{
// Open a connection to each valid socket.
let mut replies: Vec<UnixStream> = vec![];
let epoll_fd = epoll::epoll_create().map_err(|e| {
new_nitro_cli_failure!(
&format!("Failed to create epoll: {e:?}"),
NitroCliErrorEnum::EpollError
)
})?;
let comms: Vec<NitroCliResult<()>> = enclave_proc_connect_to_all()
.map_err(|e| {
e.add_subaction("Failed to send command to all enclave processes".to_string())
})?
.into_iter()
.map(|mut socket| {
// Send the command.
enclave_proc_command_send_single(cmd, args, socket.borrow_mut())?;
let raw_fd = socket.into_raw_fd();
let mut process_evt = EpollEvent::new(EpollFlags::EPOLLIN, raw_fd as u64);
// Add each valid connection to epoll.
epoll::epoll_ctl(epoll_fd, EpollOp::EpollCtlAdd, raw_fd, &mut process_evt).map_err(
|e| {
new_nitro_cli_failure!(
&format!("Failed to register socket with epoll: {e:?}"),
NitroCliErrorEnum::EpollError
)
},
)?;
Ok(())
})
.collect();
// Don't proceed unless at least one connection has been established.
if comms.is_empty() {
return Ok((vec![], 0));
}
// Get the number of transmission errors.
let mut num_errors = comms.iter().filter(|result| result.is_err()).count();
// Get the number of expected replies.
let mut num_replies_expected = comms.len() - num_errors;
let mut events = [EpollEvent::empty(); 1];
while num_replies_expected > 0 {
let num_events = loop {
match epoll::epoll_wait(epoll_fd, &mut events[..], ENCLAVE_PROC_WAIT_TIMEOUT_MSEC) {
Ok(num_events) => break num_events,
Err(nix::errno::Errno::EINTR) => continue,
// TODO: Handle bad descriptors (closed remote connections).
Err(e) => {
return Err(new_nitro_cli_failure!(
&format!("Failed to wait on epoll: {e:?}"),
NitroCliErrorEnum::EpollError
))
}
}
};
// We will handle this reply, irrespective of its status (successful or failed).
num_replies_expected -= 1;
// Check if a time-out has occurred.
if num_events == 0 {
continue;
}
let input_stream_raw_fd = events[0].data() as RawFd;
let mut input_stream = unsafe { UnixStream::from_raw_fd(input_stream_raw_fd) };
// Handle the reply we received.
if let Ok(reply) = read_u64_le(&mut input_stream) {
if reply == MSG_ENCLAVE_CONFIRM {
debug!("Got confirmation from {:?}", input_stream);
replies.push(input_stream);
}
}
epoll::epoll_ctl(
epoll_fd,
EpollOp::EpollCtlDel,
input_stream_raw_fd,
Option::None,
)
.map_err(|e| {
new_nitro_cli_failure!(
&format!("Failed to remove socket from epoll: {e:?}"),
NitroCliErrorEnum::EpollError
)
})?;
}
// Update the number of connections that have yielded errors.
num_errors = comms.len() - replies.len();
Ok((replies, num_errors))
}
/// Drain all replies from a single enclave process connection.
///
/// Standard-error chunks are echoed immediately; standard-output chunks are
/// accumulated and decoded as one JSON object at the end. Returns the decoded
/// object (if any) and the last reported status code.
fn enclave_proc_handle_output<T>(conn: &mut UnixStream) -> (Option<T>, Option<i32>)
where
    T: DeserializeOwned,
{
    let mut collected_stdout = String::new();
    let mut exit_status: Option<i32> = None;

    // The contents meant for standard output must always form a valid JSON object.
    loop {
        let reply = match receive_from_stream::<EnclaveProcessReply>(conn) {
            Ok(reply) => reply,
            Err(_) => break,
        };
        match reply {
            EnclaveProcessReply::StdOutMessage(msg) => collected_stdout.push_str(&msg),
            EnclaveProcessReply::StdErrMessage(msg) => eprint!("{msg}"),
            EnclaveProcessReply::Status(code) => exit_status = Some(code),
        }
    }

    // Shut the connection down; a failure here is flagged with status -1.
    if let Err(e) = conn.shutdown(std::net::Shutdown::Both) {
        notify_error(&format!("Failed to shut connection down: {e}"));
        exit_status = Some(-1);
    }

    // Decode the JSON object.
    (serde_json::from_str::<T>(&collected_stdout).ok(), exit_status)
}
/// Gather the (JSON object, status) pair from every connected enclave process.
///
/// Connections that fail to produce both a valid JSON object and a status
/// code are silently dropped from the result.
pub fn enclave_proc_handle_outputs<T>(conns: &mut [UnixStream]) -> Vec<(T, i32)>
where
    T: DeserializeOwned,
{
    conns
        .iter_mut()
        .filter_map(|conn| {
            // Only connections that yielded a valid JSON object AND a status
            // are counted; `zip` is None unless both halves are Some.
            let (object, status) = enclave_proc_handle_output::<T>(conn);
            object.zip(status)
        })
        .collect()
}
/// Process reply messages from all connected enclave processes.
///
/// Prints each received JSON object (individually, or as one array when
/// `print_as_vec` is set) and fails when any process returned a status code
/// outside `allowed_return_codes`, or when any connection failed.
pub fn enclave_process_handle_all_replies<T>(
    replies: &mut [UnixStream],
    prev_failed_conns: usize,
    print_as_vec: bool,
    allowed_return_codes: Vec<i32>,
) -> NitroCliResult<Vec<T>>
where
    T: Clone + DeserializeOwned + Serialize,
{
    let objects = enclave_proc_handle_outputs::<T>(replies);
    // Connections that produced no (object, status) pair count as failures.
    let failed_conns = prev_failed_conns + replies.len() - objects.len();

    // Print a message if we have any connections that have failed.
    if failed_conns > 0 {
        eprintln!("Failed connections: {failed_conns}");
    }

    // Output the received objects either individually or as an array.
    if print_as_vec {
        let obj_vec: Vec<T> = objects.iter().map(|v| v.0.clone()).collect();
        println!(
            "{}",
            serde_json::to_string_pretty(&obj_vec).map_err(|e| new_nitro_cli_failure!(
                &format!("Failed to print JSON vector: {e:?}"),
                NitroCliErrorEnum::SerdeError
            ))?
        );
    } else {
        for object in objects.iter().map(|v| v.0.clone()) {
            println!(
                "{}",
                serde_json::to_string_pretty(&object).map_err(|e| new_nitro_cli_failure!(
                    &format!("Failed to print JSON object: {e:?}"),
                    NitroCliErrorEnum::SerdeError
                ))?
            );
        }
    }

    // We fail on any disallowed status codes or failed connections.
    // (Previously this filter was evaluated twice; compute the count once.)
    let failed_cmds = objects
        .iter()
        .filter(|v| !allowed_return_codes.contains(&v.1))
        .count();
    if failed_cmds > 0 {
        return Err(new_nitro_cli_failure!(
            &format!("Failed to execute {failed_cmds} enclave process commands"),
            NitroCliErrorEnum::EnclaveProcessCommandNotExecuted
        ));
    }
    if failed_conns > 0 {
        return Err(new_nitro_cli_failure!(
            &format!("Failed to connect to {failed_conns} enclave processes"),
            NitroCliErrorEnum::EnclaveProcessConnectionFailure
        ));
    }

    Ok(objects.into_iter().map(|(o, _)| o).collect())
}
/// Obtain an enclave's CID given its full ID.
pub fn enclave_proc_get_cid(enclave_id: &str) -> NitroCliResult<u64> {
let mut comm = enclave_proc_connect_to_single(enclave_id)
.map_err(|e| e.add_subaction("Failed to connect to enclave process".to_string()))?;
// TODO: Replicate output of old CLI on invalid enclave IDs.
enclave_proc_command_send_single::<EmptyArgs>(
EnclaveProcessCommandType::GetEnclaveCID,
None,
&mut comm,
)
.map_err(|e| e.add_subaction("Failed to send CID request to enclave process".to_string()))?;
info!("Sent command: GetEnclaveCID");
let enclave_cid = read_u64_le(&mut comm)
.map_err(|e| e.add_subaction(String::from("Failed to read CID from enclave process")))?;
// We got the CID, so shut the connection down.
comm.shutdown(std::net::Shutdown::Both).map_err(|e| {
new_nitro_cli_failure!(
&format!("Failed to shut down connection after obtaining enclave CID: {e:?}"),
NitroCliErrorEnum::SocketError
)
})?;
Ok(enclave_cid)
}
/// Obtain an enclave's flags given its full ID.
pub fn enclave_proc_get_flags(enclave_id: &str) -> NitroCliResult<u64> {
let mut comm = enclave_proc_connect_to_single(enclave_id)
.map_err(|e| e.add_subaction("Failed to connect to enclave process".to_string()))?;
// TODO: Replicate output of old CLI on invalid enclave IDs.
enclave_proc_command_send_single::<EmptyArgs>(
EnclaveProcessCommandType::GetEnclaveFlags,
None,
&mut comm,
)
.map_err(|e| e.add_subaction("Failed to send flags request to enclave process".to_string()))?;
info!("Sent command: GetEnclaveFlags");
let enclave_flags = read_u64_le(&mut comm)
.map_err(|e| e.add_subaction(String::from("Failed to read flags from enclave process")))?;
// We got the flags, so shut the connection down.
comm.shutdown(std::net::Shutdown::Both).map_err(|e| {
new_nitro_cli_failure!(
&format!("Failed to shut down connection after obtaining enclave flags: {e:?}"),
NitroCliErrorEnum::SocketError
)
})?;
Ok(enclave_flags)
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/src/common/commands_parser.rs | src/common/commands_parser.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(missing_docs)]
#![deny(warnings)]
use clap::ArgMatches;
use libc::VMADDR_CID_HOST;
#[cfg(test)]
use libc::VMADDR_CID_LOCAL;
use serde::{Deserialize, Serialize};
use std::fs::File;
use std::str::FromStr;
use crate::common::{NitroCliErrorEnum, NitroCliFailure, NitroCliResult, VMADDR_CID_PARENT};
use crate::get_id_by_name;
use crate::new_nitro_cli_failure;
use crate::utils::PcrType;
/// The arguments used by the `run-enclave` command.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RunEnclavesArgs {
    /// The path to the enclave image file.
    pub eif_path: String,
    /// The optional enclave CID
    pub enclave_cid: Option<u64>,
    /// The amount of memory (in MiB) that will be given to the enclave.
    pub memory_mib: u64,
    /// An optional list of CPU IDs that will be given to the enclave.
    /// Mutually exclusive with `cpu_count` (validated in `new_with`).
    pub cpu_ids: Option<Vec<u32>>,
    /// A flag indicating if the enclave will be started in debug mode.
    /// Defaults to `false` when absent from a JSON config file.
    #[serde(default)]
    pub debug_mode: bool,
    /// Attach to the console immediately if using debug mode.
    /// Defaults to `false` when absent from a JSON config file.
    #[serde(default)]
    pub attach_console: bool,
    /// The number of CPUs that the enclave will receive.
    /// Mutually exclusive with `cpu_ids` (validated in `new_with`).
    pub cpu_count: Option<u32>,
    /// Enclave name set by the user.
    pub enclave_name: Option<String>,
}
impl RunEnclavesArgs {
    /// Construct a new `RunEnclavesArgs` instance from the given command-line arguments.
    ///
    /// When `--config` is supplied, every setting is deserialized from that
    /// JSON file (the other flags are not consulted); otherwise each setting
    /// is parsed from its own command-line flag.
    pub fn new_with(args: &ArgMatches) -> NitroCliResult<Self> {
        if let Some(config_file) = args.get_one::<String>("config") {
            let file = File::open(config_file).map_err(|err| {
                new_nitro_cli_failure!(
                    &format!("Failed to open config file: {err:?}"),
                    NitroCliErrorEnum::FileOperationFailure
                )
                .add_info(vec![config_file, "Open"])
            })?;
            let mut json: RunEnclavesArgs = serde_json::from_reader(file).map_err(|err| {
                new_nitro_cli_failure!(
                    &format!("Invalid JSON format for config file: {err:?}"),
                    NitroCliErrorEnum::SerdeError
                )
            })?;
            // Exactly one of `cpu-count` / `cpu-ids` must be present in the
            // file; serde cannot express this constraint, so validate it here.
            if json.cpu_count.is_none() && json.cpu_ids.is_none() {
                return Err(new_nitro_cli_failure!(
                    "Missing both `cpu-count` and `cpu-ids`",
                    NitroCliErrorEnum::MissingArgument
                ));
            }
            if json.cpu_count.is_some() && json.cpu_ids.is_some() {
                return Err(new_nitro_cli_failure!(
                    "`cpu-count` and `cpu-ids` cannot be used together",
                    NitroCliErrorEnum::ConflictingArgument
                ));
            }
            // attach_console implies debug_mode
            json.debug_mode = json.debug_mode || json.attach_console;
            Ok(json)
        } else {
            Ok(RunEnclavesArgs {
                cpu_count: parse_cpu_count(args)
                    .map_err(|err| err.add_subaction("Parse CPU count".to_string()))?,
                eif_path: parse_eif_path(args)
                    .map_err(|err| err.add_subaction("Parse EIF path".to_string()))?,
                enclave_cid: parse_enclave_cid(args)
                    .map_err(|err| err.add_subaction("Parse enclave CID".to_string()))?,
                memory_mib: parse_memory(args)
                    .map_err(|err| err.add_subaction("Parse memory".to_string()))?,
                cpu_ids: parse_cpu_ids(args)
                    .map_err(|err| err.add_subaction("Parse CPU IDs".to_string()))?,
                debug_mode: debug_mode(args),
                attach_console: attach_console(args),
                enclave_name: parse_enclave_name(args)
                    .map_err(|err| err.add_subaction("Parse enclave name".to_string()))?,
            })
        }
    }
}
/// The arguments used by the `build-enclave` command.
#[derive(Debug, Clone)]
pub struct BuildEnclavesArgs {
    /// The URI to the Docker image.
    pub docker_uri: String,
    /// The directory containing the Docker image.
    pub docker_dir: Option<String>,
    /// The path where the enclave image file will be written to.
    pub output: String,
    /// The path to the signing certificate for signed enclaves.
    pub signing_certificate: Option<String>,
    /// KMS key ARN or path to the private key for signed enclaves.
    pub private_key: Option<String>,
    /// The name of the enclave image.
    pub img_name: Option<String>,
    /// The version of the enclave image.
    pub img_version: Option<String>,
    /// The path to custom metadata JSON file
    pub metadata: Option<String>,
}
impl BuildEnclavesArgs {
    /// Construct a new `BuildEnclavesArgs` instance from the given command-line arguments.
    ///
    /// `docker-uri` and `output` are mandatory; all other settings are optional.
    pub fn new_with(args: &ArgMatches) -> NitroCliResult<Self> {
        Ok(BuildEnclavesArgs {
            docker_uri: parse_docker_tag(args).ok_or_else(|| {
                new_nitro_cli_failure!(
                    "`docker-uri` argument not found",
                    NitroCliErrorEnum::MissingArgument
                )
                .add_info(vec!["docker-uri"])
            })?,
            docker_dir: parse_docker_dir(args),
            output: parse_output(args).ok_or_else(|| {
                new_nitro_cli_failure!(
                    "`output` argument not found",
                    NitroCliErrorEnum::MissingArgument
                )
                .add_info(vec!["output"])
            })?,
            signing_certificate: parse_signing_certificate(args),
            private_key: parse_private_key(args),
            img_name: parse_image_name(args),
            img_version: parse_image_version(args),
            metadata: parse_metadata(args),
        })
    }
}
/// The arguments used by the `terminate-enclave` command.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TerminateEnclavesArgs {
    /// The ID of the enclave that is to be terminated.
    pub enclave_id: String,
}

impl TerminateEnclavesArgs {
    /// Construct a new `TerminateEnclavesArgs` instance from the given command-line arguments.
    ///
    /// A provided enclave name takes precedence: it is resolved to the
    /// corresponding enclave ID; otherwise the explicit ID argument is used.
    pub fn new_with(args: &ArgMatches) -> NitroCliResult<Self> {
        let name = parse_enclave_name(args)
            .map_err(|e| e.add_subaction("Parse Enclave Name".to_string()))?;
        let enclave_id = if let Some(name) = name {
            get_id_by_name(name).map_err(|e| e.add_subaction("Get ID by Name".to_string()))?
        } else {
            parse_enclave_id(args).map_err(|e| e.add_subaction("Parse enclave ID".to_string()))?
        };
        Ok(TerminateEnclavesArgs { enclave_id })
    }
}
/// The arguments used by the `console` command.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConsoleArgs {
    /// The ID of the enclave whose console is to be shown.
    pub enclave_id: String,
    /// The time in seconds after the console disconnects from the enclave.
    pub disconnect_timeout_sec: Option<u64>,
}
impl ConsoleArgs {
    /// Construct a new `ConsoleArgs` instance from the given command-line arguments.
    ///
    /// A provided enclave name takes precedence over an explicit enclave ID.
    pub fn new_with(args: &ArgMatches) -> NitroCliResult<Self> {
        // If a name is given, find the corresponding EnclaveID
        let enclave_id = match parse_enclave_name(args)
            .map_err(|e| e.add_subaction("Parse Enclave Name".to_string()))?
        {
            Some(name) => {
                get_id_by_name(name).map_err(|e| e.add_subaction("Get ID by Name".to_string()))?
            }
            None => parse_enclave_id(args)
                .map_err(|e| e.add_subaction("Parse enclave ID".to_string()))?,
        };
        Ok(ConsoleArgs {
            enclave_id,
            disconnect_timeout_sec: parse_disconnect_timeout(args)
                .map_err(|e| e.add_subaction("Parse disconnect timeout".to_string()))?,
        })
    }
}
/// Empty set of arguments.
///
/// Placeholder payload type for enclave-process commands that carry no data
/// (used as the type parameter of `enclave_proc_command_send_single`).
#[derive(Serialize, Deserialize)]
pub struct EmptyArgs {}
/// The arguments used by `describe-enclaves` command.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DescribeEnclavesArgs {
    /// True if metadata is requested.
    pub metadata: bool,
}
impl DescribeEnclavesArgs {
    /// Construct a new `DescribeEnclavesArgs` instance from the given command-line arguments.
    ///
    /// Infallible: `metadata` is a boolean flag, so absence simply means `false`.
    pub fn new_with(args: &ArgMatches) -> Self {
        DescribeEnclavesArgs {
            metadata: args.get_flag("metadata"),
        }
    }
}
/// The arguments used by the `explain` command.
#[derive(Debug, Clone)]
pub struct ExplainArgs {
    /// The error code of the error to explain.
    pub error_code_str: String,
}

impl ExplainArgs {
    /// Construct a new `ExplainArgs` instance from the given command-line arguments.
    pub fn new_with(args: &ArgMatches) -> NitroCliResult<Self> {
        let error_code_str = parse_error_code_str(args)
            .map_err(|e| e.add_subaction("Parse error code".to_string()))?;
        Ok(ExplainArgs { error_code_str })
    }
}
/// The arguments used by `pcr` command
pub struct PcrArgs {
    /// Path to the file needed for hashing
    pub path: String,
    /// The type of file we need to hash
    pub pcr_type: PcrType,
}

impl PcrArgs {
    /// Construct a new `PcrArgs` instance from the given command-line arguments.
    ///
    /// A provided `signing-certificate` argument takes precedence over `input`.
    pub fn new_with(args: &ArgMatches) -> NitroCliResult<Self> {
        // if/else instead of matching on a bool (clippy::match_bool).
        let (val_name, pcr_type) = if args.contains_id("signing-certificate") {
            ("signing-certificate", PcrType::SigningCertificate)
        } else {
            ("input", PcrType::DefaultType)
        };
        let path = parse_file_path(args, val_name)
            .map_err(|e| e.add_subaction("Parse PCR file".to_string()))?;
        Ok(Self { path, pcr_type })
    }
}
/// The arguments used by `sign-eif` command
#[derive(Debug, Clone)]
pub struct SignEifArgs {
    /// Path to the EIF file needed for signing
    pub eif_path: String,
    /// The path to the signing certificate for signed enclaves.
    pub signing_certificate: Option<String>,
    /// ARN of the KMS key or path to the local private key for signed enclaves.
    pub private_key: Option<String>,
}

impl SignEifArgs {
    /// Construct a new `SignEifArgs` instance from the given command-line arguments.
    pub fn new_with(args: &ArgMatches) -> NitroCliResult<Self> {
        Ok(SignEifArgs {
            eif_path: parse_eif_path(args)
                .map_err(|e| e.add_subaction("Parse EIF path".to_string()))?,
            signing_certificate: parse_signing_certificate(args),
            private_key: parse_private_key(args),
        })
    }
}
/// Parse file path to hash from the command-line arguments.
///
/// `val_name` names the clap argument to read (`"input"` or
/// `"signing-certificate"`, as chosen by `PcrArgs::new_with`).
fn parse_file_path(args: &ArgMatches, val_name: &str) -> NitroCliResult<String> {
    let path = args.get_one::<String>(val_name).ok_or_else(|| {
        new_nitro_cli_failure!(
            "`input` or `signing-certificate` argument not found",
            NitroCliErrorEnum::MissingArgument
        )
    })?;
    Ok(path.into())
}
/// Size multipliers for the memory suffixes accepted by `parse_memory`.
#[derive(Debug)]
enum MemoryUnit {
    Mebibytes,
    Gibibytes,
    Tebibytes,
}

/// Error returned when a memory size suffix is not one of M/G/T.
#[derive(Debug)]
struct UnknownMemoryUnitErr;

impl MemoryUnit {
    /// Number of mebibytes represented by one unit of this magnitude.
    fn to_mebibytes(&self) -> u64 {
        const MIB_PER_GIB: u64 = 1024;
        match self {
            MemoryUnit::Mebibytes => 1,
            MemoryUnit::Gibibytes => MIB_PER_GIB,
            MemoryUnit::Tebibytes => MIB_PER_GIB * MIB_PER_GIB,
        }
    }
}

impl FromStr for MemoryUnit {
    type Err = UnknownMemoryUnitErr;

    /// Accepts `M`/`G`/`T` case-insensitively; the empty string defaults to
    /// mebibytes. Anything else is rejected.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_ascii_lowercase().as_str() {
            "" | "m" => Ok(MemoryUnit::Mebibytes),
            "g" => Ok(MemoryUnit::Gibibytes),
            "t" => Ok(MemoryUnit::Tebibytes),
            _ => Err(UnknownMemoryUnitErr),
        }
    }
}
/// Parse the requested amount of enclave memory from the command-line arguments.
/// It can be just a number like 123, or it can end in a size indicator like 100M or 10G.
/// If the size indicator is missing, it defaults to M.
/// If the size indicator is not M, G or T, it returns an error.
///
/// # Arguments
/// * `args` - The command-line arguments.
pub fn parse_memory(args: &ArgMatches) -> NitroCliResult<u64> {
    let memory = args.get_one::<String>("memory").ok_or_else(|| {
        new_nitro_cli_failure!(
            "`memory` argument not found",
            NitroCliErrorEnum::MissingArgument
        )
    })?;
    // Split at the first non-digit: "100M" -> ("100", "M"); "123" -> ("123", "").
    let (num_str, size_str) = match memory.find(|c: char| !c.is_numeric()) {
        Some(index) => memory.split_at(index),
        None => (memory.as_str(), ""),
    };
    let num = num_str.parse::<u64>().map_err(|_| {
        new_nitro_cli_failure!(
            "`memory` argument does not contain a number",
            NitroCliErrorEnum::InvalidArgument
        )
        .add_info(vec!["memory", memory])
    })?;
    let unit = size_str.parse::<MemoryUnit>().map_err(|_| {
        new_nitro_cli_failure!(
            "`memory` argument does not contain a valid size indicator",
            NitroCliErrorEnum::InvalidArgument
        )
        .add_info(vec!["memory", memory])
    })?;
    // Checked multiplication: a huge value such as "18446744073709551615T"
    // would otherwise overflow (panicking in debug builds, wrapping in release).
    num.checked_mul(unit.to_mebibytes()).ok_or_else(|| {
        new_nitro_cli_failure!(
            "`memory` argument is too large",
            NitroCliErrorEnum::InvalidArgument
        )
        .add_info(vec!["memory", memory])
    })
}
/// Parse the Docker tag from the command-line arguments.
fn parse_docker_tag(args: &ArgMatches) -> Option<String> {
    args.get_one::<String>("docker-uri").cloned()
}

/// Parse the Docker directory from the command-line arguments.
fn parse_docker_dir(args: &ArgMatches) -> Option<String> {
    args.get_one::<String>("docker-dir").cloned()
}
/// Parse the enclave's required CID from the command-line arguments.
///
/// Returns `Ok(None)` when `--enclave-cid` is absent. A present CID must be
/// numeric, fit in a `u32`, and must not be one of the reserved vsock values
/// (0-2, `u32::MAX`) nor the parent VM's own CID.
fn parse_enclave_cid(args: &ArgMatches) -> NitroCliResult<Option<u64>> {
    let enclave_cid = if let Some(enclave_cid) = args.get_one::<String>("enclave-cid") {
        let enclave_cid: u64 = enclave_cid.parse().map_err(|_| {
            new_nitro_cli_failure!(
                "`enclave-cid` is not a number",
                NitroCliErrorEnum::InvalidArgument
            )
            .add_info(vec!["enclave-cid", enclave_cid])
        })?;
        // Do not use well-known CID values - 0, 1, 2 - as the enclave CID.
        // VMADDR_CID_ANY = -1U
        // VMADDR_CID_HYPERVISOR = 0
        // VMADDR_CID_LOCAL = 1
        // VMADDR_CID_HOST = 2
        // Note: 0 is used as a placeholder to auto-generate a CID.
        // <http://man7.org/linux/man-pages/man7/vsock.7.html>
        if enclave_cid == 0 {
            eprintln!("The enclave CID will be auto-generated as the provided CID is 0");
        }
        if enclave_cid > 0 && enclave_cid <= VMADDR_CID_HOST as u64 {
            return Err(new_nitro_cli_failure!(
                &format!("CID {enclave_cid} is a well-known CID, not to be used for enclaves"),
                NitroCliErrorEnum::InvalidArgument
            ));
        }
        // u32::MAX equals VMADDR_CID_ANY (-1U truncated to 32 bits), so it is
        // rejected as well.
        if enclave_cid == u32::MAX as u64 {
            return Err(new_nitro_cli_failure!(
                &format!("CID {enclave_cid} is a well-known CID, not to be used for enclaves"),
                NitroCliErrorEnum::InvalidArgument
            ));
        }
        // Do not use the CID of the parent VM as the enclave CID.
        if enclave_cid == VMADDR_CID_PARENT as u64 {
            return Err(new_nitro_cli_failure!(
                &format!(
                    "CID {enclave_cid} is the CID of the parent VM, not to be used for enclaves"
                ),
                NitroCliErrorEnum::InvalidArgument
            ));
        }
        // 64-bit CIDs are not yet supported for the vsock device.
        if enclave_cid > u32::MAX as u64 {
            return Err(new_nitro_cli_failure!(
                &format!(
                    "CID {enclave_cid} is higher than the maximum supported (u32 max) for a vsock device"
                ),
                NitroCliErrorEnum::InvalidArgument
            ));
        }
        Some(enclave_cid)
    } else {
        None
    };
    Ok(enclave_cid)
}
/// Parse the enclave image file path from the command-line arguments.
fn parse_eif_path(args: &ArgMatches) -> NitroCliResult<String> {
    args.get_one::<String>("eif-path").cloned().ok_or_else(|| {
        new_nitro_cli_failure!(
            "`eif-path` argument not found",
            NitroCliErrorEnum::MissingArgument
        )
    })
}

/// Parse the enclave's ID from the command-line arguments.
fn parse_enclave_id(args: &ArgMatches) -> NitroCliResult<String> {
    args.get_one::<String>("enclave-id").cloned().ok_or_else(|| {
        new_nitro_cli_failure!(
            "`enclave-id` argument not found",
            NitroCliErrorEnum::MissingArgument
        )
    })
}
/// Parse the disconnect timeout from the command-line arguments.
///
/// Returns `Ok(None)` when the argument is absent, and an `InvalidArgument`
/// failure when it is present but not a valid `u64`.
fn parse_disconnect_timeout(args: &ArgMatches) -> NitroCliResult<Option<u64>> {
    // map + transpose for consistency with `parse_cpu_count` / `parse_cpu_ids`.
    args.get_one::<String>("disconnect-timeout")
        .map(|arg| {
            arg.parse::<u64>().map_err(|_| {
                new_nitro_cli_failure!(
                    "`disconnect-timeout` argument can't be parsed as a number",
                    NitroCliErrorEnum::InvalidArgument
                )
                .add_info(vec!["disconnect-timeout", arg])
            })
        })
        .transpose()
}
/// Parse the list of requested CPU IDs from the command-line arguments.
fn parse_cpu_ids(args: &ArgMatches) -> NitroCliResult<Option<Vec<u32>>> {
    let values = match args.get_many::<String>("cpu-ids") {
        Some(values) => values,
        None => return Ok(None),
    };
    let mut ids = Vec::new();
    for id in values {
        // Fail on the first value that is not a valid u32.
        let parsed = id.parse().map_err(|_| {
            new_nitro_cli_failure!(
                "`cpu-id` is not a number",
                NitroCliErrorEnum::InvalidArgument
            )
            .add_info(vec!["cpu-id", id])
        })?;
        ids.push(parsed);
    }
    Ok(Some(ids))
}

/// Parse the requested number of CPUs from the command-line arguments.
fn parse_cpu_count(args: &ArgMatches) -> NitroCliResult<Option<u32>> {
    let count = match args.get_one::<String>("cpu-count") {
        Some(count) => count,
        None => return Ok(None),
    };
    let parsed = count.parse().map_err(|_| {
        new_nitro_cli_failure!(
            "`cpu-count` is not a number",
            NitroCliErrorEnum::InvalidArgument
        )
        .add_info(vec!["cpu-count", count])
    })?;
    Ok(Some(parsed))
}
/// Return the output file path supplied on the command line, if any.
fn parse_output(args: &ArgMatches) -> Option<String> {
    args.get_one::<String>("output-file").cloned()
}
/// Report whether the enclave should run in debug mode.
///
/// Requesting a console attach implies debug mode, so either flag enables it.
fn debug_mode(args: &ArgMatches) -> bool {
    if args.get_flag("debug-mode") {
        return true;
    }
    args.get_flag("attach-console")
}
/// Parse the attach-console flag from the command-line arguments.
///
/// Returns `true` when the user asked to attach to the enclave console
/// immediately after launch.
fn attach_console(args: &ArgMatches) -> bool {
    args.get_flag("attach-console")
}
/// Parse the enclave name from the command-line arguments.
///
/// The `Result` wrapper is part of the existing interface; this lookup itself
/// cannot fail, so the value is always `Ok`.
fn parse_enclave_name(args: &ArgMatches) -> NitroCliResult<Option<String>> {
    let name = args.get_one::<String>("enclave-name").cloned();
    Ok(name)
}
/// Parse the optional signing-certificate path from the command-line arguments.
fn parse_signing_certificate(args: &ArgMatches) -> Option<String> {
    args.get_one::<String>("signing-certificate")
        .map(String::from)
}
/// Parse the optional private-key path from the command-line arguments.
fn parse_private_key(args: &ArgMatches) -> Option<String> {
    args.get_one::<String>("private-key").map(String::from)
}
/// Parse the optional image name from the command-line arguments.
fn parse_image_name(args: &ArgMatches) -> Option<String> {
    args.get_one::<String>("image_name").map(String::from)
}
/// Parse the optional image version from the command-line arguments.
fn parse_image_version(args: &ArgMatches) -> Option<String> {
    args.get_one::<String>("image_version").map(String::from)
}
/// Parse the optional metadata path from the command-line arguments.
fn parse_metadata(args: &ArgMatches) -> Option<String> {
    args.get_one::<String>("metadata").map(String::from)
}
/// Retrieve the raw error-code string supplied on the command line.
///
/// Returns a `MissingArgument` failure when `error-code` was not supplied.
fn parse_error_code_str(args: &ArgMatches) -> NitroCliResult<String> {
    match args.get_one::<String>("error-code") {
        Some(code) => Ok(code.to_string()),
        None => Err(new_nitro_cli_failure!(
            "`error-code` argument not found",
            NitroCliErrorEnum::MissingArgument
        )),
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::common::construct_error_message;
use crate::create_app;
use clap::{Arg, Command};
    /// Parse the path of the JSON config file
    // NOTE(review): test-local helper; no caller is visible in this part of the
    // module — confirm it is still used before removing.
    fn parse_config_file(args: &ArgMatches) -> NitroCliResult<String> {
        let config_file = args
            .get_one::<String>("config")
            .ok_or(new_nitro_cli_failure!(
                "`config` argument not found",
                NitroCliErrorEnum::MissingArgument
            ))?;
        Ok(config_file.into())
    }
#[test]
fn test_parse_memory() {
let app = create_app!();
let args = vec![
"nitro-cli",
"run-enclave",
"--memory",
"256_mb",
"--cpu-count",
"2",
"--eif-path",
"non_existing_eif.eif",
];
let matches = app.try_get_matches_from(args);
assert!(matches.is_ok());
let result = parse_memory(
matches
.as_ref()
.unwrap()
.subcommand_matches("run-enclave")
.unwrap(),
);
assert!(result.is_err());
if let Err(err_info) = result {
let err_str = construct_error_message(&err_info);
assert!(err_str.contains("Invalid argument provided"))
}
let app = create_app!();
let args = vec![
"nitro-cli",
"run-enclave",
"--memory",
"256",
"--cpu-count",
"2",
"--eif-path",
"non_existing_eif.eif",
];
let matches = app.try_get_matches_from(args);
assert!(matches.is_ok());
let result = parse_memory(
matches
.as_ref()
.unwrap()
.subcommand_matches("run-enclave")
.unwrap(),
);
assert_eq!(result, Ok(256));
let app = create_app!();
let args = vec![
"nitro-cli",
"run-enclave",
"--memory",
"100M",
"--cpu-count",
"2",
"--eif-path",
"non_existing_eif.eif",
];
let matches = app.try_get_matches_from(args);
assert!(matches.is_ok());
let result = parse_memory(
matches
.as_ref()
.unwrap()
.subcommand_matches("run-enclave")
.unwrap(),
);
assert_eq!(result, Ok(100));
let app = create_app!();
let args = vec![
"nitro-cli",
"run-enclave",
"--memory",
"10G",
"--cpu-count",
"2",
"--eif-path",
"non_existing_eif.eif",
];
let matches = app.try_get_matches_from(args);
assert!(matches.is_ok());
let result = parse_memory(
matches
.as_ref()
.unwrap()
.subcommand_matches("run-enclave")
.unwrap(),
);
assert_eq!(result, Ok(10_240));
let app = create_app!();
let args = vec![
"nitro-cli",
"run-enclave",
"--memory",
"2T",
"--cpu-count",
"2",
"--eif-path",
"non_existing_eif.eif",
];
let matches = app.try_get_matches_from(args);
assert!(matches.is_ok());
let result = parse_memory(
matches
.as_ref()
.unwrap()
.subcommand_matches("run-enclave")
.unwrap(),
);
assert_eq!(result, Ok(2 * 1024 * 1024));
}
#[test]
fn test_parse_docker_tag() {
let app = create_app!();
let args = vec![
"nitro-cli",
"build-enclave",
"--docker-uri",
"mytag",
"--docker-dir",
"/home/user/non_existing_dir",
"--output-file",
"sample_eif.eif",
];
let matches = app.try_get_matches_from(args);
assert!(matches.is_ok());
let result = parse_docker_tag(
matches
.as_ref()
.unwrap()
.subcommand_matches("build-enclave")
.unwrap(),
);
assert!(result.is_some());
assert_eq!(result.unwrap(), "mytag");
}
#[test]
fn test_parse_docker_dir() {
let app = create_app!();
let args = vec![
"nitro-cli",
"build-enclave",
"--docker-uri",
"mytag",
"--docker-dir",
"/home/user/non_existing_dir",
"--output-file",
"sample_eif.eif",
];
let matches = app.try_get_matches_from(args);
assert!(matches.is_ok());
let result = parse_docker_dir(
matches
.as_ref()
.unwrap()
.subcommand_matches("build-enclave")
.unwrap(),
);
assert!(result.is_some());
assert_eq!(result.unwrap(), "/home/user/non_existing_dir");
}
#[test]
fn test_parse_enclave_cid_correct() {
let app = create_app!();
let args = vec![
"nitro-cli",
"run-enclave",
"--memory",
"256",
"--cpu-count",
"2",
"--eif-path",
"non_existing_eif.eif",
"--enclave-cid",
"10",
];
let matches = app.try_get_matches_from(args);
assert!(matches.is_ok());
let result = parse_enclave_cid(
matches
.as_ref()
.unwrap()
.subcommand_matches("run-enclave")
.unwrap(),
);
assert!(result.is_ok());
if let Some(parsed_cid) = result.unwrap() {
assert_eq!(parsed_cid, 10);
}
}
#[test]
fn test_parse_enclave_cid_to_be_autogenerated() {
let app = create_app!();
let args = vec![
"nitro-cli",
"run-enclave",
"--memory",
"256",
"--cpu-count",
"2",
"--eif-path",
"non_existing_eif.eif",
"--enclave-cid",
"0",
];
let matches = app.try_get_matches_from(args);
assert!(matches.is_ok());
let result = parse_enclave_cid(
matches
.as_ref()
.unwrap()
.subcommand_matches("run-enclave")
.unwrap(),
);
assert!(result.is_ok());
if let Some(parsed_cid) = result.unwrap() {
assert_eq!(parsed_cid, 0);
}
}
#[test]
fn test_parse_enclave_cid_str() {
let app = create_app!();
let args = vec![
"nitro-cli",
"run-enclave",
"--memory",
"256",
"--cpu-count",
"2",
"--eif-path",
"non_existing_eif.eif",
"--enclave-cid",
"0x1g",
];
let matches = app.try_get_matches_from(args);
assert!(matches.is_ok());
let result = parse_enclave_cid(
matches
.as_ref()
.unwrap()
.subcommand_matches("run-enclave")
.unwrap(),
);
assert!(result.is_err());
if let Err(err_info) = result {
let err_str = construct_error_message(&err_info);
assert!(err_str.contains("Invalid argument provided"))
}
}
#[test]
fn test_parse_enclave_cid_well_known_cid_local() {
let app = create_app!();
let cid_local = VMADDR_CID_LOCAL.to_string();
let args = vec![
"nitro-cli",
"run-enclave",
"--memory",
"256",
"--cpu-count",
"2",
"--eif-path",
"non_existing_eif.eif",
"--enclave-cid",
&cid_local,
];
let matches = app.try_get_matches_from(args);
assert!(matches.is_ok());
let result = parse_enclave_cid(
matches
.as_ref()
.unwrap()
.subcommand_matches("run-enclave")
.unwrap(),
);
assert!(result.is_err());
if let Err(err_info) = result {
let err_str = construct_error_message(&err_info);
assert!(err_str.contains("Invalid argument provided"));
}
}
#[test]
fn test_parse_enclave_cid_well_known_cid_host() {
let app = create_app!();
let cid_host = VMADDR_CID_HOST.to_string();
let args = vec![
"nitro-cli",
"run-enclave",
"--memory",
"256",
"--cpu-count",
"2",
"--eif-path",
"non_existing_eif.eif",
"--enclave-cid",
&cid_host,
];
let matches = app.try_get_matches_from(args);
assert!(matches.is_ok());
let result = parse_enclave_cid(
matches
.as_ref()
.unwrap()
.subcommand_matches("run-enclave")
.unwrap(),
);
assert!(result.is_err());
if let Err(err_info) = result {
let err_str = construct_error_message(&err_info);
assert!(err_str.contains("Invalid argument provided"));
}
}
#[test]
fn test_parse_enclave_cid_parent_vm() {
let app = create_app!();
let parent_vm_cid = VMADDR_CID_PARENT.to_string();
let args = vec![
"nitro-cli",
"run-enclave",
"--memory",
"256",
"--cpu-count",
"2",
"--eif-path",
"non_existing_eif.eif",
"--enclave-cid",
&parent_vm_cid,
];
let matches = app.try_get_matches_from(args);
assert!(matches.is_ok());
let result = parse_enclave_cid(
matches
.as_ref()
.unwrap()
.subcommand_matches("run-enclave")
.unwrap(),
);
assert!(result.is_err());
if let Err(err_info) = result {
let err_str = construct_error_message(&err_info);
assert!(err_str.contains("Invalid argument provided"));
}
}
#[test]
fn test_parse_enclave_cid_negative() {
let app = create_app!();
let args = vec![
"nitro-cli",
"run-enclave",
"--memory",
"256",
"--cpu-count",
"2",
"--eif-path",
"non_existing_eif.eif",
"--enclave-cid",
"-18",
];
let matches = app.try_get_matches_from(args);
// Error (got unexpected value ["-1"])
assert!(matches.is_err());
}
#[test]
fn test_parse_eif_path() {
let app = create_app!();
let args = vec![
"nitro-cli",
"run-enclave",
"--memory",
"256",
"--cpu-count",
"2",
"--eif-path",
"non_existing_eif.eif",
];
let matches = app.try_get_matches_from(args);
assert!(matches.is_ok());
let result = parse_eif_path(
matches
.as_ref()
.unwrap()
.subcommand_matches("run-enclave")
.unwrap(),
);
assert!(result.is_ok());
assert_eq!(result.unwrap(), "non_existing_eif.eif");
}
#[test]
fn test_parse_enclave_id() {
let app = create_app!();
let args = vec![
"nitro-cli",
"terminate-enclave",
"--enclave-id",
"i-0000-enc-1234",
];
let matches = app.try_get_matches_from(args);
assert!(matches.is_ok());
let result = parse_enclave_id(
matches
.as_ref()
.unwrap()
.subcommand_matches("terminate-enclave")
.unwrap(),
);
assert!(result.is_ok());
assert_eq!(result.unwrap(), "i-0000-enc-1234");
}
#[test]
fn test_parse_cpu_ids_correct() {
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | true |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/src/common/signal_handler.rs | src/common/signal_handler.rs | // Copyright 2020-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(missing_docs)]
#![deny(warnings)]
use log::warn;
use nix::sys::signal::{SigSet, Signal};
use nix::sys::signal::{SIGHUP, SIGINT, SIGQUIT, SIGTERM};
use std::os::unix::io::RawFd;
use std::thread;
use crate::common::{NitroCliErrorEnum, NitroCliFailure, NitroCliResult};
use crate::new_nitro_cli_failure;
/// The custom handler of POSIX signals.
pub struct SignalHandler {
    // The set of signals this handler covers; becomes `None` after the set is
    // moved into the listener thread by `start_handler()`.
    sig_set: Option<SigSet>,
}
impl SignalHandler {
    /// Create a new `SignalHandler` instance covering the given list of signals.
    pub fn new(signals: &[Signal]) -> Self {
        let mut sig_set = SigSet::empty();
        for signal in signals.iter() {
            sig_set.add(*signal);
        }
        SignalHandler {
            sig_set: Some(sig_set),
        }
    }
    /// Create a new `SignalHandler` instance covering a default list of
    /// signals (SIGINT, SIGQUIT, SIGTERM and SIGHUP).
    pub fn new_with_defaults() -> Self {
        SignalHandler::new(&[SIGINT, SIGQUIT, SIGTERM, SIGHUP])
    }
    /// Mask (block) all signals covered by the handler on the calling thread.
    ///
    /// Consumes and returns `self` so the call can be chained.
    pub fn mask_all(self) -> NitroCliResult<Self> {
        if let Some(set) = self.sig_set {
            set.thread_block().map_err(|e| {
                new_nitro_cli_failure!(
                    &format!("Masking signals covered by handler failed: {e:?}"),
                    NitroCliErrorEnum::SignalMaskingError
                )
            })?;
        }
        Ok(self)
    }
    /// Unmask (unblock) all signals covered by the handler on the calling thread.
    ///
    /// Consumes and returns `self` so the call can be chained.
    pub fn unmask_all(self) -> NitroCliResult<Self> {
        if let Some(set) = self.sig_set {
            set.thread_unblock().map_err(|e| {
                new_nitro_cli_failure!(
                    &format!("Unmasking signals covered by handler failed: {e:?}"),
                    NitroCliErrorEnum::SignalUnmaskingError
                )
            })?;
        }
        Ok(self)
    }
    /// Start listening for events on a dedicated thread and handle them using
    /// the provided function; the handler returns `true` to stop listening.
    ///
    /// The signal set is moved into the listener thread, so only the first
    /// call on an instance has any effect; later calls are no-ops.
    pub fn start_handler(&mut self, fd: RawFd, handler: fn(RawFd, Signal) -> bool) {
        // Idiomatic replacement for the former `is_none()` check followed by
        // `take().unwrap()` (clippy: unnecessary_unwrap); behavior is identical.
        let Some(thread_sig_set) = self.sig_set.take() else {
            return;
        };
        thread::spawn(move || {
            let mut stop = false;
            while !stop {
                stop = match thread_sig_set.wait() {
                    Ok(signal) => handler(fd, signal),
                    Err(e) => {
                        warn!("Error listening for signals: {}", e);
                        true
                    }
                };
            }
        });
    }
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/src/common/logger.rs | src/common/logger.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(missing_docs)]
#![deny(warnings)]
use chrono::offset::{Local, Utc};
use chrono::DateTime;
use flexi_logger::writers::LogWriter;
use flexi_logger::{DeferredNow, Record};
use nix::unistd::Uid;
use std::env;
use std::fs::{File, OpenOptions, Permissions};
use std::io::{Error, Result, Write};
use std::ops::{Deref, DerefMut};
use std::os::unix::fs::{MetadataExt, PermissionsExt};
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use crate::common::{NitroCliErrorEnum, NitroCliFailure, NitroCliResult};
use crate::new_nitro_cli_failure;
/// The default logging level used by the logger.
const DEFAULT_LOG_LEVEL: &str = "info";
/// The environment variable which holds the path to the logging directory.
const LOGS_DIR_PATH_ENV_VAR: &str = "NITRO_CLI_LOGS_PATH";
/// The default path to the logging directory.
const LOGS_DIR_PATH: &str = "/var/log/nitro_enclaves";
/// The name of the output log file.
const LOG_FILE_NAME: &str = "nitro_enclaves.log";
/// A log writer which outputs its messages to a custom file. It also
/// allows the updating of its ID, in order to indicate which process
/// is actually logging a message. This implementation will also enable
/// synchronized logging to a centralized file for multiple enclaves.
#[derive(Clone)]
pub struct EnclaveProcLogWriter {
    // Shared handle to the log file; mutex-guarded because clones of this
    // writer may write concurrently.
    out_file: Arc<Mutex<File>>,
    // Identifier prepended to every message (see `update_logger_id()`).
    logger_id: Arc<Mutex<String>>,
}
impl EnclaveProcLogWriter {
    /// Create a new log writer with an empty logger ID, pointing at the
    /// centralized log file.
    pub fn new() -> NitroCliResult<Self> {
        // All logging shall be directed to a centralized file.
        Ok(EnclaveProcLogWriter {
            out_file: Arc::new(Mutex::new(
                open_log_file(&get_log_file_path())
                    .map_err(|e| e.add_subaction("Failed to open log file".to_string()))?,
            )),
            logger_id: Arc::new(Mutex::new(String::new())),
        })
    }
    /// Check if the log file is present and if it is not, (re)open it.
    fn safe_open_log_file(&self) -> NitroCliResult<()> {
        let log_path = &get_log_file_path();
        if !log_path.exists() {
            // The file was removed underneath us (e.g. log rotation); reopen
            // it and swap the shared handle in place.
            let new_file = open_log_file(log_path)
                .map_err(|e| e.add_subaction(String::from("Failed to open log file")))?;
            let mut file_ref = self.out_file.lock().map_err(|e| {
                new_nitro_cli_failure!(
                    &format!("Failed to acquire lock: {e:?}"),
                    NitroCliErrorEnum::LockAcquireFailure
                )
            })?;
            *file_ref.deref_mut() = new_file;
        }
        Ok(())
    }
    /// Update the logger ID (correlated with the process which is doing logging).
    pub fn update_logger_id(&self, new_id: &str) -> NitroCliResult<()> {
        let mut old_id = self.logger_id.lock().map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to acquire logger ID lock: {e:?}"),
                NitroCliErrorEnum::LockAcquireFailure
            )
        })?;
        // Replace the previous ID in place.
        old_id.deref_mut().clear();
        old_id.deref_mut().push_str(new_id);
        Ok(())
    }
    /// Generate a single message string of the form
    /// `[id][level][timestamp][file:line] message`.
    fn create_msg(&self, now: &DateTime<Local>, record: &Record) -> NitroCliResult<String> {
        // UTC timestamp rendered per RFC 3339 (millisecond precision, 'Z' suffix).
        let timestamp = DateTime::<Utc>::from_naive_utc_and_offset(now.naive_utc(), Utc)
            .to_rfc3339_opts(chrono::SecondsFormat::Millis, true);
        let logger_id = self.logger_id.lock().map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to acquire logger ID lock: {e:?}"),
                NitroCliErrorEnum::LockAcquireFailure
            )
        })?;
        Ok(format!(
            "[{}][{}][{}][{}:{}] {}\n",
            logger_id.deref(),
            record.level(),
            timestamp,
            record.file().unwrap_or("?"),
            record.line().unwrap_or(0),
            &record.args()
        ))
    }
}
impl LogWriter for EnclaveProcLogWriter {
    // Format the record and append it to the shared log file, reopening the
    // file first if it went missing.
    fn write(&self, now: &mut DeferredNow, record: &Record) -> Result<()> {
        if self.safe_open_log_file().is_err() {
            return Err(Error::other("Failed to safely open log file for writing"));
        }
        if let Ok(record_str) = self.create_msg(now.now(), record) {
            if let Ok(mut out_file) = self.out_file.lock() {
                out_file.deref_mut().write_all(record_str.as_bytes())?;
                return Ok(());
            }
            return Err(Error::other("Failed to lock log file"));
        }
        Err(Error::other("Failed to create logger message"))
    }
    // Nothing is buffered at this layer, so flushing is a no-op.
    fn flush(&self) -> Result<()> {
        Ok(())
    }
    fn max_log_level(&self) -> log::LevelFilter {
        // The log level is either given in RUST_LOG or defaults to a specified value.
        let level = std::env::var("RUST_LOG").unwrap_or_else(|_| DEFAULT_LOG_LEVEL.to_string());
        // Unrecognized values fall back to Info.
        match level.to_lowercase().as_ref() {
            "info" => log::LevelFilter::Info,
            "debug" => log::LevelFilter::Debug,
            "warn" => log::LevelFilter::Warn,
            "error" => log::LevelFilter::Error,
            "trace" => log::LevelFilter::Trace,
            _ => log::LevelFilter::Info,
        }
    }
}
/// Get the directory containing Nitro CLI related log files.
///
/// The `NITRO_CLI_LOGS_PATH` environment variable overrides the default
/// location when it is set.
pub fn get_log_file_base_path() -> String {
    env::var(LOGS_DIR_PATH_ENV_VAR).unwrap_or_else(|_| LOGS_DIR_PATH.to_string())
}
/// Get the full path to the centralized log file.
fn get_log_file_path() -> PathBuf {
    let base_dir = get_log_file_base_path();
    Path::new(&base_dir).join(LOG_FILE_NAME)
}
/// Open a file at a given location for writing and appending.
///
/// The file is created if missing. When the current user owns the file, its
/// permissions are set to 0o766 so that other users' CLI instances can also
/// write to it.
fn open_log_file(file_path: &Path) -> NitroCliResult<File> {
    let file = OpenOptions::new()
        .create(true)
        .append(true)
        .read(false)
        .open(file_path)
        .map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to open log file: {e:?}"),
                NitroCliErrorEnum::FileOperationFailure
            )
            .add_info(vec![
                file_path
                    .to_str()
                    .unwrap_or("Invalid unicode log file name"),
                "Open",
            ])
        })?;
    // Determine who owns the file; only the owner may adjust permissions.
    let log_file_uid = Uid::from_raw(
        file.metadata()
            .map_err(|e| {
                new_nitro_cli_failure!(
                    &format!("Failed to get log file metadata: {e:?}"),
                    NitroCliErrorEnum::FileOperationFailure
                )
                .add_info(vec![
                    file_path
                        .to_str()
                        .unwrap_or("Invalid unicode log file name"),
                    "Get metadata",
                ])
            })?
            .uid(),
    );
    // The log file should be write-accessible to any user, since
    // any user may launch a CLI instance. Only the file's owner
    // may change its permissions.
    if log_file_uid == Uid::current() {
        let perms = Permissions::from_mode(0o766);
        file.set_permissions(perms).map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to change log file permissions: {e:?}"),
                NitroCliErrorEnum::FilePermissionsError
            )
        })?;
    }
    Ok(file)
}
/// Initialize logging.
///
/// Builds an `EnclaveProcLogWriter`, installs it as the global `flexi_logger`
/// backend (level from `RUST_LOG`, defaulting to "info") and returns the
/// writer so CLI-related processes can share it.
pub fn init_logger() -> NitroCliResult<EnclaveProcLogWriter> {
    // The log file is "nitro-cli.log" and is stored in the NPE resources directory.
    let log_writer = EnclaveProcLogWriter::new()?;
    // Initialize logging with the new log writer.
    flexi_logger::Logger::try_with_env_or_str(DEFAULT_LOG_LEVEL)
        .map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to initialize enclave process logger: {e:?}"),
                NitroCliErrorEnum::LoggerError
            )
        })?
        .log_to_writer(Box::new(log_writer.clone()))
        .start()
        .map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to initialize enclave process logger: {e:?}"),
                NitroCliErrorEnum::LoggerError
            )
        })?;
    // The log writer is provided for sharing between CLI-related processes.
    Ok(log_writer)
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::fs;
    use std::os::unix::fs::PermissionsExt;
    use tempfile::NamedTempFile;
    /// Tests that `open_log_file()` creates a file
    /// with the expected permissions.
    #[test]
    fn test_open_log_file() {
        let file0 = NamedTempFile::new();
        if let Ok(file0) = file0 {
            let test_file_path = file0.path();
            let f = open_log_file(test_file_path).unwrap();
            let metadata = f.metadata();
            assert!(metadata.is_ok());
            if let Ok(metadata) = metadata {
                assert!(metadata.is_file());
                let permissions = metadata.permissions();
                let mode = permissions.mode();
                // Only the permission bits matter; mask off the file type.
                assert_eq!(mode & 0o777, 0o766);
            }
        }
    }
    /// Tests that the logger id is initially empty ("").
    #[test]
    fn test_init_logger() {
        let tmp_log_dir: &str = "./.tmp_logs_init_logger";
        // Get old environment variable value
        let old_log_path = env::var(LOGS_DIR_PATH_ENV_VAR);
        let path_existed = Path::new(tmp_log_dir).exists();
        // Update environment variable value
        env::set_var(LOGS_DIR_PATH_ENV_VAR, tmp_log_dir);
        let _ = fs::create_dir(tmp_log_dir);
        let log_writer = EnclaveProcLogWriter::new().unwrap();
        let lock_result = log_writer.logger_id.lock();
        assert!(lock_result.unwrap().is_empty());
        if !path_existed {
            // Remove whole `tmp_log_dir` if necessary
            let _ = fs::remove_dir_all(tmp_log_dir);
        } else {
            // Only remove the log file
            let _ = fs::remove_file(format!("{}/{}", tmp_log_dir, &LOG_FILE_NAME));
        }
        // Reset old environment variable value if necessary
        if let Ok(old_log_path) = old_log_path {
            env::set_var(LOGS_DIR_PATH_ENV_VAR, old_log_path);
        }
    }
    /// Tests that the logger id is altered after issuing a
    /// call to `update_logger_id()`.
    #[test]
    fn test_update_logger_id() {
        let tmp_log_dir: &str = "./.tmp_logs_update_logger_id";
        // Get old environment variable value
        let old_log_path = env::var(LOGS_DIR_PATH_ENV_VAR);
        let path_existed = Path::new(tmp_log_dir).exists();
        // Update environment variable value
        env::set_var(LOGS_DIR_PATH_ENV_VAR, tmp_log_dir);
        let _ = fs::create_dir(tmp_log_dir);
        let log_writer = EnclaveProcLogWriter::new().unwrap();
        log_writer.update_logger_id("new-logger-id").unwrap();
        let lock_result = log_writer.logger_id.lock();
        assert!(lock_result.unwrap().eq("new-logger-id"));
        log_writer.update_logger_id("").unwrap();
        if !path_existed {
            // Remove whole `tmp_log_dir` if necessary
            let _ = fs::remove_dir_all(tmp_log_dir);
        } else {
            // Only remove the log file
            let _ = fs::remove_file(format!("{}/{}", tmp_log_dir, &LOG_FILE_NAME));
        }
        // Reset old environment variable value if necessary
        if let Ok(old_log_path) = old_log_path {
            env::set_var(LOGS_DIR_PATH_ENV_VAR, old_log_path);
        }
    }
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/src/common/document_errors.rs | src/common/document_errors.rs | use lazy_static::lazy_static;
use std::collections::HashMap;
use crate::common::{NitroCliErrorEnum, NitroCliFailure};
lazy_static! {
    /// Structure mapping enum Errors to a specific error code.
    ///
    /// These "Exx" strings are user-facing identifiers (see
    /// `get_detailed_info`), so existing codes must not be renumbered;
    /// new variants get the next free code.
    pub static ref ERROR_CODES: HashMap<NitroCliErrorEnum, &'static str> =
        [
            (NitroCliErrorEnum::UnspecifiedError, "E00"),
            (NitroCliErrorEnum::MissingArgument, "E01"),
            (NitroCliErrorEnum::ConflictingArgument, "E02"),
            (NitroCliErrorEnum::InvalidArgument, "E03"),
            (NitroCliErrorEnum::SocketPairCreationFailure, "E04"),
            (NitroCliErrorEnum::ProcessSpawnFailure, "E05"),
            (NitroCliErrorEnum::DaemonizeProcessFailure, "E06"),
            (NitroCliErrorEnum::ReadFromDiskFailure, "E07"),
            (NitroCliErrorEnum::UnusableConnectionError, "E08"),
            (NitroCliErrorEnum::SocketCloseError, "E09"),
            (NitroCliErrorEnum::SocketConnectTimeoutError, "E10"),
            (NitroCliErrorEnum::SocketError, "E11"),
            (NitroCliErrorEnum::EpollError, "E12"),
            (NitroCliErrorEnum::InotifyError, "E13"),
            (NitroCliErrorEnum::InvalidCommand, "E14"),
            (NitroCliErrorEnum::LockAcquireFailure, "E15"),
            (NitroCliErrorEnum::ThreadJoinFailure, "E16"),
            (NitroCliErrorEnum::SerdeError, "E17"),
            (NitroCliErrorEnum::FilePermissionsError, "E18"),
            (NitroCliErrorEnum::FileOperationFailure, "E19"),
            (NitroCliErrorEnum::InvalidCpuConfiguration, "E20"),
            (NitroCliErrorEnum::NoSuchCpuAvailableInPool, "E21"),
            (NitroCliErrorEnum::InsufficientCpus, "E22"),
            (NitroCliErrorEnum::MalformedCpuId, "E23"),
            (NitroCliErrorEnum::CpuError, "E24"),
            (NitroCliErrorEnum::NoSuchHugepageFlag, "E25"),
            (NitroCliErrorEnum::InsufficientMemoryRequested, "E26"),
            (NitroCliErrorEnum::InsufficientMemoryAvailable, "E27"),
            (NitroCliErrorEnum::InvalidEnclaveFd, "E28"),
            (NitroCliErrorEnum::IoctlFailure, "E29"),
            (NitroCliErrorEnum::IoctlImageLoadInfoFailure, "E30"),
            (NitroCliErrorEnum::IoctlSetMemoryRegionFailure, "E31"),
            (NitroCliErrorEnum::IoctlAddVcpuFailure, "E32"),
            (NitroCliErrorEnum::IoctlEnclaveStartFailure, "E33"),
            (NitroCliErrorEnum::MemoryOverflow, "E34"),
            (NitroCliErrorEnum::EifParsingError, "E35"),
            (NitroCliErrorEnum::EnclaveBootFailure, "E36"),
            (NitroCliErrorEnum::EnclaveEventWaitError, "E37"),
            (NitroCliErrorEnum::EnclaveProcessCommandNotExecuted, "E38"),
            (NitroCliErrorEnum::EnclaveProcessConnectionFailure, "E39"),
            (NitroCliErrorEnum::SocketPathNotFound, "E40"),
            (NitroCliErrorEnum::EnclaveProcessSendReplyFailure, "E41"),
            (NitroCliErrorEnum::EnclaveMmapError, "E42"),
            (NitroCliErrorEnum::EnclaveMunmapError, "E43"),
            (NitroCliErrorEnum::EnclaveConsoleConnectionFailure, "E44"),
            (NitroCliErrorEnum::EnclaveConsoleReadError, "E45"),
            (NitroCliErrorEnum::EnclaveConsoleWriteOutputError, "E46"),
            (NitroCliErrorEnum::IntegerParsingError, "E47"),
            (NitroCliErrorEnum::EifBuildingError, "E48"),
            (NitroCliErrorEnum::DockerImageBuildError, "E49"),
            (NitroCliErrorEnum::DockerImagePullError, "E50"),
            (NitroCliErrorEnum::ArtifactsPathNotSet, "E51"),
            (NitroCliErrorEnum::BlobsPathNotSet, "E52"),
            (NitroCliErrorEnum::ClockSkewError, "E53"),
            (NitroCliErrorEnum::SignalMaskingError, "E54"),
            (NitroCliErrorEnum::SignalUnmaskingError, "E55"),
            (NitroCliErrorEnum::LoggerError, "E56"),
            (NitroCliErrorEnum::HasherError, "E57"),
            (NitroCliErrorEnum::EnclaveNamingError, "E58"),
            (NitroCliErrorEnum::EIFSignatureCheckerError, "E59"),
            (NitroCliErrorEnum::EIFSigningError, "E60"),
        ].iter().cloned().collect();
}
/// Returns detailed error information based on supplied arguments.
pub fn get_detailed_info(error_code_str: String, additional_info: &[String]) -> String {
let mut ret = format!("[ {error_code_str} ] ");
let info_placeholder = "MISSING_INFO".to_string();
match error_code_str.as_str() {
"E00" => {
ret.push_str("Unspecified error. This is used as a catch-all error and should not be used in the code.");
}
"E01" => {
ret.push_str(
format!(
"Missing mandatory argument. User did not provide the `{}` argument.",
additional_info.first().unwrap_or(&info_placeholder)
)
.as_str(),
);
}
"E02" => {
ret.push_str(
format!(
"Conflicting arguments. User provided both `{}` and `{}`.",
additional_info.first().unwrap_or(&info_placeholder),
additional_info.get(1).unwrap_or(&info_placeholder)
)
.as_str(),
);
}
"E03" => {
ret.push_str(
format!(
"Invalid argument provided. The parameter `{}` is not a valid integer (`{}`)",
additional_info.first().unwrap_or(&info_placeholder),
additional_info.get(1).unwrap_or(&info_placeholder)
)
.as_str(),
);
}
"E04" => {
ret.push_str("Socket pair creation failure. Such error appears when the Nitro CLI process attempts to open a stream pair in order to send a command to the enclave process but the stream initialization fails.");
}
"E05" => {
ret.push_str("Process spawn failure. Such error appears when the main Nitro CLI process fails to spawn the enclave process, in order to complete a `run-enclave` command.");
}
"E06" => {
ret.push_str("Daemonize process failure. Such error appears when the system fails to daemonize a newly spawned enclave process.");
}
"E07" => {
ret.push_str("Read from disk failure. Such error appears when the Nitro CLI process fails to read the content of the enclave sockets directory (usually '/run/nitro_enclaves/') in order to perform a `describe-enclave` operation. Check that the directory exists and it has proper permissions, or run the Nitro Enclaves configuration script in order to (re)configure the environment.");
}
"E08" => {
ret.push_str("Unusable connection error. Such error appears when the Nitro CLI process attempts to open a connection to a non-existing or previously closed enclave descriptor");
}
"E09" => {
ret.push_str("Socket close error. Such error appears when the system fails to successfully close a communication channel.");
}
"E10" => {
ret.push_str("Socket connect set timeout error. Such error appears when the system fails to configure a specific timeout for a given socket. May arise when trying to connect to an enclave's console.");
}
"E11" => {
ret.push_str("Socket error. This is used as an error for catching any other socket operation errors not covered by previous custom errors.");
}
"E12" => {
ret.push_str("Epoll error. Such error appears, for instance, when the system fails to register a specific enclave descriptor with epoll in order to monitor events for it.");
}
"E13" => {
ret.push_str("Inotify error. Such error appears when the system fails to configure a socket for monitorization.");
}
"E14" => {
ret.push_str("Invalid command. Such error appears when an unknown command and / or unknown arguments are sent through a socket.");
}
"E15" => {
ret.push_str("Lock acquire failure. Such error appears when the system fails to obtain the lock for an object with concurrent access, such as a structure containing information about a running enclave.");
}
"E16" => {
ret.push_str("Thread join failure. Such error appears when the system fails to successfully join a thread, after it finished executing.");
}
"E17" => {
ret.push_str("Serde error. Such error appears when serializing / deserializing a command or response fails.");
}
"E18" => {
ret.push_str("File permissions error. Such error appears when a user other than the owner of the logging file (usually '/var/log/nitro_enclaves/nitro_enclaves.log') attempts to change the file permissions");
}
"E19" => {
ret.push_str("File operation failure. Such error appears when the system fails to perform the requested file operations, such as opening the EIF file when launching an enclave, or seeking to a specific offset in the EIF file, or writing to the log file.");
if additional_info.len() >= 2 {
ret.push_str(
format!(
"\nFile: '{}', failing operation: '{}'.",
additional_info.first().unwrap_or(&info_placeholder),
additional_info.get(1).unwrap_or(&info_placeholder),
)
.as_str(),
);
}
}
"E20" => {
ret.push_str(
format!(
"Invalid CPU configuration. User provided `{}` contains same CPU(s) (CPU(s) {}) multiple times.",
additional_info.first().unwrap_or(&info_placeholder),
additional_info.get(1).unwrap_or(&info_placeholder),
)
.as_str(),
);
}
"E21" => {
ret.push_str(
format!(
"No such CPU available in the pool. User provided `{}` contains CPU {}, which is not available in the pool.\nYou can add a specific CPU to the CPU pool by editing the `cpu_pool` value from '/etc/nitro_enclaves/allocator.yaml' and then enable the nitro-enclaves-allocator.service.",
additional_info.first().unwrap_or(&info_placeholder),
additional_info.get(1).unwrap_or(&info_placeholder),
).as_str(),
);
}
"E22" => {
ret.push_str(
format!(
"Insufficient CPUs available in the pool. User provided `{}` is {}, which is more than the configured CPU pool size.\nYou can increase the CPU pool size by editing the `cpu_count` value from '/etc/nitro_enclaves/allocator.yaml' and then enable the nitro-enclaves-allocator.service.",
additional_info.first().unwrap_or(&info_placeholder),
additional_info.get(1).unwrap_or(&info_placeholder),
).as_str(),
);
}
"E23" => {
ret.push_str("Malformed CPU ID error. Such error appears when a `lscpu` line is malformed and reports an invalid online CPUs list.");
}
"E24" => {
ret.push_str("CPU error. Such error appears when a CPU line interval is invalid (for instance, 0-3-7)");
}
"E25" => {
ret.push_str("No such hugepage flag error. Such error appears when the enclave process attempts to use an invalid hugepage size (size other than the known hugepage sizes) for initializing the enclave memory.");
}
"E26" => {
if additional_info.len() >= 3 {
ret.push_str(
format!(
"Insufficient memory requested. User provided `{}` is {} MB, but based on the EIF file size, the minimum memory should be {} MB",
additional_info.first().unwrap_or(&info_placeholder),
additional_info.get(1).unwrap_or(&info_placeholder),
additional_info.get(2).unwrap_or(&info_placeholder)
).as_str(),
);
} else {
ret.push_str(
format!(
"Insufficient memory requested. User provided `{}` is {} MB, and memory should be greater than 0 MB.",
additional_info.first().unwrap_or(&info_placeholder),
additional_info.get(1).unwrap_or(&info_placeholder)
).as_str(),
);
}
}
"E27" => {
ret.push_str(
format!(
"Insufficient memory available. User provided `{}` is {} MB, which is more than the available hugepage memory.\nYou can increase the available memory by editing the `memory_mib` value from '/etc/nitro_enclaves/allocator.yaml' and then restart the nitro-enclaves-allocator.service.",
additional_info.first().unwrap_or(&info_placeholder),
additional_info.get(1).unwrap_or(&info_placeholder)
).as_str(),
);
}
"E28" => {
ret.push_str("Invalid enclave descriptor. Such error appears when the NE_CREATE_VM ioctl returns with an error.");
}
"E29" => {
ret.push_str("Ioctl failure. Such error is used as a general ioctl error and appears whenever an ioctl fails. In this case, the error backtrace provides detailed information on what specifically failed during the ioctl.");
}
"E30" => {
ret.push_str("Ioctl image get load info failure. Such error appears when the ioctl used for getting the memory load information fails. In this case, the error backtrace provides detailed information on what specifically failed during the ioctl.");
}
"E31" => {
ret.push_str("Ioctl set memory region failure. Such error appears when the ioctl used for setting a given memory region fails. In this case, the error backtrace provides detailed information on what specifically failed during the ioctl.");
}
"E32" => {
ret.push_str("Ioctl add vCPU failure. Such error appears when the ioctl used for adding a vCPU fails. In this case, the error backtrace provides detailed information on what specifically failed during the ioctl.");
}
"E33" => {
ret.push_str("Ioctl start enclave failure. Such error appears when the ioctl used for starting an enclave fails. In this case, the error backtrace provides details information on what specifically failed during the ioctl.");
}
"E34" => {
ret.push_str("Memory overflow. Such error may appear during loading the EIF in the memory regions which will be conceded to the future enclave, if the regions offset plus the EIF file size exceeds the maximum address of the target platform.");
}
"E35" => {
ret.push_str("EIF file parsing error. Such errors appear when attempting to fill a memory region with a section of the EIF file, but reading the entire section fails. This might indicate that the required hugepages are not available.");
}
"E36" => {
ret.push_str("Enclave boot failure. Such error appears when attempting to receive the `ready` signal from a freshly booted enclave. It arises in several contexts, for instance, when the enclave is booted from an invalid EIF file and the enclave process immediately exits, failing to submit the `ready` signal. In this case, the error backtrace provides detailed information on what specifically failed during the enclave boot process.");
}
"E37" => {
ret.push_str("Enclave event wait error. Such error appears when monitoring an enclave descriptor for events fails.");
}
"E38" => {
ret.push_str("Enclave process command not executed error. Such error appears when at least one enclave fails to provide the description information.");
}
"E39" => {
ret.push_str("Enclave process connection failure. Such error appears when the enclave manager fails to connect to at least one enclave process for retrieving the description information.");
}
"E40" => {
ret.push_str("Socket path not found. Such error appears when the Nitro CLI process fails to build the corresponding socket path starting from a given enclave ID.");
}
"E41" => {
ret.push_str("Enclave process send reply failure. Such error appears when the enclave process fails to submit the status code to the Nitro CLI process after performing a run / describe / terminate command.");
}
"E42" => {
ret.push_str(
"Enclave mmap error. Such error appears when allocating the enclave memory fails.",
);
}
"E43" => {
ret.push_str(
"Enclave munmap error. Such error appears when unmapping the enclave memory fails.",
);
}
"E44" => {
ret.push_str("Enclave console connection failure. Such error appears when the Nitro CLI process fails to establish a connection to a running enclave's console.");
}
"E45" => {
ret.push_str("Enclave console read error. Such error appears when reading from a running enclave's console fails.");
}
"E46" => {
ret.push_str("Enclave console write output error. Such error appears when writing the information retrieved from a running enclave's console (to a given stream) fails.");
}
"E47" => {
ret.push_str("Integer parsing error. Such error appears when trying to connect to a running enclave's console, but the enclave CID cannot be parsed correctly.");
}
"E48" => {
ret.push_str("EIF building error. Such error appears when trying to build an EIF file. In this case, the error backtrace provides detailed information on the failure reason.");
}
"E49" => {
ret.push_str("Docker image build error. Such error appears when trying to build and EIF file, but building the corresponding docker image fails. In this case, the error backtrace provides detailed information on the failure reason.");
}
"E50" => {
ret.push_str("Docker image pull error. Such error appears when trying to build an EIF file, but pulling the corresponding docker image fails. In this case, the error backtrace provides detailed informatino on the failure reason.");
}
"E51" => {
ret.push_str("Artifacts path environment variable not set. Such error appears when trying to build an EIF file, but the artifacts path environment variable is not set.");
}
"E52" => {
ret.push_str("Blobs path environment variable not set. Such error appears when trying to build an EIF file, but the blobs path environment variable is not set.");
}
"E53" => {
ret.push_str("Clock skew error. Such error appears when continuously reading from a running enclave's console, but measuring the time elapsed between consecutive reads failed.");
}
"E54" => {
ret.push_str("Signal masking error. Such error appears if attempting to mask specific signals before creating an enclave process fails.");
}
"E55" => {
ret.push_str("Signal unmasking error. Such error appears if attempting to unmask specific signals after creating an enclave process fails.");
}
"E56" => {
ret.push_str("Logger error. Such error appears when attempting to initialize the underlying logging system fails.");
}
"E57" => {
ret.push_str("Hasher error. Such error appears when trying to initialize a hasher or write bytes to it, resulting in a IO error.");
}
"E58" => {
ret.push_str("Naming error. Such error appears when trying to perform an enclave operation using the enclave name and the name is invalid.");
}
"E59" => {
ret.push_str("EIF signature checker error. Such error appears when validation of the signing certificate fails.");
}
"E60" => {
ret.push_str("Signing error. Such error appears if incorrect key or certificate paths are provided, or when AWS credenrials need to be refreshed to use a KMS key.");
}
_ => {
ret.push_str(format!("No such error code {error_code_str}").as_str());
}
}
ret
}
/// Builds the documentation URL pointing at the detailed description of the
/// given error code (the docs page has one anchor per error code, e.g. "#E26").
pub fn construct_help_link(error_code_str: String) -> String {
    let mut link = String::from("https://docs.aws.amazon.com/enclaves/latest/user/cli-errors.html#");
    link.push_str(&error_code_str);
    link
}
/// Returns a string containing the backtrace recorded during propagating an error message.
///
/// The output lists the top-level action, the chain of subactions (printed in
/// reverse push order, i.e. outermost first), the file and line where the root
/// error occurred, and the crate version.
pub fn construct_backtrace(failure_info: &NitroCliFailure) -> String {
    let version = env!("CARGO_PKG_VERSION");
    // Build the subactions listing into a single reused buffer. The previous
    // implementation folded with `format!("{acc}\n        {x}")`, which
    // reallocated and re-copied the accumulator on every step (O(n^2)).
    let mut subactions = String::new();
    for subaction in failure_info.subactions.iter().rev() {
        subactions.push_str("\n        ");
        subactions.push_str(subaction);
    }
    format!(
        "  Action: {}\n  Subactions:{}\n  Root error file: {}\n  Root error line: {}\n  Version: {}",
        failure_info.action, subactions, failure_info.file, failure_info.line, version
    )
}
/// Detailed information based on user-provided error code.
pub fn explain_error(error_code_str: String) {
match error_code_str.as_str() {
"E00" => {
eprintln!("Unspecified error. This is used as a catch-all error and should not be used in the code.");
}
"E01" => {
eprintln!("Missing mandatory argument. Such error appears when the Nitro CLI is requested to perform an operation, but not all of the mandatory arguments were supplied.\n\tExample: `nitro-cli run-enclave --cpu-count 2 --eif-path /path/to/my/eif`. Note that in this case, the mandatory parameter `--memory` is missing a value.");
}
"E02" => {
eprintln!("CLI conflicting arguments. Such error appears when the Nitro CLI is supplied two contradicting arguments at the same time, such as `--cpu-count` and `--cpu-ids`.\nIn this case, only one of the parameters should be supplied.");
}
"E03" => {
eprintln!("Invalid argument provided. Such error appears when the type of at least one of the arguments provided to the Nitro CLI does not match the expected type of that parameter.\n\tExample: `nitro-cli run-enclave --cpu-count 1z --memory 80 --eif-path /path/to/my/eif`. In this case, `cpu-count` is not a valid integer value." );
}
"E04" => {
eprintln!("Socket pair creation failure. Such error apears when the Nitro CLI process attempts to open a stream pair in order to send a command to the enclave process, but the stream initialization fails.");
}
"E05" => {
eprintln!("Process spawn failure. Such error appears when the main Nitro CLI process failed to spawn the enclave process, in order to complete a `run-enclave` command.");
}
"E06" => {
eprintln!("Daemonize process failure. Such error appears when the system fails to daemonize the newly spawned enclave process.")
}
"E07" => {
eprintln!("Read from disk failure. Such error appears when the Nitro CLI process fails to read the content of the enclave sockets directory (usually '/run/nitro_enclaves/') in order to perform a `describe-enclave` operation. Check that the directory exists and it has proper permissions, or run the Nitro Enclaves configuration script in order to (re)configure the environment.");
}
"E08" => {
eprintln!("Unusable connection error. Such error appears when the Nitro CLI process attempts to open a connection to a non-existing or previously closed enclave descriptor");
}
"E09" => {
eprintln!("Socket close error. Such error appears when the system fails to successfully close a communication channel.");
}
"E10" => {
eprintln!("Socket connect set timeout error. Such error appears when the system fails to configure a specific timeout for a given socket. May arise when trying to connect to an enclave's console.");
}
"E11" => {
eprintln!("Socket error. This is used as an error for catching any other socket operation errors not covered by previous custom errors.");
}
"E12" => {
eprintln!("Epoll error. Such error appears, for instance, when the system fails to register a specific enclave descriptor with epoll in order to monitor events for it.");
}
"E13" => {
eprintln!("Inotify error. Such error appears when the system fails to configure a socket for monitorization.");
}
"E14" => {
eprintln!("Invalid command. Such error appears when an unknown command and / or unknown arguments are sent through a socket.");
}
"E15" => {
eprintln!("Lock acquire failure. Such error appears when the system fails to obtain the lock for an object with concurrent access, such as a structure containing information about a running enclave.");
}
"E16" => {
eprintln!("Thread join failure. Such error appears when the system fails to successfully join a thread, after it finished executing.");
}
"E17" => {
eprintln!("Serde error. Such error appears when serializing / deserializing a command or response fails.");
}
"E18" => {
eprintln!("File permissions error. Such error appears when a user other than the owner of the logging file (usually '/var/log/nitro_enclaves/nitro_enclaves.log') attempts to change the file permissions");
}
"E19" => {
eprintln!("File operation failure. Such error appears when the system fails to perform the requested file operations, such as opening the EIF file when launching an enclave, or seeking to a specific offset in the EIF file, or writing to the log file.");
}
"E20" => {
eprintln!("Invalid CPU configuration. Such error appears when the user supplies the same CPU ID multiple times.\n\tExample: `nitro-cli run-enclave --cpu-ids 1 1 --memory 80 --eif-path /path/to/my/eif`. In this case, CPU ID `1` has been selected twice.");
}
"E21" => {
eprintln!("No such CPU available in the pool. Such error appears when the user requests to run an enclave with at least one CPU ID which does not exist in the CPU pool.\n\tExample: (configured CPU pool: [1,9]) `nitro-cli run-enclave --cpu-ids 1 2 --memory 80 --eif-path /path/to/my/eif`. In this case, CPU 2 is not in the configured CPU pool.");
}
"E22" => {
eprintln!("Insufficient CPUs available in the pool. Such error appears when the user requests to run an enclave with more CPUs that available in the CPU pool.\n\tExample: (configured CPU pool: [1,9]) `nitro-cli run-enclave --cpu-count 4 --memory 80 --eif-path /path/to/my/eif`. In this case, the user requested 4 CPUs, but the CPU pool contains only 2.");
}
"E23" => {
eprintln!("Malformed CPU ID error. Such error appears when a `lscpu` line is malformed and reports an invalid online CPUs list.");
}
"E24" => {
eprintln!(
"CPU error. Such error appears when a CPU line interval is invalid (as in 0-3-7)"
);
}
"E25" => {
eprintln!("No such hugepage flag error. Such error appears when the enclave process attempts to use an invalid hugepage size (size other than the known hugepage sizes) for initializing the enclave memory.");
}
"E26" => {
eprintln!("Insufficient memory requested. Such error appears when the user requests to launch an enclave with not enough memory. The enclave memory should be at least equal to the size of the EIF file used for launching the enclave.\n\tExample: (EIF file size: 11MB) `nitro-cli run-enclave --cpu-count 2 --memory 5 --eif-path /path/to/my/eif`. In this case, the user requested to run an enclave with only 5MB of memory, whereas the EIF file alone requires 11MB.");
}
"E27" => {
eprintln!("Insufficient memory available. Such error appears when the user requests to launch an enclave with more memory than available. The enclave memory should be at most equal to the size of the configured hugepage memory.\n\tExample: (previously configured 80MB of hugepage memory) `nitro-cli run-enclave --cpu-count 2 --memory 100 --eif-path /path/to/my/eif`. In this case, the user requested to run an enclave with 100MB of memory, whereas the system has only 80MB available for enclaves. As a solution, (re)configure the Nitro Enclaves environment, specifying a higher value for the available memory.");
}
"E28" => {
eprintln!("Invalid enclave descriptor. Such error appears when the NE_CREATE_VM ioctl returns with an error.");
}
"E29" => {
eprintln!("Ioctl failure. Such error is used as a general ioctl error and appears whenever an ioctl fails. In this case, the error backtrace provides detailed information on what specifically failed during the ioctl.");
}
"E30" => {
eprintln!("Ioctl image get load info failure. Such error appears when the ioctl used for getting the memory load information fails. In this case, the error backtrace provides detailed information on what specifically failed during the ioctl.");
}
"E31" => {
eprintln!("Ioctl set memory region failure. Such error appears when the ioctl used for setting a given memory region fails. In this case, the error backtrace provides detailed information on what specifically failed during the ioctl.");
}
"E32" => {
eprintln!("Ioctl add vCPU failure. Such error appears when the ioctl used for adding a vCPU fails. In this case, the error backtrace provides detailed information on what specifically failed during the ioctl.");
}
"E33" => {
eprintln!("Ioctl start enclave failure. Such error appears when the ioctl used for starting an enclave fails. In this case, the error backtrace provides details information on what specifically failed during the ioctl.");
}
"E34" => {
eprintln!("Memory overflow. Such error may appear during loading the EIF in the memory regions which will be conceded to the future enclave, if the regions offset plus the EIF file size exceeds the maximum address of the target platform.");
}
"E35" => {
eprintln!("EIF file parsing error. Such errors appear when attempting to fill a memory region with a section of the EIF file, but reading the entire section fails. This might indicate that the required hugepages are not available.");
}
"E36" => {
eprintln!("Enclave boot failure. Such error appears when attempting to receive the `ready` signal from a freshly booted enclave. It arises in several contexts, for instance, when the enclave is booted from an invalid EIF file and the enclave process immediately exits, failing to submit the `ready` signal. In this case, the error backtrace provides detailed information on what specifically failed during the enclave boot process.");
}
"E37" => {
eprintln!("Enclave event wait error. Such error appears when monitoring an enclave descriptor for events fails.");
}
"E38" => {
eprintln!("Enclave process command not executed error. Such error appears when at least one enclave fails to provide the description information.");
}
"E39" => {
eprintln!("Enclave process connection failure. Such error appears when the enclave manager fails to connect to at least one enclave process for retrieving the description information.");
}
"E40" => {
eprintln!("Socket path not found. Such error appears when the Nitro CLI process fails to build the corresponding socket path starting from a given enclave ID.");
}
"E41" => {
eprintln!("Enclave process send reply failure. Such error appears when the enclave process fails to submit the status code to the Nitro CLI process after performing a run / describe / terminate command.");
}
"E42" => {
eprintln!(
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | true |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/src/common/json_output.rs | src/common/json_output.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(missing_docs)]
#![deny(warnings)]
#![allow(clippy::too_many_arguments)]
use aws_nitro_enclaves_image_format::defs::EifIdentityInfo;
use aws_nitro_enclaves_image_format::utils::eif_reader::SignCertificateInfo;
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
/// The information to be provided for a `describe-enclaves` request.
///
/// NOTE: serde emits JSON keys in field declaration order, so the field order
/// below is part of the command's output format.
#[derive(Clone, Serialize, Deserialize)]
pub struct EnclaveDescribeInfo {
    /// Enclave name assigned by the user.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "EnclaveName")]
    pub enclave_name: Option<String>,
    #[serde(rename = "EnclaveID")]
    /// The full ID of the enclave.
    pub enclave_id: String,
    #[serde(rename = "ProcessID")]
    /// The PID of the enclave process which manages the enclave.
    pub process_id: u32,
    #[serde(rename = "EnclaveCID")]
    /// The enclave's CID.
    pub enclave_cid: u64,
    #[serde(rename = "NumberOfCPUs")]
    /// The number of CPUs used by the enclave.
    pub cpu_count: u64,
    #[serde(rename = "CPUIDs")]
    /// The IDs of the CPUs used by the enclave.
    pub cpu_ids: Vec<u32>,
    #[serde(rename = "MemoryMiB")]
    /// The memory provided to the enclave (in MiB).
    pub memory_mib: u64,
    #[serde(rename = "State")]
    /// The current state of the enclave.
    pub state: String,
    #[serde(rename = "Flags")]
    /// The bit-mask which provides the enclave's launch flags.
    pub flags: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(flatten)]
    /// Build measurements containing PCRs; `flatten` merges them into the top-level JSON object.
    pub build_info: Option<EnclaveBuildInfo>,
    /// Assigned or default EIF name.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "ImageName")]
    pub img_name: Option<String>,
    #[serde(rename = "ImageVersion")]
    /// Assigned or default EIF version.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub img_version: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "Metadata")]
    /// EIF metadata.
    pub metadata: Option<MetadataDescribeInfo>,
}
impl EnclaveDescribeInfo {
/// Create a new `EnclaveDescribeInfo` instance from the given enclave information.
pub fn new(
enclave_name: Option<String>,
enclave_id: String,
enclave_cid: u64,
cpu_count: u64,
cpu_ids: Vec<u32>,
memory_mib: u64,
state: String,
flags: String,
build_info: Option<EnclaveBuildInfo>,
img_name: Option<String>,
img_version: Option<String>,
metadata: Option<MetadataDescribeInfo>,
) -> Self {
EnclaveDescribeInfo {
enclave_name,
enclave_id,
process_id: std::process::id(),
enclave_cid,
cpu_count,
cpu_ids,
memory_mib,
state,
flags,
build_info,
img_name,
img_version,
metadata,
}
}
}
/// The information to be provided for a `run-enclave` request.
///
/// NOTE: serde emits JSON keys in field declaration order; also note that
/// `cpu_count` is `usize` here, unlike the `u64` used by `EnclaveDescribeInfo`.
#[derive(Clone, Serialize, Deserialize)]
pub struct EnclaveRunInfo {
    #[serde(rename = "EnclaveName")]
    /// The name of the enclave.
    pub enclave_name: String,
    #[serde(rename = "EnclaveID")]
    /// The full ID of the enclave.
    pub enclave_id: String,
    #[serde(rename = "ProcessID")]
    /// The PID of the enclave process which manages the enclave.
    pub process_id: u32,
    #[serde(rename = "EnclaveCID")]
    /// The enclave's CID.
    pub enclave_cid: u64,
    #[serde(rename = "NumberOfCPUs")]
    /// The number of CPUs used by the enclave.
    pub cpu_count: usize,
    #[serde(rename = "CPUIDs")]
    /// The IDs of the CPUs used by the enclave.
    pub cpu_ids: Vec<u32>,
    #[serde(rename = "MemoryMiB")]
    /// The memory provided to the enclave (in MiB).
    pub memory_mib: u64,
}
impl EnclaveRunInfo {
/// Create a new `EnclaveRunInfo` instance from the given enclave information.
pub fn new(
enclave_name: String,
enclave_id: String,
enclave_cid: u64,
cpu_count: usize,
cpu_ids: Vec<u32>,
memory_mib: u64,
) -> Self {
EnclaveRunInfo {
enclave_name,
enclave_id,
process_id: std::process::id(),
enclave_cid,
cpu_count,
cpu_ids,
memory_mib,
}
}
}
/// The information to be provided for a `terminate-enclave` request.
#[derive(Clone, Serialize, Deserialize)]
pub struct EnclaveTerminateInfo {
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "EnclaveName")]
    /// The name of the enclave. Optional to stay compatible with older versions
    /// which did not record a name.
    pub enclave_name: Option<String>,
    #[serde(rename = "EnclaveID")]
    /// The full ID of the enclave.
    pub enclave_id: String,
    #[serde(rename = "Terminated")]
    /// A flag indicating if the enclave has terminated.
    pub terminated: bool,
}
impl EnclaveTerminateInfo {
/// Create a new `EnclaveTerminateInfo` instance from the given enclave information.
pub fn new(enclave_name: Option<String>, enclave_id: String, terminated: bool) -> Self {
EnclaveTerminateInfo {
enclave_name,
enclave_id,
terminated,
}
}
}
/// The information to be provided for a `build-enclave` request.
#[derive(Serialize, Clone, Deserialize, Debug)]
pub struct EnclaveBuildInfo {
    #[serde(rename = "Measurements")]
    /// The measurement results (hashes) of various enclave properties.
    /// A `BTreeMap` keeps the keys sorted, so the serialized output is deterministic.
    pub measurements: BTreeMap<String, String>,
}
impl EnclaveBuildInfo {
/// Create a new `EnclaveBuildInfo` instance from the given measurements.
pub fn new(measurements: BTreeMap<String, String>) -> Self {
EnclaveBuildInfo { measurements }
}
}
/// The information to be provided for a `describe-eif` request.
///
/// NOTE: serde emits JSON keys in field declaration order.
#[derive(Clone, Serialize, Deserialize)]
pub struct EifDescribeInfo {
    #[serde(rename = "EifVersion")]
    /// EIF version.
    pub version: u16,
    #[serde(flatten)]
    /// Contains the PCR values; `flatten` merges them into the top-level JSON object.
    pub build_info: EnclaveBuildInfo,
    #[serde(rename = "IsSigned")]
    /// Specifies if the image is signed or not.
    pub is_signed: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "SigningCertificate")]
    /// Signing certificate information (present only for signed images).
    pub cert_info: Option<SignCertificateInfo>,
    #[serde(rename = "CheckCRC")]
    /// Specifies if the CRC check passed.
    pub crc_check: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "SignatureCheck")]
    /// Specifies if the EIF signature check passed.
    pub sign_check: Option<bool>,
    #[serde(rename = "ImageName")]
    /// Assigned or default EIF name.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub img_name: Option<String>,
    #[serde(rename = "ImageVersion")]
    /// Assigned or default EIF version.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub img_version: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "Metadata")]
    /// EIF metadata.
    pub metadata: Option<MetadataDescribeInfo>,
}
/// Metadata to be included in the describe output.
#[derive(Clone, Serialize, Deserialize)]
pub struct MetadataDescribeInfo {
    #[serde(rename = "BuildTime")]
    /// Time of the build.
    pub build_time: String,
    #[serde(rename = "BuildTool")]
    /// Tool used for the EIF build.
    pub build_tool: String,
    #[serde(rename = "BuildToolVersion")]
    /// Version of the build tool.
    pub tool_version: String,
    #[serde(rename = "OperatingSystem")]
    /// Enclave OS.
    pub operating_system: String,
    #[serde(rename = "KernelVersion")]
    /// Enclave kernel version.
    pub kernel_version: String,
    #[serde(rename = "DockerInfo")]
    /// Docker image information (free-form JSON).
    pub docker_info: serde_json::Value,
    #[serde(skip_serializing_if = "serde_json::Value::is_null")]
    #[serde(rename = "CustomMetadata")]
    #[serde(flatten)]
    /// Metadata added by the user as JSON; flattened into the top-level object
    /// and omitted entirely when null.
    pub custom_metadata: serde_json::Value,
}
impl MetadataDescribeInfo {
/// Construct metadata output struct
pub fn new(eif_info: EifIdentityInfo) -> Self {
MetadataDescribeInfo {
build_time: eif_info.build_info.build_time,
build_tool: eif_info.build_info.build_tool,
tool_version: eif_info.build_info.build_tool_version,
operating_system: eif_info.build_info.img_os,
kernel_version: eif_info.build_info.img_kernel,
docker_info: eif_info.docker_info,
custom_metadata: eif_info.custom_info,
}
}
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/src/common/mod.rs | src/common/mod.rs | // Copyright 2020-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(missing_docs)]
#![deny(warnings)]
/// The module which parses command parameters from command-line arguments.
pub mod commands_parser;
/// The module which provides mappings between NitroCliErrors and their corresponding code.
pub mod document_errors;
/// The module which provides JSON-ready information structures.
pub mod json_output;
/// The module which provides the per-process logger.
pub mod logger;
/// The module which provides signal handling.
pub mod signal_handler;
use chrono::offset::Utc;
use log::error;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use std::env;
use std::io::{Read, Write};
#[cfg(test)]
use std::os::raw::c_char;
use std::os::unix::net::UnixStream;
use std::path::{Path, PathBuf};
use document_errors::ERROR_CODES;
use logger::get_log_file_base_path;
/// The most common result type provided by Nitro CLI operations.
pub type NitroCliResult<T> = Result<T, NitroCliFailure>;
/// The CID for the vsock device of the parent VM.
pub const VMADDR_CID_PARENT: u32 = 3;
/// The vsock port used to confirm that the enclave has booted.
pub const ENCLAVE_READY_VSOCK_PORT: u32 = 9000;
/// The amount of time in milliseconds an enclave process will wait for certain operations.
pub const ENCLAVE_PROC_WAIT_TIMEOUT_MSEC: isize = 3000;
/// The confirmation code sent by an enclave process to a requesting CLI instance
/// in order to signal that it is alive.
pub const MSG_ENCLAVE_CONFIRM: u64 = 0xEEC0;
/// The environment variable which holds the path to the Unix sockets directory.
pub const SOCKETS_DIR_PATH_ENV_VAR: &str = "NITRO_CLI_SOCKETS_PATH";
/// The default path to the Unix sockets directory, used when the environment
/// variable above is not set.
const SOCKETS_DIR_PATH: &str = "/run/nitro_enclaves";
/// Constant used for identifying the backtrace environment variable.
const BACKTRACE_VAR: &str = "BACKTRACE";
/// All possible errors which may occur.
///
/// NOTE: the variant order is part of the public contract — each variant's
/// discriminant (starting from `UnspecifiedError = 0`) maps to a documented
/// `Exx` error code, so new variants must only be appended at the end.
#[derive(Debug, Default, Clone, Copy, Hash, PartialEq)]
pub enum NitroCliErrorEnum {
    #[default]
    /// Unspecified error (should avoid using it throughout the code).
    UnspecifiedError = 0,
    /// Error for handling missing arguments.
    MissingArgument,
    /// Error for handling conflicting arguments.
    ConflictingArgument,
    /// Invalid type argument.
    InvalidArgument,
    /// Failed to create socket pair.
    SocketPairCreationFailure,
    /// Failed to spawn a child process.
    ProcessSpawnFailure,
    /// Failed to daemonize current process.
    DaemonizeProcessFailure,
    /// Failed to read requested content from disk.
    ReadFromDiskFailure,
    /// Unusable connection error.
    UnusableConnectionError,
    /// Socket close error.
    SocketCloseError,
    /// Socket connect timeout error.
    SocketConnectTimeoutError,
    /// General error for handling socket-related errors.
    SocketError,
    /// General error for handling epoll-related errors.
    EpollError,
    /// General error for handling inotify-related errors.
    InotifyError,
    /// Invalid command format.
    InvalidCommand,
    /// Lock acquire failure.
    LockAcquireFailure,
    /// Thread join failure.
    ThreadJoinFailure,
    /// General error for handling serde-related errors.
    SerdeError,
    /// File permissions error.
    FilePermissionsError,
    /// File operation failure.
    FileOperationFailure,
    /// Invalid CPU list configuration.
    InvalidCpuConfiguration,
    /// Requested CPU not available in the pool.
    NoSuchCpuAvailableInPool,
    /// Not enough CPUs available in the pool.
    InsufficientCpus,
    /// Malformed CPU ID error.
    MalformedCpuId,
    /// General error to catch all other CPU-related errors.
    CpuError,
    /// No such hugepage map flag.
    NoSuchHugepageFlag,
    /// Insufficient memory requested.
    InsufficientMemoryRequested,
    /// Insufficient memory available.
    InsufficientMemoryAvailable,
    /// Invalid enclave file descriptor.
    InvalidEnclaveFd,
    /// General ioctl failure.
    IoctlFailure,
    /// Image load info ioctl failure.
    IoctlImageLoadInfoFailure,
    /// Enclave set memory region ioctl failure.
    IoctlSetMemoryRegionFailure,
    /// VCPU add ioctl failure.
    IoctlAddVcpuFailure,
    /// Enclave start ioctl failure.
    IoctlEnclaveStartFailure,
    /// Memory overflow.
    MemoryOverflow,
    /// General EIF parsing related error.
    EifParsingError,
    /// Error specific to enclave booting issues.
    EnclaveBootFailure,
    /// Enclave event wait error.
    EnclaveEventWaitError,
    /// Enclave process command was not executed.
    EnclaveProcessCommandNotExecuted,
    /// Could not connect to an enclave process.
    EnclaveProcessConnectionFailure,
    /// Socket path not found.
    SocketPathNotFound,
    /// Enclave process failed to send back reply.
    EnclaveProcessSendReplyFailure,
    /// Error when trying to allocate enclave memory regions.
    EnclaveMmapError,
    /// Error when trying to release enclave memory regions.
    EnclaveMunmapError,
    /// Enclave connection to console failed.
    EnclaveConsoleConnectionFailure,
    /// Error when reading from the console.
    EnclaveConsoleReadError,
    /// Error when writing console output to stream.
    EnclaveConsoleWriteOutputError,
    /// Integer parsing error.
    IntegerParsingError,
    /// Could not build EIF file.
    EifBuildingError,
    /// Could not build Docker image.
    DockerImageBuildError,
    /// Could not pull Docker image.
    DockerImagePullError,
    /// Artifacts path environment variable not set.
    ArtifactsPathNotSet,
    /// Blobs path environment variable not set.
    BlobsPathNotSet,
    /// Clock skew error.
    ClockSkewError,
    /// Signal masking error.
    SignalMaskingError,
    /// Signal unmasking error.
    SignalUnmaskingError,
    /// General error for handling logger-related errors.
    LoggerError,
    /// Hasher operation error.
    HasherError,
    /// Enclave naming error.
    EnclaveNamingError,
    /// Signature checker error.
    EIFSignatureCheckerError,
    /// Signing error.
    EIFSigningError,
}
impl Eq for NitroCliErrorEnum {}
/// The type of commands that can be sent to an enclave process.
///
/// NOTE: `Run = 0` anchors the discriminants, so the variant order must not change.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum EnclaveProcessCommandType {
    /// Launch (run) an enclave (sent by the CLI).
    Run = 0,
    /// Terminate an enclave (sent by the CLI).
    Terminate,
    /// Notify that the enclave has terminated (sent by the enclave process to itself).
    TerminateComplete,
    /// Describe an enclave (broadcast by the CLI).
    Describe,
    /// Request an enclave's CID (sent by the CLI).
    GetEnclaveCID,
    /// Request an enclave's flags (sent by the CLI).
    GetEnclaveFlags,
    /// Request an enclave's name (sent by the CLI).
    GetEnclaveName,
    /// Request the ID of an enclave only if the name matches (sent by the CLI).
    GetIDbyName,
    /// Notify the socket connection listener to shut down (sent by the enclave process to itself).
    ConnectionListenerStop,
    /// Do not execute a command due to insufficient privileges (sent by the CLI, modified by the enclave process).
    NotPermitted,
}
/// The type of replies that an enclave process can send to a CLI instance.
#[derive(Debug, Serialize, Deserialize)]
pub enum EnclaveProcessReply {
    /// A message which must be printed to the CLI's standard output.
    StdOutMessage(String),
    /// A message which must be printed to the CLI's standard error.
    StdErrMessage(String),
    /// The status of the operation that the enclave process has performed.
    Status(i32),
}
/// Struct that is passed along the backtrace and accumulates error messages.
///
/// Typically constructed via the `new_nitro_cli_failure!` macro and enriched
/// with `add_subaction` as the error propagates up the call stack.
#[derive(Debug, Default, PartialEq, Eq)]
pub struct NitroCliFailure {
    /// Main action which was attempted and failed.
    pub action: String,
    /// (Possibly) more subactions which lead to the root cause of the failure.
    pub subactions: Vec<String>,
    /// Computer-readable error code.
    pub error_code: NitroCliErrorEnum,
    /// File in which the root error occurred.
    pub file: String,
    /// Line at which the root error occurred.
    pub line: u32,
    /// Additional info regarding the error, passed as individual components (for easier parsing).
    pub additional_info: Vec<String>,
}
impl NitroCliFailure {
/// Returns an empty `NitroCliFailure` object.
pub fn new() -> Self {
NitroCliFailure {
action: String::new(),
subactions: vec![],
error_code: NitroCliErrorEnum::default(),
file: String::new(),
line: 0,
additional_info: vec![],
}
}
/// Sets the main action which failed (i.e. RUN_ENCLAVE).
pub fn set_action(mut self, action: String) -> Self {
self.action = action;
self
}
/// Adds a new layer into the backtrace, corresponding to a failing subaction (i.e. NOT_ENOUGH_MEM).
pub fn add_subaction(mut self, subaction: String) -> Self {
self.subactions.push(subaction);
self
}
/// Sets the error code.
pub fn set_error_code(mut self, error_code: NitroCliErrorEnum) -> Self {
self.error_code = error_code;
self
}
/// Sets the name of the file the error occurred in.
pub fn set_file(mut self, file: &str) -> Self {
self.file = file.to_string();
self
}
/// Sets the number of the line the error occurred on.
pub fn set_line(mut self, line: u32) -> Self {
self.line = line;
self
}
/// Sets both error file and error line.
pub fn set_file_and_line(mut self, file: &str, line: u32) -> Self {
self.file = file.to_string();
self.line = line;
self
}
/// Include additional error information.
pub fn add_info(mut self, info: Vec<&str>) -> Self {
for info_ in info {
self.additional_info.push(info_.to_string());
}
self
}
}
/// Macro used for constructing a NitroCliFailure in a more convenient manner.
///
/// Expands to a builder chain that records the given subaction and error code
/// together with `file!()` / `line!()`. Since the macro body is expanded at the
/// invocation point, the recorded location is that of the *call site*.
#[macro_export]
macro_rules! new_nitro_cli_failure {
    ($subaction:expr, $error_code:expr) => {
        NitroCliFailure::new()
            .add_subaction(($subaction).to_string())
            .set_error_code($error_code)
            .set_file_and_line(file!(), line!())
    };
}
/// Logs the given backtrace string to a separate, backtrace-specific file.
/// Returns a string denoting the path to the corresponding log file.
///
/// The log file name embeds the current UTC timestamp so successive failures
/// do not overwrite each other. All failure modes are reported as static
/// message strings.
fn log_backtrace(backtrace: String) -> Result<String, &'static str> {
    let log_path_base = get_log_file_base_path();
    // Check if backtrace logs location exists and create it if necessary.
    if !Path::new(&log_path_base).exists() {
        std::fs::create_dir_all(&log_path_base)
            .map_err(|_| "Could not create backtrace logs directory")?;
    }
    // One file per failure, keyed by the current UTC time.
    let utc_time_now = Utc::now().to_rfc3339();
    let log_path_str = format!("{}/err{}.log", &log_path_base, utc_time_now);
    let log_path = Path::new(&log_path_str);
    let mut log_file =
        std::fs::File::create(log_path).map_err(|_| "Could not create backtrace log file")?;
    log_file
        .write_all(backtrace.as_bytes())
        .map_err(|_| "Could not write to backtrace log file")?;
    // `to_str` only fails for non-UTF-8 paths.
    log_path
        .to_str()
        .map(str::to_string)
        .ok_or("Could not return log file path")
}
/// Assembles the error message which gets displayed to the user.
///
/// The message always contains a suggestive error description followed by a
/// link to the documentation page. The backtrace is included only when the
/// user opted in by setting the `BACKTRACE_VAR` environment variable to "1".
/// The backtrace is also persisted to a log file; when that succeeds, the
/// log file path is appended for inclusion in support tickets.
pub fn construct_error_message(failure: &NitroCliFailure) -> String {
    // Look up the printable error code once ("E00" is the fallback).
    let error_code_str = (*ERROR_CODES.get(&failure.error_code).unwrap_or(&"E00")).to_string();
    // Suggestive error description comes first.
    let error_info: String =
        document_errors::get_detailed_info(error_code_str.clone(), &failure.additional_info);
    // Include a link to the documentation page.
    let help_link: String = document_errors::construct_help_link(error_code_str);
    let backtrace: String = document_errors::construct_backtrace(failure);
    // Write backtrace to a log file.
    let log_path = log_backtrace(backtrace.clone());
    // Build the final output incrementally instead of duplicating the format
    // string across four match arms.
    let mut msg = format!("{error_info}\n\nFor more details, please visit {help_link}");
    // The backtrace is displayed only on explicit request (BACKTRACE_VAR == "1").
    if matches!(std::env::var(BACKTRACE_VAR).as_deref(), Ok("1")) {
        msg.push_str(&format!("\n\nBacktrace:\n{backtrace}"));
    }
    if let Ok(log_path) = log_path {
        msg.push_str(&format!(
            "\n\nIf you open a support ticket, please provide the error log found at \"{log_path}\""
        ));
    }
    msg
}
/// A trait which allows a more graceful program exit instead of the standard `panic`.
/// Provides a custom exit code.
pub trait ExitGracefully<T> {
    /// Provide the inner value of a `Result` or exit gracefully with a message and custom errno.
    fn ok_or_exit_with_errno(self, additional_info: Option<&str>) -> T;
}
impl<T> ExitGracefully<T> for NitroCliResult<T> {
    /// Provide the inner value of a `Result` or exit gracefully with a message and custom errno.
    ///
    /// On error, the full user-facing message is assembled (optionally prefixed
    /// by `additional_info`), reported via `notify_error`, and the process
    /// terminates with the numeric error code as its exit status.
    fn ok_or_exit_with_errno(self, additional_info: Option<&str>) -> T {
        match self {
            Ok(val) => val,
            Err(err) => {
                let err_str = construct_error_message(&err);
                if let Some(additional_info_str) = additional_info {
                    notify_error(&format!("{additional_info_str} | {err_str}"));
                } else {
                    notify_error(&err_str);
                }
                // The exit code mirrors the machine-readable error code.
                std::process::exit(err.error_code as i32);
            }
        }
    }
}
/// Notify both the user and the logger of an error.
///
/// The message is printed to stderr for immediate visibility and also recorded
/// through the `log` facade.
pub fn notify_error(err_msg: &str) {
    eprintln!("{err_msg}");
    error!("{}", err_msg);
}
/// Read a LE-encoded 64-bit unsigned value from a socket.
///
/// Fails with a `SocketError` if the full 8 bytes cannot be read.
pub fn read_u64_le(socket: &mut dyn Read) -> NitroCliResult<u64> {
    const VALUE_SIZE: usize = std::mem::size_of::<u64>();
    let mut buf = [0u8; VALUE_SIZE];
    match socket.read_exact(&mut buf) {
        Ok(()) => Ok(u64::from_le_bytes(buf)),
        Err(e) => Err(new_nitro_cli_failure!(
            &format!("Failed to read {VALUE_SIZE} bytes from the given socket: {e:?}"),
            NitroCliErrorEnum::SocketError
        )),
    }
}
/// Write a LE-encoded 64-bit unsigned value to a socket.
///
/// Fails with a `SocketError` if the full 8 bytes cannot be written.
pub fn write_u64_le(socket: &mut dyn Write, value: u64) -> NitroCliResult<()> {
    let encoded = value.to_le_bytes();
    match socket.write_all(&encoded) {
        Ok(()) => Ok(()),
        Err(e) => Err(new_nitro_cli_failure!(
            &format!(
                "Failed to write {} bytes to the given socket: {e:?}",
                std::mem::size_of::<u64>()
            ),
            NitroCliErrorEnum::SocketError
        )),
    }
}
/// Send a command to a single socket.
///
/// Wire format: each payload (command, then optional arguments) is preceded by
/// its LE-encoded 64-bit length. The serialized command is deliberately written
/// *twice*: the first copy is consumed by the connection listener (to detect
/// shut-down commands), the second by the enclave process itself.
pub fn enclave_proc_command_send_single<T>(
    cmd: EnclaveProcessCommandType,
    args: Option<&T>,
    mut socket: &mut UnixStream,
) -> NitroCliResult<()>
where
    T: Serialize,
{
    // Serialize the command type.
    let mut cmd_bytes = Vec::new();
    ciborium::ser::into_writer(&cmd, &mut cmd_bytes).map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Invalid command format: {e:?}"),
            NitroCliErrorEnum::InvalidCommand
        )
    })?;
    // The command is written twice. The first read is done by the connection listener to check if this is
    // a shut-down command. The second read is done by the enclave process for all non-shut-down commands.
    for _ in 0..2 {
        write_u64_le(&mut socket, cmd_bytes.len() as u64)
            .map_err(|e| e.add_subaction("Failed to send single command size".to_string()))?;
        socket.write_all(&cmd_bytes[..]).map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to send single command: {e:?}"),
                NitroCliErrorEnum::SocketError
            )
        })?;
    }
    // Serialize the command arguments.
    if let Some(args) = args {
        let mut arg_bytes = Vec::new();
        ciborium::ser::into_writer(args, &mut arg_bytes).map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Invalid single command arguments: {e:?}"),
                NitroCliErrorEnum::InvalidCommand
            )
        })?;
        // Write the serialized command arguments.
        write_u64_le(&mut socket, arg_bytes.len() as u64)
            .map_err(|e| e.add_subaction("Failed to send arguments size".to_string()))?;
        socket.write_all(&arg_bytes).map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to send arguments: {e:?}"),
                NitroCliErrorEnum::SocketError
            )
        })?;
    }
    Ok(())
}
/// Receive an object of a specified type from an input stream.
///
/// The peer first sends the LE-encoded size of the serialized payload, then the
/// CBOR payload itself. The announced size is used only to allocate a scratch
/// buffer for the decoder, which reads directly from the stream.
///
/// NOTE(review): the scratch buffer is sized from a peer-supplied value with no
/// upper bound — confirm peers are trusted before relying on this.
pub fn receive_from_stream<T>(input_stream: &mut dyn Read) -> NitroCliResult<T>
where
    T: DeserializeOwned,
{
    // Read the payload size announced by the peer.
    let size = read_u64_le(input_stream)
        .map_err(|e| e.add_subaction("Failed to receive data size".to_string()))?
        as usize;
    let mut raw_data: Vec<u8> = vec![0; size];
    // Decode the CBOR payload straight from the stream, using `raw_data` as scratch space.
    let data: T =
        ciborium::de::from_reader_with_buffer(input_stream, &mut raw_data[..]).map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to decode received data: {e:?}"),
                NitroCliErrorEnum::SerdeError
            )
        })?;
    Ok(data)
}
/// Get the path to the directory containing the Unix sockets owned by all enclave processes.
pub fn get_sockets_dir_path() -> PathBuf {
let log_path = match env::var(SOCKETS_DIR_PATH_ENV_VAR) {
Ok(env_path) => env_path,
Err(_) => SOCKETS_DIR_PATH.to_string(),
};
Path::new(&log_path).to_path_buf()
}
/// Get the path to the Unix socket owned by an enclave process which also owns the enclave with the given ID.
pub fn get_socket_path(enclave_id: &str) -> NitroCliResult<PathBuf> {
    // The full enclave ID is "i-(...)-enc<enc_id>"; keep only the part after
    // the last "-enc" separator. `rsplit` always yields at least one item, so
    // the `unwrap_or` fallback is purely defensive.
    let enc_id = enclave_id.rsplit("-enc").next().unwrap_or(enclave_id);
    Ok(get_sockets_dir_path().join(enc_id).with_extension("sock"))
}
#[cfg(test)]
mod tests {
    #[allow(unused_imports)]
    use super::*;
    use crate::common::commands_parser::EmptyArgs;
    const TMP_DIR_STR: &str = "./tmp_sock_dir";
    /// Removes `varname` from the process environment.
    fn unset_envvar(varname: &str) {
        // `libc::unsetenv` requires a NUL-terminated C string. Passing
        // `varname.as_ptr()` directly (as the previous implementation did)
        // hands libc a buffer with no NUL terminator, which is undefined
        // behavior; go through `CString` instead.
        let c_name =
            std::ffi::CString::new(varname).expect("env var name must not contain NUL bytes");
        unsafe {
            libc::unsetenv(c_name.as_ptr() as *const c_char);
        };
    }
    /// Tests that a value wrote by `write_u64_le()` is read
    /// correctly by `read_u64_le()`.
    #[test]
    fn test_read_write_u64() {
        let (mut sock0, mut sock1) = UnixStream::pair().unwrap();
        let _ = write_u64_le(&mut sock0, 127);
        let result = read_u64_le(&mut sock1);
        if let Ok(result) = result {
            assert_eq!(result, 127);
        }
    }
    /// Tests that a command sent though a socket by `enclave_proc_command_send_single()`
    /// is received correctly at the other end, by `receive_command_type()`.
    #[test]
    fn test_enclave_proc_command_send_single() {
        let (mut sock0, mut sock1) = UnixStream::pair().unwrap();
        let cmd = EnclaveProcessCommandType::Describe;
        let args: std::option::Option<&EmptyArgs> = None;
        let result0 = enclave_proc_command_send_single::<EmptyArgs>(cmd, args, &mut sock0);
        assert!(result0.is_ok());
        let result1 = receive_from_stream::<EnclaveProcessCommandType>(&mut sock1);
        assert!(result1.is_ok());
        assert_eq!(result1.unwrap(), EnclaveProcessCommandType::Describe);
    }
    /// Tests that the returned sockets_dir_path matches the expected path,
    /// as retrieved from the corresponding environment variable.
    #[test]
    fn test_get_sockets_dir_path_default() {
        let sockets_dir = env::var(SOCKETS_DIR_PATH_ENV_VAR);
        let sockets_dir_path_f = get_sockets_dir_path();
        if let Ok(sockets_dir) = sockets_dir {
            assert_eq!(sockets_dir, sockets_dir_path_f.as_path().to_str().unwrap());
        } else {
            assert_eq!(
                SOCKETS_DIR_PATH,
                sockets_dir_path_f.as_path().to_str().unwrap()
            );
        }
    }
    /// Tests that altering the content of the sockets_dir_path environment variable
    /// changes the sockets_dir_path string returned by `get_sockets_dir_path()`.
    #[test]
    fn test_get_sockets_dir_path_custom_envvar() {
        let old_sockets_dir = env::var(SOCKETS_DIR_PATH_ENV_VAR);
        env::set_var(SOCKETS_DIR_PATH_ENV_VAR, TMP_DIR_STR);
        let sockets_dir_path_f = get_sockets_dir_path();
        assert_eq!(TMP_DIR_STR, sockets_dir_path_f.as_path().to_str().unwrap());
        // Restore previous environment variable value
        if let Ok(old_sockets_dir) = old_sockets_dir {
            env::set_var(SOCKETS_DIR_PATH_ENV_VAR, old_sockets_dir);
        } else {
            env::set_var(SOCKETS_DIR_PATH_ENV_VAR, "");
            unset_envvar(&String::from(SOCKETS_DIR_PATH_ENV_VAR));
        }
    }
    /// Tests that `get_socket_path()` returns the expected socket path,
    /// given a specific enclave id.
    #[test]
    fn test_get_socket_path_valid_id() {
        let enclave_id = "i-0000000000000000-enc0123456789012345";
        let tokens: Vec<_> = enclave_id.rsplit("-enc").collect();
        let sockets_path = get_sockets_dir_path();
        let result = get_socket_path(enclave_id);
        assert!(result.is_ok());
        assert_eq!(
            result.unwrap().as_path().to_str().unwrap(),
            format!(
                "{}/{}.sock",
                sockets_path.as_path().to_str().unwrap(),
                tokens[0]
            )
        );
    }
    /// Tests that `get_socket_path()` returns an invalid socket path,
    /// given a malformed enclave id.
    #[test]
    fn test_get_socket_path_invalid_id() {
        let enclave_id = "i-0000000000000000_enc0123456789012345";
        let sockets_path = get_sockets_dir_path();
        let result = get_socket_path(enclave_id);
        assert!(result.is_ok());
        assert_eq!(
            result.unwrap().as_path().to_str().unwrap(),
            format!(
                "{}/{}.sock",
                sockets_path.as_path().to_str().unwrap(),
                enclave_id
            )
        );
    }
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/src/enclave_proc/connection.rs | src/enclave_proc/connection.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(missing_docs)]
#![deny(warnings)]
use log::{debug, warn};
use nix::sys::epoll::EpollFlags;
use nix::sys::socket::sockopt::PeerCredentials;
use nix::sys::socket::UnixCredentials;
use serde::de::DeserializeOwned;
use std::collections::HashMap;
use std::io::Write;
use std::os::unix::io::AsRawFd;
use std::os::unix::net::UnixStream;
use std::sync::{Arc, Mutex};
use crate::common::{receive_from_stream, write_u64_le};
use crate::common::{
EnclaveProcessCommandType, EnclaveProcessReply, ExitGracefully, NitroCliErrorEnum,
NitroCliFailure, NitroCliResult,
};
use crate::new_nitro_cli_failure;
/// The types of requesters which may send commands to the enclave process.
/// The types of requesters which may send commands to the enclave process.
///
/// Used as the key of `CommandRequesterPolicy`'s map (hence `Hash` + `Eq`).
#[derive(PartialEq, Eq, Hash)]
enum CommandRequesterType {
    /// The requester is the user with the given UID.
    User(libc::uid_t),
    /// The requester is the group with the given GID.
    Group(libc::gid_t),
    /// The requester is any other user.
    Others,
}
/// The policy used to filter received commands based on the requester's type.
struct CommandRequesterPolicy {
/// A mapping between a requester's type and all of its allowed commands.
policy: HashMap<CommandRequesterType, Vec<EnclaveProcessCommandType>>,
}
/// Data held by a connection.
struct ConnectionData {
/// Flags received from `epoll` if this was an event-triggered connection.
epoll_flags: EpollFlags,
/// A communication stream with the peer, if this was a socket-triggered connection.
input_stream: Option<UnixStream>,
}
/// An enclave process connection to a CLI instance, an enclave or itself.
#[derive(Clone)]
pub struct Connection {
/// The thread-safe data used internally by the connection.
data: Arc<Mutex<ConnectionData>>,
}
impl Drop for ConnectionData {
    /// Shut down the peer stream (if any) when the connection data is dropped.
    ///
    /// NOTE(review): a shutdown failure goes through `ok_or_exit_with_errno`,
    /// which terminates the whole process from inside `drop` — confirm this is
    /// the intended severity for a failed socket close.
    fn drop(&mut self) {
        if let Some(input_stream) = &self.input_stream {
            // Close the stream.
            input_stream
                .shutdown(std::net::Shutdown::Both)
                .map_err(|e| {
                    new_nitro_cli_failure!(
                        &format!("Stream shutdown error: {e:?}"),
                        NitroCliErrorEnum::SocketCloseError
                    )
                })
                .ok_or_exit_with_errno(Some("Failed to shut down"));
        }
    }
}
impl CommandRequesterPolicy {
    /// Create a new `CommandRequesterPolicy` with the default rules. These rules allow
    /// the user which spawned the enclave, together with `root`, to make any request,
    /// whereas all other users are only allowed to make read-only requests (namely,
    /// to describe an enclave or to read its CID).
    fn new_with_defaults() -> Self {
        let cmds_read_write = vec![
            EnclaveProcessCommandType::Run,
            EnclaveProcessCommandType::Terminate,
            EnclaveProcessCommandType::TerminateComplete,
            EnclaveProcessCommandType::Describe,
            EnclaveProcessCommandType::GetEnclaveCID,
            EnclaveProcessCommandType::GetEnclaveFlags,
            EnclaveProcessCommandType::GetEnclaveName,
            EnclaveProcessCommandType::GetIDbyName,
            EnclaveProcessCommandType::ConnectionListenerStop,
        ];
        let cmds_read_only = vec![
            EnclaveProcessCommandType::Describe,
            EnclaveProcessCommandType::GetEnclaveCID,
            EnclaveProcessCommandType::GetEnclaveFlags,
            EnclaveProcessCommandType::GetEnclaveName,
            EnclaveProcessCommandType::GetIDbyName,
        ];
        let policy = HashMap::from([
            // The user which owns this enclave process may issue any command.
            (
                CommandRequesterType::User(unsafe { libc::getuid() }),
                cmds_read_write.clone(),
            ),
            // The root user may issue any command.
            (CommandRequesterType::User(0_u32), cmds_read_write),
            // All other users may only issue read-only commands.
            (CommandRequesterType::Others, cmds_read_only),
        ]);
        CommandRequesterPolicy { policy }
    }
    /// Find the policy rule which applies to the given requester and command.
    fn find_policy_rule(
        &self,
        cmd: EnclaveProcessCommandType,
        requester: &CommandRequesterType,
    ) -> bool {
        self.policy
            .get(requester)
            .map_or(false, |allowed_cmds| allowed_cmds.contains(&cmd))
    }
    /// Check if the user with the specified credentials has permission to run the specified command.
    fn can_execute_command(&self, cmd: EnclaveProcessCommandType, creds: &UnixCredentials) -> bool {
        // A command is allowed if any applicable rule grants it: per-user,
        // per-group, or the catch-all for all other users. With no applicable
        // rule the command is denied.
        self.find_policy_rule(cmd, &CommandRequesterType::User(creds.uid()))
            || self.find_policy_rule(cmd, &CommandRequesterType::Group(creds.gid()))
            || self.find_policy_rule(cmd, &CommandRequesterType::Others)
    }
}
impl Connection {
    /// Create a new connection instance.
    ///
    /// `input_stream` is `Some` for socket-triggered connections and `None`
    /// for enclave-event (epoll) triggered ones.
    pub fn new(epoll_flags: EpollFlags, input_stream: Option<UnixStream>) -> Self {
        let conn_data = ConnectionData {
            epoll_flags,
            input_stream,
        };
        Connection {
            data: Arc::new(Mutex::new(conn_data)),
        }
    }
    /// Read a command and its corresponding credentials.
    ///
    /// Returns `EnclaveProcessCommandType::NotPermitted` (instead of an error)
    /// when the peer's credentials cannot be read or when the access policy
    /// denies the command, so the main event loop simply skips it.
    pub fn read_command(&self) -> NitroCliResult<EnclaveProcessCommandType> {
        let mut lock = self.data.lock().map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to acquire lock: {e:?}"),
                NitroCliErrorEnum::LockAcquireFailure
            )
        })?;
        if lock.input_stream.is_none() {
            return Err(new_nitro_cli_failure!(
                "Cannot read a command from this connection",
                NitroCliErrorEnum::UnusableConnectionError
            ));
        }
        // First, read the incoming command.
        let mut cmd =
            receive_from_stream::<EnclaveProcessCommandType>(lock.input_stream.as_mut().unwrap())?;
        // Next, read the credentials of the command requester.
        let conn_fd = lock.input_stream.as_ref().unwrap().as_raw_fd();
        let socket_creds = nix::sys::socket::getsockopt(conn_fd, PeerCredentials);
        // If the credentials cannot be read, the command will be skipped.
        let user_creds = match socket_creds {
            Ok(creds) => creds,
            Err(e) => {
                warn!("Failed to get user credentials: {}", e);
                return Ok(EnclaveProcessCommandType::NotPermitted);
            }
        };
        // Apply the default command access policy based on the user's credentials.
        let policy = CommandRequesterPolicy::new_with_defaults();
        if !policy.can_execute_command(cmd, &user_creds) {
            // Log the failed execution attempt.
            warn!(
                "The requester with credentials ({:?}) is not allowed to perform '{:?}'.",
                user_creds, cmd
            );
            // Force the command to be skipped by the main event loop.
            cmd = EnclaveProcessCommandType::NotPermitted;
        } else {
            // Log the successful execution attempt.
            debug!(
                "The requester with credentials ({:?}) is allowed to perform '{:?}'.",
                user_creds, cmd
            );
        }
        Ok(cmd)
    }
    /// Read an object of the specified type from this connection.
    ///
    /// Fails for enclave-event connections, which carry no stream.
    pub fn read<T>(&self) -> NitroCliResult<T>
    where
        T: DeserializeOwned,
    {
        let mut lock = self.data.lock().map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to acquire lock: {e:?}"),
                NitroCliErrorEnum::LockAcquireFailure
            )
        })?;
        if lock.input_stream.is_none() {
            return Err(new_nitro_cli_failure!(
                "Cannot read from this connection",
                NitroCliErrorEnum::SocketError
            ));
        }
        receive_from_stream::<T>(lock.input_stream.as_mut().unwrap())
    }
    /// Write a 64-bit unsigned value on this connection.
    pub fn write_u64(&self, value: u64) -> NitroCliResult<()> {
        let mut lock = self.data.lock().map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to acquire lock: {e:?}"),
                NitroCliErrorEnum::LockAcquireFailure
            )
        })?;
        if lock.input_stream.is_none() {
            return Err(new_nitro_cli_failure!(
                "Cannot write a 64-bit value to this connection",
                NitroCliErrorEnum::SocketError
            ));
        }
        write_u64_le(lock.input_stream.as_mut().unwrap(), value)
    }
    /// Write a message to the standard output of the connection's other end.
    pub fn println(&self, msg: &str) -> NitroCliResult<()> {
        let mut msg_str = msg.to_string();
        // Append a new-line at the end of the string.
        msg_str.push('\n');
        let reply = EnclaveProcessReply::StdOutMessage(msg_str);
        self.write_reply(&reply)
    }
    /// Write a message to the standard error of the connection's other end.
    pub fn eprintln(&self, msg: &str) -> NitroCliResult<()> {
        let mut msg_str = msg.to_string();
        // Append a new-line at the end of the string.
        msg_str.push('\n');
        let reply = EnclaveProcessReply::StdErrMessage(msg_str);
        self.write_reply(&reply)
    }
    /// Write an operation's status to the connection's other end.
    pub fn write_status(&self, status: i32) -> NitroCliResult<()> {
        let reply = EnclaveProcessReply::Status(status);
        self.write_reply(&reply)
    }
    /// Get the enclave event flags.
    ///
    /// Returns `Some(flags)` only for enclave-event connections (those without
    /// a stream); socket connections yield `None`.
    pub fn get_enclave_event_flags(&self) -> NitroCliResult<Option<EpollFlags>> {
        let lock = self.data.lock().map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to acquire connection lock: {e:?}"),
                NitroCliErrorEnum::LockAcquireFailure
            )
        })?;
        match lock.input_stream {
            None => Ok(Some(lock.epoll_flags)),
            _ => Ok(None),
        }
    }
    /// Write a string and its corresponding destination to a socket.
    ///
    /// Wire format: LE-encoded 64-bit payload length followed by the
    /// CBOR-serialized `EnclaveProcessReply`.
    fn write_reply(&self, reply: &EnclaveProcessReply) -> NitroCliResult<()> {
        let mut lock = self.data.lock().map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to acquire lock: {e:?}"),
                NitroCliErrorEnum::LockAcquireFailure
            )
        })?;
        if lock.input_stream.is_none() {
            return Err(new_nitro_cli_failure!(
                "Cannot write message to connection",
                NitroCliErrorEnum::SocketError
            ));
        }
        let mut stream = lock.input_stream.as_mut().unwrap();
        let mut reply_bytes = Vec::new();
        ciborium::into_writer(reply, &mut reply_bytes).map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to serialize reply: {e:?}"),
                NitroCliErrorEnum::SerdeError
            )
        })?;
        write_u64_le(&mut stream, reply_bytes.len() as u64)
            .map_err(|e| e.add_subaction("Write reply".to_string()))?;
        stream.write_all(&reply_bytes).map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to write to stream: {e:?}"),
                NitroCliErrorEnum::SocketError
            )
        })
    }
}
/// Print a message to a connection's standard output, if the connection is available.
///
/// A missing connection is not an error: the message is silently dropped and
/// `Ok(())` is returned.
pub fn safe_conn_println(conn: Option<&Connection>, msg: &str) -> NitroCliResult<()> {
    // `match` avoids the `is_none()` + `unwrap()` anti-pattern.
    match conn {
        Some(conn) => conn.println(msg),
        None => Ok(()),
    }
}
/// Print a message to a connection's standard error, if the connection is available.
///
/// A missing connection is not an error: the message is silently dropped and
/// `Ok(())` is returned.
pub fn safe_conn_eprintln(conn: Option<&Connection>, msg: &str) -> NitroCliResult<()> {
    // `match` avoids the `is_none()` + `unwrap()` anti-pattern.
    match conn {
        Some(conn) => conn.eprintln(msg),
        None => Ok(()),
    }
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/src/enclave_proc/utils.rs | src/enclave_proc/utils.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(missing_docs)]
#![deny(warnings)]
use std::fs::metadata;
use std::fs::File;
use std::io::Read;
use crate::common::json_output::{EnclaveDescribeInfo, EnclaveRunInfo, MetadataDescribeInfo};
use crate::common::{NitroCliErrorEnum, NitroCliFailure, NitroCliResult};
use crate::enclave_proc::resource_manager::EnclaveManager;
use crate::enclave_proc::resource_manager::NE_ENCLAVE_DEBUG_MODE;
use crate::new_nitro_cli_failure;
/// Kibibytes (2^10 bytes).
#[allow(non_upper_case_globals)]
pub const KiB: u64 = 1024;
/// Mebibytes (2^20 bytes).
#[allow(non_upper_case_globals)]
pub const MiB: u64 = 1024 * KiB;
/// Gibibytes (2^30 bytes).
#[allow(non_upper_case_globals)]
pub const GiB: u64 = 1024 * MiB;
/// Get a string representation of the bit-mask which holds the enclave launch flags.
///
/// Currently the only recognized flag is `NE_ENCLAVE_DEBUG_MODE`; all other
/// bit patterns map to "NONE".
pub fn flags_to_string(flags: u64) -> String {
    let label = if flags & NE_ENCLAVE_DEBUG_MODE == NE_ENCLAVE_DEBUG_MODE {
        "DEBUG_MODE"
    } else {
        "NONE"
    };
    label.to_string()
}
/// Obtain the enclave information requested by the `describe-enclaves` command.
///
/// When `with_metadata` is set and the enclave manager exposes EIF metadata,
/// the image name/version and the full metadata block are included in the
/// result; otherwise those fields stay `None`.
pub fn get_enclave_describe_info(
    enclave_manager: &EnclaveManager,
    with_metadata: bool,
) -> NitroCliResult<EnclaveDescribeInfo> {
    let (slot_uid, enclave_cid, cpu_count, cpu_ids, memory_mib, flags, state) =
        enclave_manager.get_description_resources()?;
    let mut describe_meta: Option<MetadataDescribeInfo> = None;
    let mut img_name: Option<String> = None;
    let mut img_version: Option<String> = None;
    // Only fetch metadata on request, and only if the manager has it.
    if with_metadata {
        if let Some(meta) = enclave_manager.get_metadata()? {
            img_name = Some(meta.img_name.clone());
            img_version = Some(meta.img_version.clone());
            describe_meta = Some(MetadataDescribeInfo::new(meta));
        }
    }
    let info = EnclaveDescribeInfo {
        enclave_name: Some(enclave_manager.enclave_name.clone()),
        enclave_id: generate_enclave_id(slot_uid)?,
        process_id: std::process::id(),
        enclave_cid,
        cpu_count,
        cpu_ids,
        memory_mib,
        state: state.to_string(),
        flags: flags_to_string(flags),
        build_info: Some(enclave_manager.get_measurements()?),
        img_name,
        img_version,
        metadata: describe_meta,
    };
    Ok(info)
}
/// Obtain the enclave information requested by the `run-enclaves` command.
///
/// The enclave ID is derived from the slot ID; all other fields are passed
/// through unchanged.
pub fn get_run_enclaves_info(
    enclave_name: String,
    enclave_cid: u64,
    slot_id: u64,
    cpu_ids: Vec<u32>,
    memory: u64,
) -> NitroCliResult<EnclaveRunInfo> {
    let enclave_id = generate_enclave_id(slot_id)?;
    let cpu_count = cpu_ids.len();
    Ok(EnclaveRunInfo::new(
        enclave_name,
        enclave_id,
        enclave_cid,
        cpu_count,
        cpu_ids,
        memory,
    ))
}
/// Generate a unique ID for a new enclave with the specified slot ID.
///
/// The ID prefix is read from the DMI board asset tag when that sysfs file
/// exists (on EC2 this presumably holds the instance ID — not verifiable from
/// here); otherwise a fixed "i-0000000000000000" placeholder is used. The slot
/// ID is appended in hexadecimal after an "-enc" separator.
pub fn generate_enclave_id(slot_id: u64) -> NitroCliResult<String> {
    let file_path = "/sys/devices/virtual/dmi/id/board_asset_tag";
    if metadata(file_path).is_ok() {
        let mut file = File::open(file_path).map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to open file: {e:?}"),
                NitroCliErrorEnum::FileOperationFailure
            )
            .add_info(vec![file_path, "Open"])
        })?;
        let mut contents = String::new();
        file.read_to_string(&mut contents).map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to read from file: {e:?}"),
                NitroCliErrorEnum::FileOperationFailure
            )
            .add_info(vec![file_path, "Read"])
        })?;
        // Strip all whitespace (including the trailing newline sysfs adds).
        contents.retain(|c| !c.is_whitespace());
        return Ok(format!("{contents}-enc{slot_id:x}"));
    }
    Ok(format!("i-0000000000000000-enc{slot_id:x}"))
}
/// Obtain an enclave's slot ID from its full ID.
///
/// The slot ID is the hexadecimal segment following the first "-enc"
/// separator of the full ID.
pub fn get_slot_id(enclave_id: String) -> Result<u64, String> {
    let tokens: Vec<&str> = enclave_id.split("-enc").collect();
    let slot_str = tokens
        .get(1)
        .ok_or_else(|| "Invalid enclave_id.".to_string())?;
    u64::from_str_radix(slot_str, 16).map_err(|_| "Invalid enclave id format".to_string())
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Checks that the generated ID either uses the placeholder instance ID
    /// (when the DMI sysfs file is absent) or a non-placeholder prefix.
    #[test]
    fn test_generate_enclave_id() {
        let slot_id: u64 = 7;
        let enc_id = generate_enclave_id(slot_id);
        let file_path = "/sys/devices/virtual/dmi/id/board_asset_tag";
        if metadata(file_path).is_err() {
            assert!(enc_id
                .unwrap()
                .eq(&format!("i-0000000000000000-enc{slot_id:?}")));
        } else {
            assert!(!enc_id
                .unwrap()
                .split('-')
                .collect::<Vec<&str>>()
                .get(1)
                .unwrap()
                .eq(&"0000000000000000"));
        }
    }
    /// Round-trip: the slot ID encoded by `generate_enclave_id` is recovered
    /// by `get_slot_id`.
    #[test]
    fn test_get_slot_id_valid() {
        let slot_id: u64 = 8;
        let enc_id = generate_enclave_id(slot_id);
        if let Ok(enc_id) = enc_id {
            let result = get_slot_id(enc_id);
            assert!(result.is_ok());
            assert_eq!(slot_id, result.unwrap());
        }
    }
    /// An ID without the "-enc" separator must be rejected.
    #[test]
    fn test_get_slot_id_invalid() {
        let enclave_id = String::from("i-0000_enc1234");
        let result = get_slot_id(enclave_id);
        assert!(result.is_err());
        if let Err(err_str) = result {
            assert!(err_str.eq("Invalid enclave_id."));
        }
    }
    /// Tests that `flags_to_string()` returns the correct String representation
    /// when the NE_ENCLAVE_DEBUG_MODE is either set or unset.
    #[test]
    fn test_flags_to_string() {
        let mut flags: u64 = 0;
        flags |= NE_ENCLAVE_DEBUG_MODE;
        let mut result = flags_to_string(flags);
        assert!(result.eq("DEBUG_MODE"));
        flags = 0;
        result = flags_to_string(flags);
        assert!(result.eq("NONE"));
    }
    /// Asserts that `get_run_enclaves_info()` returns a result containing
    /// exactly the same values as the supplied arguments.
    #[test]
    fn test_get_run_enclaves_info() {
        let enclave_name = "testName".to_string();
        let enclave_cid: u64 = 0;
        let slot_id: u64 = 7;
        let cpu_ids: Vec<u32> = vec![1, 3];
        let memory: u64 = 64;
        let result =
            get_run_enclaves_info(enclave_name, enclave_cid, slot_id, cpu_ids.clone(), memory);
        assert!(result.is_ok());
        if let Ok(result) = result {
            assert_eq!(enclave_cid, result.enclave_cid);
            assert_eq!(cpu_ids.len(), result.cpu_ids.len());
            for (idx, cpu_id) in result.cpu_ids.iter().enumerate() {
                assert_eq!(cpu_ids[idx], *cpu_id);
            }
            assert_eq!(memory, result.memory_mib);
        }
    }
    /// Asserts that `get_enclave_id()` returns the expected enclave
    /// id, which is obtained through a call to `get_run_enclaves_info()`.
    ///
    /// NOTE(review): the enclave-id assertion below compares
    /// `result.enclave_id` with a reference to itself, so it can never fail —
    /// consider comparing against an independently generated ID instead.
    #[test]
    fn test_get_enclave_id() {
        let enclave_name = "testName".to_string();
        let enclave_cid: u64 = 0;
        let slot_id: u64 = 8;
        let cpu_ids: Vec<u32> = vec![1, 3];
        let memory: u64 = 64;
        let result =
            get_run_enclaves_info(enclave_name, enclave_cid, slot_id, cpu_ids.clone(), memory);
        assert!(result.is_ok());
        if let Ok(result) = result {
            let this_enclave_id = &result.enclave_id;
            assert!(this_enclave_id.eq(&result.enclave_id));
            assert_eq!(enclave_cid, result.enclave_cid);
            assert_eq!(cpu_ids.len(), result.cpu_ids.len());
            for (idx, cpu_id) in result.cpu_ids.iter().enumerate() {
                assert_eq!(cpu_ids[idx], *cpu_id);
            }
            assert_eq!(memory, result.memory_mib);
        }
    }
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/src/enclave_proc/commands.rs | src/enclave_proc/commands.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(missing_docs)]
#![deny(warnings)]
use aws_nitro_enclaves_image_format::defs::EifIdentityInfo;
use aws_nitro_enclaves_image_format::utils::eif_reader::EifReader;
use aws_nitro_enclaves_image_format::utils::{get_pcrs, PcrSignatureChecker};
use log::debug;
use sha2::{Digest, Sha384};
use std::collections::BTreeMap;
use std::fs::File;
use std::thread::JoinHandle;
use crate::common::commands_parser::RunEnclavesArgs;
use crate::common::construct_error_message;
use crate::common::json_output::EnclaveTerminateInfo;
use crate::common::{NitroCliErrorEnum, NitroCliFailure, NitroCliResult};
use crate::enclave_proc::connection::Connection;
use crate::enclave_proc::connection::{safe_conn_eprintln, safe_conn_println};
use crate::enclave_proc::cpu_info::CpuInfo;
use crate::enclave_proc::resource_manager::{EnclaveManager, EnclaveState};
use crate::enclave_proc::utils::get_enclave_describe_info;
use crate::new_nitro_cli_failure;
/// Result produced by the background thread that computes PCR measurements
/// and reads the EIF identity metadata (spawned by `run_enclaves`).
pub struct DescribeThreadResult {
    /// PCR measurements as returned by `get_pcrs`.
    pub measurements: BTreeMap<String, String>,
    /// EIF identity metadata, if the image carries a metadata section.
    pub metadata: Option<EifIdentityInfo>,
}
/// Join handle of the background thread that computes PCRs and fetches EIF
/// metadata; `None` once the result has been consumed by the describe path.
pub type DescribeThread = Option<JoinHandle<NitroCliResult<DescribeThreadResult>>>;
/// Information returned by the `run_enclaves` function.
pub struct RunEnclaveResult {
    /// Manager structure describing the enclave.
    pub enclave_manager: EnclaveManager,
    /// Handle of the still-running background thread that computes PCRs.
    pub describe_thread: DescribeThread,
}
/// Launch an enclave with the specified arguments and provide the launch status through the given connection.
///
/// On success returns the `EnclaveManager` for the new enclave together with
/// the join handle of a background thread that is still computing the PCR
/// measurements / metadata (see `DescribeThreadResult`).
pub fn run_enclaves(
    args: &RunEnclavesArgs,
    connection: Option<&Connection>,
) -> NitroCliResult<RunEnclaveResult> {
    debug!("run_enclaves");
    let eif_file = File::open(&args.eif_path).map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Failed to open the EIF file: {e:?}"),
            NitroCliErrorEnum::FileOperationFailure
        )
        .add_info(vec![&args.eif_path, "Open"])
    })?;
    // Resolve the CPU IDs the enclave will use, based on the run arguments.
    let cpu_ids = CpuInfo::new()
        .map_err(|e| e.add_subaction("Failed to construct CPU information".to_string()))?
        .get_cpu_config(args)
        .map_err(|e| e.add_subaction("Failed to get CPU configuration".to_string()))?;
    let enclave_name = match &args.enclave_name {
        Some(enclave_name) => enclave_name,
        None => {
            // NOTE(review): a missing name is reported as FileOperationFailure,
            // which looks like a mismatched error category — confirm intent.
            return Err(new_nitro_cli_failure!(
                "Failed to set enclave name in the manager".to_string(),
                NitroCliErrorEnum::FileOperationFailure
            ))
        }
    };
    let mut enclave_manager = EnclaveManager::new(
        args.enclave_cid,
        args.memory_mib,
        cpu_ids,
        eif_file,
        args.debug_mode,
        enclave_name.to_string(),
    )
    .map_err(|e| {
        e.add_subaction("Failed to construct EnclaveManager with given arguments".to_string())
    })?;
    let mut signature_checker = PcrSignatureChecker::from_eif(&args.eif_path).map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Failed to create EIF signature checker: {e:?}"),
            NitroCliErrorEnum::EIFSignatureCheckerError
        )
    })?;
    // Verify the certificate only if a signature section exists in the EIF.
    if !signature_checker.is_empty() {
        signature_checker.verify().map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Invalid signing certificate: {e:?}"),
                NitroCliErrorEnum::EIFSignatureCheckerError
            )
        })?;
    }
    // Launch parallel computing of PCRs so enclave boot is not delayed;
    // the result is joined later by the describe path.
    let path = args.eif_path.clone();
    let handle = std::thread::spawn(move || {
        let mut eif_reader = EifReader::from_eif(path).map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed initialize EIF reader: {e:?}"),
                NitroCliErrorEnum::EifParsingError
            )
        })?;
        let measurements = get_pcrs(
            &mut eif_reader.image_hasher,
            &mut eif_reader.bootstrap_hasher,
            &mut eif_reader.app_hasher,
            &mut eif_reader.cert_hasher,
            Sha384::new(),
            eif_reader.signature_section.is_some(),
        )
        .map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to calculate PCRs: {e:?}"),
                NitroCliErrorEnum::EifParsingError
            )
        })?;
        Ok(DescribeThreadResult {
            measurements,
            metadata: eif_reader.get_metadata(),
        })
    });
    // Boot the enclave; progress is reported over `connection` when provided.
    enclave_manager
        .run_enclave(connection)
        .map_err(|e| e.add_subaction("Failed to run enclave".to_string()))?;
    enclave_manager
        .update_state(EnclaveState::Running)
        .map_err(|e| e.add_subaction("Failed to update enclave state".to_string()))?;
    Ok(RunEnclaveResult {
        enclave_manager,
        describe_thread: Some(handle),
    })
}
/// Terminate an enclave and provide the termination status through the given connection.
///
/// The state transitions `Terminating` -> `Empty`; on failure the error is
/// both printed to the peer (when a connection is given) and returned.
pub fn terminate_enclaves(
    enclave_manager: &mut EnclaveManager,
    connection: Option<&Connection>,
) -> NitroCliResult<()> {
    // Snapshot the identifiers up front for the final status report.
    let enclave_id = enclave_manager.enclave_id.clone();
    let enclave_name = Some(enclave_manager.enclave_name.clone());
    debug!("terminate_enclaves");
    enclave_manager
        .update_state(EnclaveState::Terminating)
        .map_err(|e| e.add_subaction("Failed to update enclave state".to_string()))?;
    if let Err(error_info) = enclave_manager.terminate_enclave() {
        safe_conn_eprintln(
            connection,
            format!(
                "Warning: Failed to stop enclave {}\nError message: {:?}",
                enclave_manager.enclave_id,
                construct_error_message(&error_info).as_str()
            )
            .as_str(),
        )?;
        return Err(error_info);
    }
    enclave_manager.update_state(EnclaveState::Empty)?;
    safe_conn_eprintln(
        connection,
        format!(
            "Successfully terminated enclave {}.",
            enclave_manager.enclave_id
        )
        .as_str(),
    )?;
    // We notify the CLI of the termination's status (JSON on stdout channel).
    safe_conn_println(
        connection,
        serde_json::to_string_pretty(&EnclaveTerminateInfo::new(enclave_name, enclave_id, true))
            .map_err(|err| {
                new_nitro_cli_failure!(
                    &format!("Failed to display enclave termination data: {err:?}"),
                    NitroCliErrorEnum::SerdeError
                )
            })?
            .as_str(),
    )
}
/// Obtain an enclave's description and provide it through the given connection.
///
/// `with_metadata` controls whether EIF identity metadata is included in the
/// JSON document written to the peer.
pub fn describe_enclaves(
    enclave_manager: &EnclaveManager,
    connection: &Connection,
    with_metadata: bool,
) -> NitroCliResult<()> {
    debug!("describe_enclaves");
    let info = get_enclave_describe_info(enclave_manager, with_metadata)
        .map_err(|e| e.add_subaction(String::from("Execute Describe Enclave command")))?;
    connection.println(
        serde_json::to_string_pretty(&info)
            .map_err(|err| {
                new_nitro_cli_failure!(
                    &format!("Failed to display enclave describe data: {err:?}"),
                    NitroCliErrorEnum::SerdeError
                )
            })?
            .as_str(),
    )
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/src/enclave_proc/mod.rs | src/enclave_proc/mod.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(missing_docs)]
#![deny(warnings)]
#![allow(clippy::too_many_arguments)]
/// The module which provides top-level enclave commands.
pub mod commands;
/// The module which provides a connection to the enclave process.
pub mod connection;
/// The module which provides an enclave socket monitor that listens for incoming connections.
pub mod connection_listener;
/// The module which provides CPU information utilities.
pub mod cpu_info;
/// The module which provides the enclave manager and its utilities.
pub mod resource_manager;
/// The module which provides the managed Unix socket needed to communicate with the enclave process.
pub mod socket;
/// The module which provides additional enclave process utilities.
pub mod utils;
use log::{info, warn};
use nix::sys::epoll::EpollFlags;
use nix::sys::signal::{Signal, SIGHUP};
use nix::unistd::{daemon, getpid, getppid};
use std::os::unix::io::{FromRawFd, IntoRawFd, RawFd};
use std::os::unix::net::UnixStream;
use std::process;
use std::thread::{self, JoinHandle};
use super::common::MSG_ENCLAVE_CONFIRM;
use super::common::{construct_error_message, enclave_proc_command_send_single, notify_error};
use super::common::{
EnclaveProcessCommandType, ExitGracefully, NitroCliErrorEnum, NitroCliFailure, NitroCliResult,
};
use crate::common::commands_parser::{DescribeEnclavesArgs, EmptyArgs, RunEnclavesArgs};
use crate::common::logger::EnclaveProcLogWriter;
use crate::common::signal_handler::SignalHandler;
use crate::enclave_proc::connection::safe_conn_println;
use crate::new_nitro_cli_failure;
use commands::{describe_enclaves, run_enclaves, terminate_enclaves, DescribeThread};
use connection::Connection;
use connection_listener::ConnectionListener;
use resource_manager::EnclaveManager;
/// The type of enclave event that has been handled (see `try_handle_enclave_event`).
enum HandledEnclaveEvent {
    /// A hang-up event: the enclave went away and the process must shut down.
    HangUp,
    /// An unexpected but non-critical event.
    Unexpected,
    /// There was no event that needed handling (not an enclave connection).
    None,
}
/// Obtain the logger ID from the full enclave ID.
fn get_logger_id(enclave_id: &str) -> String {
    // The full enclave ID has the form "i-(...)-enc<enc_id>"; keep only the
    // part after the last "-enc" marker (or the whole string if absent).
    let enc_id = enclave_id.rsplit("-enc").next().unwrap_or(enclave_id);
    format!("enc-{}:{}", enc_id, std::process::id())
}
/// Get the action associated with `cmd` as a String.
fn get_command_action(cmd: EnclaveProcessCommandType) -> String {
    // Map each command to its human-readable action label once, then
    // allocate the owned String a single time on the way out.
    let action = match cmd {
        EnclaveProcessCommandType::Run => "Run Enclave",
        EnclaveProcessCommandType::Terminate | EnclaveProcessCommandType::TerminateComplete => {
            "Terminate Enclave"
        }
        EnclaveProcessCommandType::Describe => "Describe Enclaves",
        EnclaveProcessCommandType::GetEnclaveCID => "Get Enclave CID",
        EnclaveProcessCommandType::GetEnclaveFlags => "Get Enclave Flags",
        EnclaveProcessCommandType::ConnectionListenerStop => "Stop Connection Listener",
        _ => "Unknown Command",
    };
    action.to_string()
}
/// Send the given command, then close the channel that was used for sending it.
///
/// Both the send and the shutdown are fatal on failure: the process exits
/// with the corresponding errno via `ok_or_exit_with_errno`.
fn send_command_and_close(cmd: EnclaveProcessCommandType, stream: &mut UnixStream) {
    // Resolve the human-readable action label once for error reporting.
    let action_str = &get_command_action(cmd);
    enclave_proc_command_send_single::<EmptyArgs>(cmd, None, stream)
        .ok_or_exit_with_errno(Some("Failed to send command"));
    // Close both directions so the peer observes EOF after the command.
    stream
        .shutdown(std::net::Shutdown::Both)
        .map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to close stream after sending command: {e:?}"),
                NitroCliErrorEnum::SocketCloseError
            )
            .set_action(action_str.to_string())
        })
        .ok_or_exit_with_errno(Some("Failed to shut down stream"));
}
/// Notify that an error has occurred, also forwarding the error message to a connection.
///
/// The message is logged locally via `notify_error` and then written to the
/// peer's stderr channel; failing to forward it is itself fatal (process exit).
fn notify_error_with_conn(err_msg: &str, conn: &Connection, action: EnclaveProcessCommandType) {
    let action_str = &get_command_action(action);
    notify_error(err_msg);
    conn.eprintln(err_msg)
        .map_err(|e| e.set_action(action_str.to_string()))
        .ok_or_exit_with_errno(Some("Failed to forward error message to connection"));
}
/// Perform enclave termination.
///
/// Runs on a dedicated thread (see `notify_terminate`). Any termination error
/// is reported through `connection`; in all cases the main thread is then
/// notified via a `TerminateComplete` command on `thread_stream`.
fn run_terminate(
    connection: Connection,
    mut thread_stream: UnixStream,
    mut enclave_manager: EnclaveManager,
) {
    terminate_enclaves(&mut enclave_manager, Some(&connection)).unwrap_or_else(|e| {
        notify_error_with_conn(
            construct_error_message(&e).as_str(),
            &connection,
            EnclaveProcessCommandType::Terminate,
        );
    });
    // Notify the main thread that enclave termination has completed.
    send_command_and_close(
        EnclaveProcessCommandType::TerminateComplete,
        &mut thread_stream,
    );
}
/// Start enclave termination.
///
/// Creates a socket pair: one end is registered with the connection listener's
/// epoll (so the eventual `TerminateComplete` wakes the event loop) and the
/// other end is handed to a freshly spawned thread that performs the actual
/// termination. Returns the join handle of that thread.
fn notify_terminate(
    connection: Connection,
    conn_listener: &ConnectionListener,
    enclave_manager: EnclaveManager,
) -> NitroCliResult<JoinHandle<()>> {
    let (local_stream, thread_stream) = UnixStream::pair().map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Could not create stream pair: {e:?}"),
            NitroCliErrorEnum::SocketPairCreationFailure
        )
    })?;
    conn_listener.add_stream_to_epoll(local_stream)?;
    Ok(thread::spawn(move || {
        run_terminate(connection, thread_stream, enclave_manager)
    }))
}
/// Launch the POSIX signal handler on a dedicated thread and ensure its events are accessible.
///
/// A socket pair bridges the handler thread and the event loop: the local end
/// is watched via the listener's epoll, the other end is given to the handler
/// so a received signal can wake the loop (see `enclave_proc_handle_signals`).
fn enclave_proc_configure_signal_handler(conn_listener: &ConnectionListener) -> NitroCliResult<()> {
    let mut signal_handler = SignalHandler::new_with_defaults()
        .mask_all()
        .map_err(|e| e.add_subaction("Failed to configure signal handler".to_string()))?;
    let (local_stream, thread_stream) = UnixStream::pair()
        .map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to create stream pair: {e:?}"),
                NitroCliErrorEnum::SocketPairCreationFailure
            )
            .set_action("Run Enclave".to_string())
        })
        .ok_or_exit_with_errno(Some("Failed to create stream pair"));
    conn_listener
        .add_stream_to_epoll(local_stream)
        .map_err(|e| {
            e.add_subaction(
                "Failed to add stream to epoll when configuring signal handler".to_string(),
            )
        })?;
    // Hand over the raw fd; the handler thread reconstructs the stream from it.
    signal_handler.start_handler(thread_stream.into_raw_fd(), enclave_proc_handle_signals);
    Ok(())
}
/// The default POSIX signal handling function, which notifies the enclave process to shut down gracefully.
///
/// `comm_fd` is the raw fd handed over by `enclave_proc_configure_signal_handler`.
/// NOTE(review): the `true` return value presumably tells the handler loop to
/// stop processing further signals — confirm against `SignalHandler::start_handler`.
fn enclave_proc_handle_signals(comm_fd: RawFd, signal: Signal) -> bool {
    // SAFETY-relevant: `comm_fd` was produced by `into_raw_fd()` on the other
    // side, so reclaiming ownership here is the matching `from_raw_fd` and the
    // stream is closed when this function returns.
    let mut stream = unsafe { UnixStream::from_raw_fd(comm_fd) };
    warn!(
        "Received signal {:?}. The enclave process will now close.",
        signal
    );
    send_command_and_close(
        EnclaveProcessCommandType::ConnectionListenerStop,
        &mut stream,
    );
    true
}
/// Handle an event coming from an enclave.
///
/// Returns which kind of enclave event (if any) was consumed so the event
/// loop can decide whether to exit, skip, or keep processing the connection.
fn try_handle_enclave_event(connection: &Connection) -> NitroCliResult<HandledEnclaveEvent> {
    // Check if this is an enclave connection; `None` means a regular one.
    if let Some(mut enc_events) = connection
        .get_enclave_event_flags()
        .map_err(|e| e.add_subaction("Failed to get enclave events flag".to_string()))?
    {
        let enc_hup = enc_events.contains(EpollFlags::EPOLLHUP);
        // Check if non-hang-up events have occurred.
        enc_events.remove(EpollFlags::EPOLLHUP);
        if !enc_events.is_empty() {
            warn!("Received unexpected enclave event(s): {:?}", enc_events);
        }
        // If we received the hang-up event we need to terminate cleanly.
        if enc_hup {
            warn!("Received hang-up event from the enclave. Enclave process will shut down.");
            return Ok(HandledEnclaveEvent::HangUp);
        }
        // Non-hang-up enclave events are not fatal.
        return Ok(HandledEnclaveEvent::Unexpected);
    }
    Ok(HandledEnclaveEvent::None)
}
/// Fetch the result of the describe thread that was started when the enclave
/// was launched. `describe_thread.take()` consumes the handle, so the join
/// (and the copy into the enclave manager) happens only the first time after
/// the enclave is started; later calls are no-ops.
fn fetch_describe_result(
    describe_thread: &mut DescribeThread,
    enclave_manager: &mut EnclaveManager,
) -> NitroCliResult<()> {
    if let Some(thread) = describe_thread.take() {
        let result = thread
            .join()
            .map_err(|e| {
                // Fixed message: this joins the describe/PCR thread, not the
                // termination thread (the old text was copy-pasted).
                new_nitro_cli_failure!(
                    &format!("Describe thread join failed: {e:?}"),
                    NitroCliErrorEnum::ThreadJoinFailure
                )
            })?
            .map_err(|e| e.add_subaction("Failed to save PCR values".to_string()))?;
        enclave_manager
            .set_measurements(result.measurements)
            .map_err(|e| {
                e.add_subaction("Failed to set measurements inside enclave handle.".to_string())
            })?;
        if let Some(metadata) = result.metadata {
            enclave_manager.set_metadata(metadata).map_err(|e| {
                e.add_subaction("Failed to set metadata inside enclave handle.".to_string())
            })?;
        }
    }
    Ok(())
}
/// Handle a single command, returning early when an error occurs.
///
/// Returns `(status_code, done)`: the errno-style status to report back to
/// the CLI and whether the event loop should exit after this command.
fn handle_command(
    cmd: EnclaveProcessCommandType,
    logger: &EnclaveProcLogWriter,
    connection: &Connection,
    conn_listener: &mut ConnectionListener,
    enclave_manager: &mut EnclaveManager,
    terminate_thread: &mut Option<std::thread::JoinHandle<()>>,
    describe_thread: &mut DescribeThread,
) -> NitroCliResult<(i32, bool)> {
    Ok(match cmd {
        EnclaveProcessCommandType::Run => {
            // We should never receive a Run command if we are already running.
            if !enclave_manager.enclave_id.is_empty() {
                (libc::EEXIST, false)
            } else {
                let run_args = connection.read::<RunEnclavesArgs>().map_err(|e| {
                    e.add_subaction("Failed to get run arguments".to_string())
                        .set_action("Run Enclave".to_string())
                })?;
                info!("Run args = {:?}", run_args);
                let run_result = run_enclaves(&run_args, Some(connection)).map_err(|e| {
                    e.add_subaction("Failed to trigger enclave run".to_string())
                        .set_action("Run Enclave".to_string())
                })?;
                *enclave_manager = run_result.enclave_manager;
                *describe_thread = run_result.describe_thread;
                info!("Enclave ID = {}", enclave_manager.enclave_id);
                // Rename the log stream after the freshly assigned enclave ID.
                logger
                    .update_logger_id(&get_logger_id(&enclave_manager.enclave_id))
                    .map_err(|e| e.set_action("Failed to update logger ID".to_string()))?;
                conn_listener
                    .start(&enclave_manager.enclave_id)
                    .map_err(|e| {
                        e.set_action("Failed to start connection listener thread".to_string())
                    })?;
                // Add the enclave descriptor to epoll to listen for enclave events.
                let enc_fd = enclave_manager
                    .get_enclave_descriptor()
                    .map_err(|e| e.set_action("Failed to get enclave descriptor".to_string()))?;
                conn_listener
                    .register_enclave_descriptor(enc_fd)
                    .map_err(|e| {
                        e.set_action("Failed to register enclave descriptor".to_string())
                    })?;
                (0, false)
            }
        }
        EnclaveProcessCommandType::Terminate => {
            // Termination runs on its own thread; completion arrives later as
            // a `TerminateComplete` command through the listener's epoll.
            *terminate_thread = Some(
                notify_terminate(connection.clone(), conn_listener, enclave_manager.clone())
                    .map_err(|e| {
                        e.set_action("Failed to send enclave termination request".to_string())
                    })?,
            );
            (0, false)
        }
        EnclaveProcessCommandType::TerminateComplete => {
            info!("Enclave has completed termination.");
            (0, true)
        }
        EnclaveProcessCommandType::GetEnclaveCID => {
            let enclave_cid = enclave_manager
                .get_console_resources_enclave_cid()
                .map_err(|e| {
                    e.set_action("Failed to get console resources (enclave CID)".to_string())
                })?;
            connection.write_u64(enclave_cid).map_err(|e| {
                e.add_subaction("Failed to write enclave CID to connection".to_string())
                    .set_action("Get Enclave CID".to_string())
            })?;
            (0, false)
        }
        EnclaveProcessCommandType::GetEnclaveFlags => {
            let enclave_flags = enclave_manager
                .get_console_resources_enclave_flags()
                .map_err(|e| {
                    e.set_action("Failed to get console resources (enclave flags)".to_string())
                })?;
            connection.write_u64(enclave_flags).map_err(|e| {
                e.add_subaction("Failed to write enclave flags to connection".to_string())
                    .set_action("Get Enclave Flags".to_string())
            })?;
            (0, false)
        }
        EnclaveProcessCommandType::GetEnclaveName => {
            // Confirm the request first, then send the name as pretty JSON.
            connection.write_u64(MSG_ENCLAVE_CONFIRM).map_err(|e| {
                e.add_subaction("Failed to write confirmation".to_string())
                    .set_action("Get Enclave Name".to_string())
            })?;
            safe_conn_println(
                Some(connection),
                serde_json::to_string_pretty(&enclave_manager.enclave_name)
                    .map_err(|err| {
                        new_nitro_cli_failure!(
                            &format!("Failed to write enclave name to connection: {err:?}"),
                            NitroCliErrorEnum::SerdeError
                        )
                    })?
                    .as_str(),
            )?;
            (0, false)
        }
        EnclaveProcessCommandType::GetIDbyName => {
            connection.write_u64(MSG_ENCLAVE_CONFIRM).map_err(|e| {
                e.add_subaction("Failed to write confirmation".to_string())
                    .set_action("Name to ID".to_string())
            })?;
            let name = connection.read::<String>().map_err(|e| {
                e.add_subaction("Failed to get enclave name".to_string())
                    .set_action("Name to ID".to_string())
            })?;
            // Respond only if the current enclave name matches; a silent
            // non-match lets other enclave processes answer instead.
            if enclave_manager.enclave_name == name {
                safe_conn_println(
                    Some(connection),
                    serde_json::to_string_pretty(&enclave_manager.enclave_id)
                        .map_err(|err| {
                            new_nitro_cli_failure!(
                                &format!("Failed to display RunEnclaves data: {err:?}"),
                                NitroCliErrorEnum::SerdeError
                            )
                        })?
                        .as_str(),
                )?;
            }
            (0, false)
        }
        EnclaveProcessCommandType::Describe => {
            let describe_args = connection.read::<DescribeEnclavesArgs>().map_err(|e| {
                e.add_subaction("Failed to get describe arguments".to_string())
                    .set_action("Describe Enclave".to_string())
            })?;
            connection.write_u64(MSG_ENCLAVE_CONFIRM).map_err(|e| {
                e.add_subaction("Failed to write confirmation".to_string())
                    .set_action("Describe Enclaves".to_string())
            })?;
            // Evaluate the describe thread result if not already consumed.
            fetch_describe_result(describe_thread, enclave_manager)?;
            describe_enclaves(enclave_manager, connection, describe_args.metadata).map_err(
                |e| {
                    e.add_subaction("Failed to describe enclave".to_string())
                        .set_action("Describe Enclaves".to_string())
                },
            )?;
            (0, false)
        }
        EnclaveProcessCommandType::ConnectionListenerStop => (0, true),
        EnclaveProcessCommandType::NotPermitted => (libc::EACCES, false),
    })
}
/// The main event loop of the enclave process.
///
/// Accepts connections from CLI instances, from the enclave's descriptor and
/// from internal threads (signal handler, terminate thread), dispatching each
/// received command via `handle_command` until a terminal event occurs.
fn process_event_loop(
    comm_stream: UnixStream,
    logger: &EnclaveProcLogWriter,
) -> NitroCliResult<()> {
    let mut conn_listener = ConnectionListener::new()?;
    let mut enclave_manager = EnclaveManager::default();
    let mut terminate_thread: Option<std::thread::JoinHandle<()>> = None;
    let mut describe_thread: DescribeThread = None;
    let mut done = false;
    let mut ret_value = Ok(());
    // Start the signal handler before spawning any other threads. This is done since the
    // handler will mask all relevant signals from the current thread and this setting will
    // be automatically inherited by all threads spawned from this point on; we want this
    // because only the dedicated thread spawned by the handler should listen for signals.
    enclave_proc_configure_signal_handler(&conn_listener)
        .map_err(|e| e.add_subaction("Failed to configure signal handler".to_string()))?;
    // Add the CLI communication channel to epoll.
    conn_listener
        .handle_new_connection(comm_stream)
        .map_err(|e| {
            e.add_subaction("Failed to add CLI communication channel to epoll".to_string())
        })?;
    while !done {
        // We can get connections to CLI instances, to the enclave or to ourselves.
        let connection =
            conn_listener.get_next_connection(enclave_manager.get_enclave_descriptor().ok())?;
        // If this is an enclave event, handle it.
        match try_handle_enclave_event(&connection) {
            Ok(HandledEnclaveEvent::HangUp) => break,
            Ok(HandledEnclaveEvent::Unexpected) => continue,
            Ok(HandledEnclaveEvent::None) => (),
            Err(error_info) => {
                ret_value = Err(error_info
                    .add_subaction("Error while trying to handle enclave event".to_string()));
                break;
            }
        }
        // At this point we have a connection that is not coming from an enclave.
        // Read the command that should be executed.
        let cmd = match connection.read_command() {
            Ok(value) => value,
            Err(mut error_info) => {
                // NOTE(review): the action label "Run Enclave" is used here
                // regardless of which command was being read — looks copy-pasted.
                error_info = error_info
                    .add_subaction("Failed to read command".to_string())
                    .set_action("Run Enclave".to_string());
                notify_error_with_conn(
                    &construct_error_message(&error_info),
                    &connection,
                    EnclaveProcessCommandType::NotPermitted,
                );
                break;
            }
        };
        info!("Received command: {:?}", cmd);
        let status = handle_command(
            cmd,
            logger,
            &connection,
            &mut conn_listener,
            &mut enclave_manager,
            &mut terminate_thread,
            &mut describe_thread,
        );
        // Obtain the status code and whether the event loop must be exited.
        let (status_code, do_break) = match status {
            Ok(value) => value,
            Err(mut error_info) => {
                // Any encountered error is both logged and sent to the other side of the connection.
                // NOTE(review): the action is always labeled "Run Enclave" even
                // when `cmd` is a different command — confirm intent.
                error_info = error_info
                    .add_subaction(format!("Failed to execute command `{cmd:?}`"))
                    .set_action("Run Enclave".to_string());
                notify_error_with_conn(&construct_error_message(&error_info), &connection, cmd);
                (libc::EINVAL, true)
            }
        };
        done = do_break;
        // Perform clean-up and stop the connection listener before returning the status to the CLI.
        // This is done to avoid race conditions where the enclave process has not yet removed the
        // socket and another CLI issues a command on that very-soon-to-be-removed socket.
        if done {
            // Stop the connection listener.
            conn_listener.stop()?;
            // Wait for the termination thread, if any.
            if terminate_thread.is_some() {
                terminate_thread.take().unwrap().join().map_err(|e| {
                    new_nitro_cli_failure!(
                        &format!("Termination thread join failed: {e:?}"),
                        NitroCliErrorEnum::ThreadJoinFailure
                    )
                })?;
            };
        }
        // Only the commands coming from the CLI must be replied to with the status code.
        match cmd {
            EnclaveProcessCommandType::Run
            | EnclaveProcessCommandType::Terminate
            | EnclaveProcessCommandType::Describe
            | EnclaveProcessCommandType::GetEnclaveName
            | EnclaveProcessCommandType::GetIDbyName => {
                connection.write_status(status_code).map_err(|_| {
                    new_nitro_cli_failure!(
                        "Process event loop failed",
                        NitroCliErrorEnum::EnclaveProcessSendReplyFailure
                    )
                })?
            }
            _ => (),
        }
    }
    info!("Enclave process {} exited event loop.", process::id());
    ret_value
}
/// Create the enclave process.
///
/// After `daemon()` the code continues in the detached child; the pre-fork
/// parent does not get past that call.
fn create_enclave_process(logger: &EnclaveProcLogWriter) -> NitroCliResult<()> {
    // To get a detached process, we first:
    // (1) Temporarily ignore specific signals (SIGHUP).
    // (2) Daemonize the current process.
    // (3) Wait until the detached process is orphaned.
    // (4) Restore signal handlers.
    let signal_handler = SignalHandler::new(&[SIGHUP])
        .mask_all()
        .map_err(|e| e.add_subaction("Failed to mask signals".to_string()))?;
    // Remember the pre-fork PID so the child can detect being orphaned below.
    let ppid = getpid();
    // Daemonize the current process. The working directory remains
    // unchanged and the standard descriptors are routed to '/dev/null'.
    daemon(true, false).map_err(|e| {
        new_nitro_cli_failure!(
            &format!("Failed to daemonize enclave process: {e:?}"),
            NitroCliErrorEnum::DaemonizeProcessFailure
        )
    })?;
    // This is our detached process.
    logger
        .update_logger_id(format!("enc-xxxxxxx:{}", std::process::id()).as_str())
        .map_err(|e| e.add_subaction("Failed to update logger id".to_string()))?;
    info!("Enclave process PID: {}", process::id());
    // We must wait until we're 100% orphaned. That is, our parent must
    // no longer be the pre-fork process.
    while getppid() == ppid {
        thread::sleep(std::time::Duration::from_millis(10));
    }
    // Restore signal handlers.
    signal_handler
        .unmask_all()
        .map_err(|e| e.add_subaction("Failed to restore signal handlers".to_string()))?;
    Ok(())
}
/// Launch the enclave process.
///
/// * `comm_stream` - A stream used for initial communication with the parent Nitro CLI instance.
/// * `logger` - The current log writer, whose ID gets updated when an enclave is launched.
///
/// Never returns: exits with code 0 on success, or with the failure's error code.
pub fn enclave_process_run(comm_stream: UnixStream, logger: &EnclaveProcLogWriter) {
    create_enclave_process(logger)
        .map_err(|e| e.set_action("Run Enclave".to_string()))
        .ok_or_exit_with_errno(None);
    let res = process_event_loop(comm_stream, logger);
    if let Err(mut error_info) = res {
        error_info = error_info.set_action("Run Enclave".to_string());
        notify_error(construct_error_message(&error_info).as_str());
        process::exit(error_info.error_code as i32);
    }
    process::exit(0);
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/src/enclave_proc/resource_manager.rs | src/enclave_proc/resource_manager.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(missing_docs)]
#![deny(warnings)]
#![allow(unknown_lints)]
#![allow(deref_nullptr)]
use aws_nitro_enclaves_image_format::defs::EifIdentityInfo;
use driver_bindings::*;
use eif_loader::{enclave_ready, TIMEOUT_MINUTE_MS};
use libc::c_int;
use log::{debug, info};
use std::collections::BTreeMap;
use std::fs::{File, OpenOptions};
use std::io::prelude::*;
use std::io::Error;
use std::mem::size_of;
use std::os::unix::io::{AsRawFd, RawFd};
use std::str;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use vsock::{VsockAddr, VsockListener};
use crate::common::json_output::EnclaveBuildInfo;
use crate::common::{construct_error_message, notify_error};
use crate::common::{
ExitGracefully, NitroCliErrorEnum, NitroCliFailure, NitroCliResult, ENCLAVE_READY_VSOCK_PORT,
VMADDR_CID_PARENT,
};
use crate::enclave_proc::connection::Connection;
use crate::enclave_proc::connection::{safe_conn_eprintln, safe_conn_println};
use crate::enclave_proc::cpu_info::EnclaveCpuConfig;
use crate::enclave_proc::utils::get_run_enclaves_info;
use crate::enclave_proc::utils::{GiB, MiB};
use crate::new_nitro_cli_failure;
use crate::utils::ceil_div;
/// CamelCase alias for the bindgen generated driver struct (ne_enclave_start_info).
pub type EnclaveStartInfo = ne_enclave_start_info;
/// CamelCase alias for the bindgen generated driver struct (ne_user_memory_region).
pub type UserMemoryRegion = ne_user_memory_region;
/// CamelCase alias for the bindgen generated struct (ne_image_load_info).
pub type ImageLoadInfo = ne_image_load_info;
/// The internal data type needed for describing an enclave.
type UnpackedHandle = (u64, u64, u64, Vec<u32>, u64, u64, EnclaveState);
/// The bit indicating if an enclave has been launched in debug mode.
pub const NE_ENCLAVE_DEBUG_MODE: u64 = 0x1;
/// Constant number used for computing the lower memory limit.
const ENCLAVE_MEMORY_EIF_SIZE_RATIO: u64 = 4;
/// Enclave Image Format (EIF) flag.
const NE_EIF_IMAGE: u64 = 0x01;
/// Flag indicating a memory region for enclave general usage.
const NE_DEFAULT_MEMORY_REGION: u64 = 0;
/// Magic number for Nitro Enclave IOCTL codes.
const NE_MAGIC: u64 = 0xAE;
/// Path corresponding to the Nitro Enclaves device file.
const NE_DEV_FILEPATH: &str = "/dev/nitro_enclaves";
/// IOCTL code for `NE_CREATE_VM`.
pub const NE_CREATE_VM: u64 = nix::request_code_read!(NE_MAGIC, 0x20, size_of::<u64>()) as _;
/// IOCTL code for `NE_ADD_VCPU`.
pub const NE_ADD_VCPU: u64 = nix::request_code_readwrite!(NE_MAGIC, 0x21, size_of::<u32>()) as _;
/// IOCTL code for `NE_GET_IMAGE_LOAD_INFO`.
pub const NE_GET_IMAGE_LOAD_INFO: u64 =
    nix::request_code_readwrite!(NE_MAGIC, 0x22, size_of::<ImageLoadInfo>()) as _;
/// IOCTL code for `NE_SET_USER_MEMORY_REGION`.
// NOTE(review): this encodes `size_of::<MemoryRegion>()` (the local struct)
// rather than `UserMemoryRegion` (the driver struct). Both are three u64
// fields today so the encoded size coincides, but the driver type would be
// the robust choice — confirm against the kernel ABI.
pub const NE_SET_USER_MEMORY_REGION: u64 =
    nix::request_code_write!(NE_MAGIC, 0x23, size_of::<MemoryRegion>()) as _;
/// IOCTL code for `NE_START_ENCLAVE`.
pub const NE_START_ENCLAVE: u64 =
    nix::request_code_readwrite!(NE_MAGIC, 0x24, size_of::<EnclaveStartInfo>()) as _;
/// Mapping between hugepage size and allocation flag, in descending order of size.
const HUGE_PAGE_MAP: [(libc::c_int, u64); 9] = [
    (libc::MAP_HUGE_16GB, 16 * GiB),
    (libc::MAP_HUGE_2GB, 2 * GiB),
    (libc::MAP_HUGE_1GB, GiB),
    (libc::MAP_HUGE_512MB, 512 * MiB),
    (libc::MAP_HUGE_256MB, 256 * MiB),
    (libc::MAP_HUGE_32MB, 32 * MiB),
    (libc::MAP_HUGE_16MB, 16 * MiB),
    (libc::MAP_HUGE_8MB, 8 * MiB),
    (libc::MAP_HUGE_2MB, 2 * MiB),
];
/// A memory region used by the enclave memory allocator.
///
/// Typically one mmap'ed hugepage-backed chunk (see `MemoryRegion::new`);
/// released via `MemoryRegion::free`.
#[derive(Clone, Debug)]
pub struct MemoryRegion {
    /// Flags to determine the usage for the memory region.
    flags: u64,
    /// The region's size in bytes.
    mem_size: u64,
    /// The region's virtual address.
    mem_addr: u64,
}
/// The state an enclave may be in.
/// Rendered in upper case ("EMPTY"/"RUNNING"/"TERMINATING") by the `Display` impl.
#[derive(Clone, Default)]
pub enum EnclaveState {
    #[default]
    /// The enclave is not running (it's either not started or has been terminated).
    Empty,
    /// The enclave is running.
    Running,
    /// The enclave is in the process of terminating.
    Terminating,
}
/// Helper structure to allocate memory resources needed by an enclave.
#[derive(Clone, Default)]
struct ResourceAllocator {
    /// The requested memory size in bytes.
    requested_mem: u64,
    /// The memory regions that have actually been allocated so far.
    mem_regions: Vec<MemoryRegion>,
}
/// Helper structure for managing an enclave's resources.
#[derive(Default)]
struct EnclaveHandle {
    /// The CPU configuration as requested by the user.
    #[allow(dead_code)]
    cpu_config: EnclaveCpuConfig,
    /// List of CPU IDs provided to the enclave.
    cpu_ids: Vec<u32>,
    /// Amount of memory allocated for the enclave, in MiB.
    allocated_memory_mib: u64,
    /// The enclave slot ID.
    slot_uid: u64,
    /// The enclave CID.
    enclave_cid: Option<u64>,
    /// Enclave flags (including the enclave debug mode flag).
    flags: u64,
    /// The driver-provided enclave descriptor.
    enc_fd: RawFd,
    /// The allocator used to manage enclave memory.
    resource_allocator: ResourceAllocator,
    /// The enclave image file.
    eif_file: Option<File>,
    /// The current state the enclave is in.
    state: EnclaveState,
    /// PCR values (filled in lazily by the describe thread).
    build_info: EnclaveBuildInfo,
    /// EIF metadata, if the image carries a metadata section.
    metadata: Option<EifIdentityInfo>,
}
/// The structure which manages an enclave in a thread-safe manner.
///
/// Cloning is cheap for the handle: clones share the same `EnclaveHandle`
/// through the `Arc`; the ID/name strings are copied.
#[derive(Clone, Default)]
pub struct EnclaveManager {
    /// The full ID of the managed enclave.
    pub enclave_id: String,
    /// Name of the managed enclave.
    pub enclave_name: String,
    /// A thread-safe handle to the enclave's resources.
    enclave_handle: Arc<Mutex<EnclaveHandle>>,
}
impl std::fmt::Display for EnclaveState {
    /// Render the state as its upper-case protocol label.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            EnclaveState::Empty => "EMPTY",
            EnclaveState::Running => "RUNNING",
            EnclaveState::Terminating => "TERMINATING",
        };
        f.write_str(label)
    }
}
impl Default for EnclaveBuildInfo {
    /// Build info with no PCR measurements recorded yet.
    fn default() -> Self {
        Self::new(BTreeMap::default())
    }
}
/// Construct a UserMemoryRegion object from a MemoryRegion instance.
/// Implementing the `From` trait automatically gives access to an
/// implementation of `Into` which can be used for a MemoryRegion instance.
impl From<&MemoryRegion> for UserMemoryRegion {
fn from(mem_reg: &MemoryRegion) -> UserMemoryRegion {
UserMemoryRegion {
flags: mem_reg.flags,
memory_size: mem_reg.mem_size,
userspace_addr: mem_reg.mem_addr,
}
}
}
impl MemoryRegion {
    /// Create a new `MemoryRegion` instance with the specified size (in bytes).
    ///
    /// `hugepage_flag` must be one of the `libc::MAP_HUGE_*` flags listed in
    /// `HUGE_PAGE_MAP`; the region size is the page size that flag denotes.
    pub fn new(hugepage_flag: libc::c_int) -> NitroCliResult<Self> {
        // Look up the byte size that corresponds to the requested hugepage flag.
        let region_index = HUGE_PAGE_MAP
            .iter()
            .position(|&page_info| page_info.0 == hugepage_flag)
            .ok_or_else(|| {
                new_nitro_cli_failure!(
                    &format!("Failed to find huge page entry for flag {hugepage_flag:X?}"),
                    NitroCliErrorEnum::NoSuchHugepageFlag
                )
            })?;
        let region_size = HUGE_PAGE_MAP[region_index].1;
        // SAFETY: anonymous private mapping with a null hint address and fd -1,
        // as mmap(2) requires for MAP_ANONYMOUS; the result is checked below.
        let addr = unsafe {
            libc::mmap(
                std::ptr::null_mut(),
                region_size as usize,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS | libc::MAP_HUGETLB | hugepage_flag,
                -1,
                0,
            )
        };
        if addr == libc::MAP_FAILED {
            return Err(new_nitro_cli_failure!(
                "Failed to map memory",
                NitroCliErrorEnum::EnclaveMmapError
            ));
        }
        // Record the allocated region.
        Ok(MemoryRegion {
            flags: NE_DEFAULT_MEMORY_REGION,
            mem_size: region_size,
            mem_addr: addr as u64,
        })
    }
/// Create a new `MemoryRegion` instance with the specified values.
pub fn new_with(flags: u64, mem_addr: u64, mem_size: u64) -> Self {
MemoryRegion {
flags,
mem_size,
mem_addr,
}
}
/// Free the memory region, if it has been allocated earlier.
fn free(&mut self) -> NitroCliResult<()> {
// Do nothing if the region has already been freed.
if self.mem_addr == 0 {
return Ok(());
}
let rc =
unsafe { libc::munmap(self.mem_addr as *mut libc::c_void, self.mem_size as usize) };
if rc < 0 {
return Err(new_nitro_cli_failure!(
"Failed to unmap memory",
NitroCliErrorEnum::EnclaveMunmapError
));
}
// Set the address and length to 0 to avoid double-freeing.
self.mem_addr = 0;
self.mem_size = 0;
Ok(())
}
/// Write the content from a file into memory at a given offset.
fn fill_from_file(
&self,
file: &mut File,
region_offset: usize,
size: usize,
) -> NitroCliResult<()> {
let offset_plus_size = region_offset.checked_add(size).ok_or_else(|| {
new_nitro_cli_failure!(
"Memory overflow when writing EIF file to region",
NitroCliErrorEnum::MemoryOverflow
)
})?;
if offset_plus_size > self.mem_size as usize {
return Err(new_nitro_cli_failure!(
"Out of region",
NitroCliErrorEnum::MemoryOverflow
));
}
let bytes = unsafe {
std::slice::from_raw_parts_mut(self.mem_addr as *mut u8, self.mem_size as usize)
};
file.read_exact(&mut bytes[region_offset..region_offset + size])
.map_err(|e| {
new_nitro_cli_failure!(
&format!("Error while reading from enclave image: {e:?}"),
NitroCliErrorEnum::EifParsingError
)
})?;
Ok(())
}
/// Get the virtual address of the memory region.
pub fn mem_addr(&self) -> u64 {
self.mem_addr
}
/// Get the size in bytes of the memory region.
pub fn mem_size(&self) -> u64 {
self.mem_size
}
}
impl Drop for MemoryRegion {
    fn drop(&mut self) {
        // `free` is idempotent (no-op once the address is reset to 0), so
        // dropping an already-freed region is safe. An unmap failure is
        // unrecoverable and exits the process with errno.
        self.free()
            .ok_or_exit_with_errno(Some("Failed to drop memory region"));
    }
}
impl ResourceAllocator {
    /// Create a new `ResourceAllocator` instance which must cover at least the requested amount of memory (in bytes).
    fn new(requested_mem: u64) -> NitroCliResult<Self> {
        if requested_mem == 0 {
            return Err(new_nitro_cli_failure!(
                "Cannot start an enclave with no memory",
                NitroCliErrorEnum::InsufficientMemoryRequested
            )
            .add_info(vec!["memory", &(requested_mem >> 20).to_string()]));
        }
        Ok(ResourceAllocator {
            requested_mem,
            mem_regions: Vec::new(),
        })
    }
    /// Allocate and provide a list of memory regions. This function creates a list of
    /// memory regions which contain at least `self.requested_mem` bytes. Each region
    /// is equivalent to a huge-page and is allocated using memory mapping.
    fn allocate(&mut self) -> NitroCliResult<&Vec<MemoryRegion>> {
        // Histogram of page size -> page count, used only for the summary log below.
        let mut allocated_pages = BTreeMap::<u64, u32>::new();
        // Signed on purpose: the second allocation pass can over-shoot and
        // drive the remaining amount negative.
        let mut needed_mem = self.requested_mem as i64;
        let mut split_index = 0;
        info!(
            "Allocating memory regions to hold {} bytes.",
            self.requested_mem
        );
        // Always allocate larger pages first, to reduce fragmentation and page count.
        // Once an allocation of a given page size fails, proceed to the next smaller
        // page size and retry.
        for page_info in HUGE_PAGE_MAP.iter() {
            while needed_mem >= page_info.1 as i64 {
                match MemoryRegion::new(page_info.0) {
                    Ok(value) => {
                        needed_mem -= value.mem_size as i64;
                        self.mem_regions.push(value);
                    }
                    Err(_) => break,
                }
            }
        }
        // If the user requested exactly the amount of memory that was reserved earlier,
        // we should be left with no more memory that needs allocation. But if the user
        // requests a smaller amount, we must then aim to reduce wasted memory from
        // larger-page allocations (Ex: if we have 1 x 1 GB page and 1 x 2 MB page, but
        // we want to allocate only 512 MB, the above algorithm will have allocated only
        // the 2 MB page, since the 1 GB page was too large for what was needed; we now
        // need to allocate in increasing order of page size in order to reduce wastage).
        if needed_mem > 0 {
            for page_info in HUGE_PAGE_MAP.iter().rev() {
                while needed_mem > 0 {
                    match MemoryRegion::new(page_info.0) {
                        Ok(value) => {
                            needed_mem -= value.mem_size as i64;
                            self.mem_regions.push(value);
                        }
                        Err(_) => break,
                    }
                }
            }
        }
        // If we still have memory to allocate, it means we have insufficient resources.
        if needed_mem > 0 {
            return Err(new_nitro_cli_failure!(
                &format!(
                    "Failed to allocate entire memory ({} MB remained)",
                    needed_mem >> 20
                ),
                NitroCliErrorEnum::InsufficientMemoryAvailable
            )
            .add_info(vec!["memory", &(self.requested_mem >> 20).to_string()]));
        }
        // At this point, we may have allocated more than we need, so we release all
        // regions we no longer need, starting with the smallest ones.
        // Sorting is descending by size: the largest regions are kept until the
        // request is covered, everything after `split_index` is surplus.
        self.mem_regions
            .sort_by(|reg1, reg2| reg2.mem_size.cmp(&reg1.mem_size));
        needed_mem = self.requested_mem as i64;
        for region in self.mem_regions.iter() {
            if needed_mem <= 0 {
                break;
            }
            needed_mem -= region.mem_size as i64;
            split_index += 1
        }
        // The regions that we no longer need are freed automatically on draining, since
        // MemRegion implements Drop.
        self.mem_regions.drain(split_index..);
        // Generate a summary of the allocated memory.
        for region in self.mem_regions.iter() {
            if let Some(page_count) = allocated_pages.get_mut(&region.mem_size) {
                *page_count += 1;
            } else {
                allocated_pages.insert(region.mem_size, 1);
            }
        }
        info!(
            "Allocated {} region(s): {}",
            self.mem_regions.len(),
            allocated_pages
                .iter()
                .map(|(size, count)| format!("{} page(s) of {} MB", count, size >> 20))
                .collect::<Vec<String>>()
                .join(", ")
        );
        Ok(&self.mem_regions)
    }
    /// Free all previously-allocated memory regions.
    fn free(&mut self) -> NitroCliResult<()> {
        for region in self.mem_regions.iter_mut() {
            region
                .free()
                .map_err(|e| e.add_subaction("Failed to free enclave memory region".to_string()))?;
        }
        self.mem_regions.clear();
        Ok(())
    }
}
impl Drop for ResourceAllocator {
    fn drop(&mut self) {
        // `free` clears the region list, so a drop after an explicit `free()`
        // is a no-op. Failure to unmap exits the process with errno.
        self.free()
            .ok_or_exit_with_errno(Some("Failed to drop resource allocator"));
    }
}
impl EnclaveHandle {
    /// Create a new enclave handle instance.
    ///
    /// Validates that the requested memory is at least
    /// `ENCLAVE_MEMORY_EIF_SIZE_RATIO` times the EIF file size, opens the NE
    /// device, issues the create-VM ioctl and sets up the resource allocator.
    /// The enclave itself is not started here (see `create_enclave`).
    fn new(
        enclave_cid: Option<u64>,
        memory_mib: u64,
        cpu_config: EnclaveCpuConfig,
        eif_file: File,
        debug_mode: bool,
    ) -> NitroCliResult<Self> {
        let requested_mem = memory_mib << 20;
        let eif_size = eif_file
            .metadata()
            .map_err(|e| {
                new_nitro_cli_failure!(
                    &format!("Failed to get enclave image file metadata: {e:?}"),
                    NitroCliErrorEnum::FileOperationFailure
                )
            })?
            .len();
        if ENCLAVE_MEMORY_EIF_SIZE_RATIO * eif_size > requested_mem {
            return Err(new_nitro_cli_failure!(
                &format!(
                    "At least {} MB must be allocated (which is {} times the EIF file size)",
                    ceil_div(ceil_div(eif_size, 1024), 1024) * ENCLAVE_MEMORY_EIF_SIZE_RATIO,
                    ENCLAVE_MEMORY_EIF_SIZE_RATIO
                ),
                NitroCliErrorEnum::InsufficientMemoryRequested
            )
            .add_info(vec![
                "memory",
                &memory_mib.to_string(),
                &(ceil_div(ceil_div(eif_size, 1024), 1024) * ENCLAVE_MEMORY_EIF_SIZE_RATIO)
                    .to_string(),
            ]));
        }
        // Open the device file.
        let dev_file = OpenOptions::new()
            .read(true)
            .write(true)
            .open(NE_DEV_FILEPATH)
            .map_err(|e| {
                new_nitro_cli_failure!(
                    &format!("Failed to open device file: {e:?}"),
                    NitroCliErrorEnum::FileOperationFailure
                )
                .add_info(vec![NE_DEV_FILEPATH, "Open"])
            })?;
        // The driver reports the enclave's slot ID back through the ioctl argument.
        let mut slot_uid: u64 = 0;
        let enc_fd = EnclaveHandle::do_ioctl(dev_file.as_raw_fd(), NE_CREATE_VM, &mut slot_uid)
            .map_err(|e| e.add_subaction("Create VM ioctl failed".to_string()))?;
        let flags: u64 = if debug_mode { NE_ENCLAVE_DEBUG_MODE } else { 0 };
        // Defensive check: `do_ioctl` only returns non-negative values, but an
        // invalid descriptor here would poison every later ioctl on the enclave.
        if enc_fd < 0 {
            return Err(new_nitro_cli_failure!(
                &format!("Invalid enclave file descriptor ({enc_fd})"),
                NitroCliErrorEnum::InvalidEnclaveFd
            ));
        }
        Ok(EnclaveHandle {
            cpu_config,
            cpu_ids: vec![],
            allocated_memory_mib: 0,
            slot_uid,
            enclave_cid,
            flags,
            enc_fd,
            resource_allocator: ResourceAllocator::new(requested_mem)
                .map_err(|e| e.add_subaction("Create resource allocator".to_string()))?,
            eif_file: Some(eif_file),
            state: EnclaveState::default(),
            build_info: EnclaveBuildInfo::new(BTreeMap::new()),
            metadata: None,
        })
    }
    /// Initialize the enclave environment and start the enclave.
    ///
    /// Returns the full enclave ID on success. Readiness is confirmed over a
    /// heartbeat vsock connection; on boot failure the enclave is terminated.
    fn create_enclave(
        &mut self,
        enclave_name: String,
        connection: Option<&Connection>,
    ) -> NitroCliResult<String> {
        self.init_memory(connection)
            .map_err(|e| e.add_subaction("Memory initialization issue".to_string()))?;
        self.init_cpus()
            .map_err(|e| e.add_subaction("vCPUs initialization issue".to_string()))?;
        // The heartbeat listener is bound before the enclave is started, so
        // the readiness message sent during boot cannot be missed.
        let sockaddr = VsockAddr::new(VMADDR_CID_PARENT, ENCLAVE_READY_VSOCK_PORT);
        let listener = VsockListener::bind(&sockaddr).map_err(|_| {
            new_nitro_cli_failure!(
                "Enclave boot heartbeat vsock connection - vsock bind error",
                NitroCliErrorEnum::EnclaveBootFailure
            )
        })?;
        let enclave_start = self
            .start(connection)
            .map_err(|e| e.add_subaction("Enclave start issue".to_string()))?;
        // Get eif size to feed it to calculate_necessary_timeout helper function
        let eif_size = self
            .eif_file
            .as_ref()
            .ok_or_else(|| {
                new_nitro_cli_failure!(
                    "Failed to get EIF file",
                    NitroCliErrorEnum::FileOperationFailure
                )
            })?
            .metadata()
            .map_err(|e| {
                new_nitro_cli_failure!(
                    &format!("Failed to get enclave image file metadata: {e:?}"),
                    NitroCliErrorEnum::FileOperationFailure
                )
            })?
            .len();
        // Update the poll timeout based on the eif size or allocated memory
        let poll_timeout = calculate_necessary_timeout(eif_size, self.allocated_memory_mib * MiB);
        enclave_ready(listener, poll_timeout).map_err(|err| {
            let err_msg = format!("Waiting on enclave to boot failed with error {err:?}");
            // A failed boot leaves a half-initialized enclave behind: tear it down.
            self.terminate_enclave_error(&err_msg);
            new_nitro_cli_failure!(&err_msg, NitroCliErrorEnum::EnclaveBootFailure)
        })?;
        self.enclave_cid = Some(enclave_start.enclave_cid);
        let info = get_run_enclaves_info(
            enclave_name,
            enclave_start.enclave_cid,
            self.slot_uid,
            self.cpu_ids.clone(),
            self.allocated_memory_mib,
        )
        .map_err(|e| e.add_subaction("Get RunEnclaves information issue".to_string()))?;
        safe_conn_println(
            connection,
            serde_json::to_string_pretty(&info)
                .map_err(|err| {
                    new_nitro_cli_failure!(
                        &format!("Failed to display RunEnclaves data: {err:?}"),
                        NitroCliErrorEnum::SerdeError
                    )
                })?
                .as_str(),
        )?;
        Ok(info.enclave_id)
    }
    /// Allocate memory and provide it to the enclave.
    fn init_memory(&mut self, connection: Option<&Connection>) -> NitroCliResult<()> {
        // Allocate the memory regions needed by the enclave.
        safe_conn_eprintln(connection, "Start allocating memory...")?;
        let requested_mem_mib = self.resource_allocator.requested_mem >> 20;
        let regions = self
            .resource_allocator
            .allocate()
            .map_err(|e| e.add_subaction("Failed to allocate enclave memory".to_string()))?;
        // Sum of all region sizes, converted from bytes to MiB.
        self.allocated_memory_mib = regions.iter().fold(0, |mut acc, val| {
            acc += val.mem_size;
            acc
        }) >> 20;
        if self.allocated_memory_mib < requested_mem_mib {
            return Err(new_nitro_cli_failure!(
                &format!(
                    "Failed to allocate sufficient memory (requested {} MB, but got {} MB)",
                    requested_mem_mib, self.allocated_memory_mib
                ),
                NitroCliErrorEnum::InsufficientMemoryAvailable
            )
            .add_info(vec!["memory", &requested_mem_mib.to_string()]));
        }
        let eif_file = self.eif_file.as_mut().ok_or_else(|| {
            new_nitro_cli_failure!(
                "Failed to get mutable reference to EIF file",
                NitroCliErrorEnum::FileOperationFailure
            )
        })?;
        // The driver fills in `memory_offset`, i.e. where in enclave memory
        // the EIF image must be written.
        let mut image_load_info = ImageLoadInfo {
            flags: NE_EIF_IMAGE,
            memory_offset: 0,
        };
        EnclaveHandle::do_ioctl(self.enc_fd, NE_GET_IMAGE_LOAD_INFO, &mut image_load_info)
            .map_err(|e| e.add_subaction("Get image load info ioctl failed".to_string()))?;
        debug!("Memory load information: {:?}", image_load_info);
        write_eif_to_regions(eif_file, regions, image_load_info.memory_offset as usize)
            .map_err(|e| e.add_subaction("Write EIF to enclave memory regions".to_string()))?;
        // Provide the regions to the driver for ownership change.
        for region in regions {
            let mut user_mem_region: UserMemoryRegion = region.into();
            EnclaveHandle::do_ioctl(self.enc_fd, NE_SET_USER_MEMORY_REGION, &mut user_mem_region)
                .map_err(|e| e.add_subaction("Set user memory region ioctl failed".to_string()))?;
        }
        info!("Finished initializing memory.");
        Ok(())
    }
    /// Initialize a single vCPU from a given ID.
    ///
    /// NOTE(review): `cpu_id` is passed mutably to the ioctl and the value
    /// pushed to `cpu_ids` is whatever the driver wrote back — presumably the
    /// driver picks a concrete CPU when 0 is passed (see `init_cpus`);
    /// confirm against the NE driver documentation.
    fn init_single_cpu(&mut self, mut cpu_id: u32) -> NitroCliResult<()> {
        EnclaveHandle::do_ioctl(self.enc_fd, NE_ADD_VCPU, &mut cpu_id)
            .map_err(|e| e.add_subaction("Add vCPU ioctl failed".to_string()))?;
        self.cpu_ids.push(cpu_id);
        debug!("Added CPU with ID {}.", cpu_id);
        Ok(())
    }
    /// Provide CPUs from the parent instance to the enclave.
    fn init_cpus(&mut self) -> NitroCliResult<()> {
        let cpu_config = self.cpu_config.clone();
        match cpu_config {
            EnclaveCpuConfig::List(cpu_ids) => {
                for cpu_id in cpu_ids {
                    self.init_single_cpu(cpu_id).map_err(|e| {
                        e.add_subaction(format!("Failed to add CPU with ID {cpu_id}"))
                    })?;
                }
            }
            EnclaveCpuConfig::Count(cpu_count) => {
                for _ in 0..cpu_count {
                    // ID 0 is passed for each requested CPU; see the note on
                    // `init_single_cpu` about the driver writing the ID back.
                    self.init_single_cpu(0)?;
                }
            }
        }
        Ok(())
    }
    /// Start an enclave after providing it with its necessary resources.
    fn start(&mut self, connection: Option<&Connection>) -> NitroCliResult<EnclaveStartInfo> {
        let mut start = EnclaveStartInfo {
            flags: self.flags,
            enclave_cid: self.enclave_cid.unwrap_or(0),
        };
        EnclaveHandle::do_ioctl(self.enc_fd, NE_START_ENCLAVE, &mut start)
            .map_err(|e| e.add_subaction("Start enclave ioctl failed".to_string()))?;
        safe_conn_eprintln(
            connection,
            format!(
                "Started enclave with enclave-cid: {}, memory: {} MiB, cpu-ids: {:?}",
                // NOTE(review): the extra braces copy the field out by value —
                // presumably because `EnclaveStartInfo` is a packed FFI struct
                // whose fields cannot be referenced directly; confirm.
                { start.enclave_cid },
                self.allocated_memory_mib,
                self.cpu_ids
            )
            .as_str(),
        )?;
        Ok(start)
    }
    /// Terminate an enclave.
    fn terminate_enclave(&mut self) -> NitroCliResult<()> {
        // A zero or absent CID means the enclave was never started or has
        // already been terminated (see `clear`), so there is nothing to do.
        if self.enclave_cid.unwrap_or(0) != 0 {
            release_enclave_descriptor(self.enc_fd)
                .map_err(|e| e.add_subaction("Failed to release enclave descriptor".to_string()))?;
            // Release used memory.
            self.resource_allocator
                .free()
                .map_err(|e| e.add_subaction("Failed to release used memory".to_string()))?;
            info!("Enclave terminated.");
            // Mark enclave as terminated.
            self.clear();
        }
        Ok(())
    }
    /// Terminate an enclave and notify in case of errors.
    fn terminate_enclave_and_notify(&mut self) {
        // Attempt to terminate the enclave we are holding.
        if let Err(error_info) = self.terminate_enclave() {
            let mut err_msg = format!(
                "Terminating enclave '{:X}' failed with error: {:?}",
                self.slot_uid,
                construct_error_message(&error_info).as_str()
            );
            err_msg.push_str(
                "!!! The instance could be in an inconsistent state, please reboot it !!!",
            );
            // The error message should reach both the user and the logger.
            notify_error(&err_msg);
        }
    }
    /// Clear handle resources after terminating an enclave.
    fn clear(&mut self) {
        self.cpu_ids.clear();
        self.allocated_memory_mib = 0;
        // `Some(0)` (not `None`) marks "terminated": both `Drop` and
        // `terminate_enclave` treat a zero CID as "nothing left to tear down".
        self.enclave_cid = Some(0);
        self.enc_fd = -1;
        self.slot_uid = 0;
    }
    /// Terminate the enclave if `run-enclave` failed.
    fn terminate_enclave_error(&mut self, err: &str) {
        let err_msg = format!("{err}. Terminating the enclave...");
        // Notify the user and the logger of the error, then terminate the enclave.
        notify_error(&err_msg);
        self.terminate_enclave_and_notify();
    }
    /// Wrapper over an `ioctl()` operation
    ///
    /// Returns the raw (non-negative) ioctl return value on success; on
    /// failure, maps the NE driver's errno values to human-readable messages.
    fn do_ioctl<T>(fd: RawFd, ioctl_code: u64, arg: &mut T) -> NitroCliResult<i32> {
        // SAFETY: FFI call into the kernel; callers pass a file descriptor
        // they own and an `arg` whose type matches the given ioctl code. The
        // raw return code is checked immediately below.
        let rc = unsafe { libc::ioctl(fd, ioctl_code as _, arg) };
        if rc >= 0 {
            return Ok(rc);
        }
        // Translate the driver-specific errno into a user-facing explanation.
        let err_msg = match Error::last_os_error().raw_os_error().unwrap_or(0) as u32 {
            NE_ERR_VCPU_ALREADY_USED => "The provided vCPU is already used".to_string(),
            NE_ERR_VCPU_NOT_IN_CPU_POOL => {
                "The provided vCPU is not available in the CPU pool".to_string()
            }
            NE_ERR_VCPU_INVALID_CPU_CORE => {
                "The vCPU core ID is invalid for the CPU pool".to_string()
            }
            NE_ERR_INVALID_MEM_REGION_SIZE => {
                "The memory region's size is not a multiple of 2 MiB".to_string()
            }
            NE_ERR_INVALID_MEM_REGION_ADDR => "The memory region's address is invalid".to_string(),
            NE_ERR_UNALIGNED_MEM_REGION_ADDR => {
                "The memory region's address is not aligned".to_string()
            }
            NE_ERR_MEM_REGION_ALREADY_USED => "The memory region is already used".to_string(),
            NE_ERR_MEM_NOT_HUGE_PAGE => {
                "The memory region is not backed by contiguous physical huge page(s)".to_string()
            }
            NE_ERR_MEM_DIFFERENT_NUMA_NODE => {
                "The memory region's pages and the CPUs belong to different NUMA nodes".to_string()
            }
            NE_ERR_MEM_MAX_REGIONS => {
                "The maximum number of memory regions per enclave has been reached".to_string()
            }
            NE_ERR_NO_MEM_REGIONS_ADDED => {
                "The enclave cannot start because no memory regions have been added".to_string()
            }
            NE_ERR_NO_VCPUS_ADDED => {
                "The enclave cannot start because no vCPUs have been added".to_string()
            }
            NE_ERR_ENCLAVE_MEM_MIN_SIZE => {
                "The enclave's memory size is lower than the minimum supported".to_string()
            }
            NE_ERR_FULL_CORES_NOT_USED => {
                "The enclave cannot start because full CPU cores have not been set".to_string()
            }
            NE_ERR_NOT_IN_INIT_STATE => {
                "The enclave is in an incorrect state to set resources or start".to_string()
            }
            NE_ERR_INVALID_VCPU => {
                "The provided vCPU is out of range of the available CPUs".to_string()
            }
            NE_ERR_NO_CPUS_AVAIL_IN_POOL => {
                "The enclave cannot be created because no CPUs are available in the pool"
                    .to_string()
            }
            NE_ERR_INVALID_PAGE_SIZE => {
                "The memory region is not backed by page(s) multiple of 2 MiB".to_string()
            }
            NE_ERR_INVALID_FLAG_VALUE => {
                "The provided flags value in the ioctl arg data structure is invalid".to_string()
            }
            NE_ERR_INVALID_ENCLAVE_CID => {
                "The provided enclave CID is invalid, being a well-known CID or the parent VM CID"
                    .to_string()
            }
            e => format!("An error has occurred: {e} (rc: {rc})"),
        };
        Err(new_nitro_cli_failure!(
            &err_msg,
            NitroCliErrorEnum::IoctlFailure
        ))
    }
}
impl Drop for EnclaveHandle {
    fn drop(&mut self) {
        // Check if we are (still) owning an enclave.
        // `clear()` resets the CID to Some(0) after termination, so a zero or
        // missing CID means there is nothing left to tear down.
        if self.enclave_cid.unwrap_or(0) == 0 {
            debug!("Resource manager does not hold an enclave.");
            return;
        }
        // Terminate the enclave, notifying of any errors.
        self.terminate_enclave_and_notify();
    }
}
impl EnclaveManager {
/// Create a new `EnclaveManager` instance.
pub fn new(
enclave_cid: Option<u64>,
memory_mib: u64,
cpu_ids: EnclaveCpuConfig,
eif_file: File,
debug_mode: bool,
enclave_name: String,
) -> NitroCliResult<Self> {
let enclave_handle =
EnclaveHandle::new(enclave_cid, memory_mib, cpu_ids, eif_file, debug_mode)
.map_err(|e| e.add_subaction("Failed to create enclave handle".to_string()))?;
Ok(EnclaveManager {
enclave_id: String::new(),
enclave_name,
enclave_handle: Arc::new(Mutex::new(enclave_handle)),
})
}
/// Launch an enclave using the previously-set configuration.
///
/// The enclave handle is locked throughout enclave creation. This is fine, since
/// the socket for receiving commands is exposed only after creation has completed.
pub fn run_enclave(&mut self, connection: Option<&Connection>) -> NitroCliResult<()> {
self.enclave_id = self
.enclave_handle
.lock()
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | true |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/src/enclave_proc/cpu_info.rs | src/enclave_proc/cpu_info.rs | // Copyright 2019-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(missing_docs)]
#![deny(warnings)]
use std::collections::BTreeSet;
use std::fs::File;
use std::io::{BufRead, BufReader};
use crate::common::commands_parser::RunEnclavesArgs;
use crate::common::{NitroCliErrorEnum, NitroCliFailure, NitroCliResult};
use crate::new_nitro_cli_failure;
/// Path corresponding to the NE CPU pool.
const POOL_FILENAME: &str = "/sys/module/nitro_enclaves/parameters/ne_cpus";
/// The CPU configuration requested by the user.
#[derive(Clone, PartialEq, Eq)]
pub enum EnclaveCpuConfig {
    /// A list with the desired CPU IDs.
    List(Vec<u32>),
    /// The number of desired CPU IDs (the concrete IDs are not specified by the user).
    Count(u32),
}
/// Aggregate CPU information for multiple CPUs.
#[derive(Debug)]
pub struct CpuInfo {
    /// The list with the CPUs available for enclaves.
    /// Populated from the NE CPU pool file (see `POOL_FILENAME`).
    cpu_ids: Vec<u32>,
}
impl Default for EnclaveCpuConfig {
fn default() -> Self {
EnclaveCpuConfig::Count(0)
}
}
impl CpuInfo {
    /// Create a new `CpuInfo` instance from the current system configuration.
    ///
    /// Reads the Nitro Enclaves CPU pool from the driver's module parameter
    /// file (`POOL_FILENAME`).
    pub fn new() -> NitroCliResult<Self> {
        let pool_file = File::open(POOL_FILENAME).map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to open CPU pool file: {e}"),
                NitroCliErrorEnum::FileOperationFailure
            )
            .add_info(vec![POOL_FILENAME, "Open"])
        })?;
        let file_reader = BufReader::new(pool_file);
        CpuInfo::from_reader(file_reader)
    }
    /// Create a new `CpuInfo` instance from a buffered reader
    pub(crate) fn from_reader<B: BufRead>(reader: B) -> NitroCliResult<Self> {
        Ok(CpuInfo {
            cpu_ids: CpuInfo::get_cpu_info(reader)?,
        })
    }
    /// Get the CPU configuration from the command-line arguments.
    ///
    /// Exactly one of `cpu_ids` / `cpu_count` is expected to be present;
    /// `cpu_ids` takes precedence when both are set.
    pub fn get_cpu_config(&self, args: &RunEnclavesArgs) -> NitroCliResult<EnclaveCpuConfig> {
        if let Some(cpu_ids) = args.cpu_ids.clone() {
            self.check_cpu_ids(&cpu_ids).map_err(|e| {
                e.add_subaction("Failed to check whether CPU list is valid".to_string())
            })?;
            Ok(EnclaveCpuConfig::List(cpu_ids))
        } else if let Some(cpu_count) = args.cpu_count {
            if self.cpu_ids.len() < cpu_count as usize {
                return Err(new_nitro_cli_failure!(
                    &format!(
                        "Insufficient CPUs available (requested {}, but maximum is {})",
                        cpu_count,
                        self.cpu_ids.len()
                    ),
                    NitroCliErrorEnum::InsufficientCpus
                )
                .add_info(vec!["cpu-count", &cpu_count.to_string()]));
            }
            Ok(EnclaveCpuConfig::Count(cpu_count))
        } else {
            // Should not happen.
            Err(new_nitro_cli_failure!(
                "Invalid CPU configuration argument",
                NitroCliErrorEnum::InvalidArgument
            ))
        }
    }
    /// Verify that a provided list of CPU IDs is valid.
    pub fn check_cpu_ids(&self, cpu_ids: &[u32]) -> NitroCliResult<()> {
        // Ensure there are no duplicate IDs.
        let unique_ids: BTreeSet<&u32> = cpu_ids.iter().collect();
        if unique_ids.len() < cpu_ids.len() {
            let duplicate_cpus = CpuInfo::get_duplicate_cpus(&unique_ids, cpu_ids);
            return Err(new_nitro_cli_failure!(
                &format!(
                    "CPU IDs list contains {} duplicate(s)",
                    cpu_ids.len() - unique_ids.len()
                ),
                NitroCliErrorEnum::InvalidCpuConfiguration
            )
            .add_info(vec!["cpu-ids", duplicate_cpus.as_str()]));
        }
        // Ensure the requested CPUs are available in the CPU pool.
        for cpu_id in unique_ids {
            if !self.cpu_ids.contains(cpu_id) {
                return Err(new_nitro_cli_failure!(
                    &format!("The CPU with ID {cpu_id} is not available in the NE CPU pool"),
                    NitroCliErrorEnum::NoSuchCpuAvailableInPool
                )
                .add_info(vec!["cpu-ids", &cpu_id.to_string()]));
            }
        }
        // At this point, all requested CPU IDs are part of the enclave CPU pool.
        Ok(())
    }
    /// Get a list of all available CPU IDs.
    pub fn get_cpu_candidates(&self) -> Vec<u32> {
        self.cpu_ids.clone()
    }
    /// Parse a `lscpu` line to obtain a numeric value.
    ///
    /// All whitespace is removed (not just leading/trailing), to cope with
    /// tab/space-padded output.
    pub fn get_value(line: &str) -> NitroCliResult<u32> {
        let mut line_str = line.to_string();
        line_str.retain(|c| !c.is_whitespace());
        line_str.parse::<u32>().map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to parse CPU ID: {e}"),
                NitroCliErrorEnum::MalformedCpuId
            )
        })
    }
    /// Parse one line of the CPU pool file into the list of CPU IDs it describes.
    fn parse_cpu_pool_line(line_str: &str) -> NitroCliResult<Vec<u32>> {
        let mut result: Vec<u32> = Vec::new();
        // The CPU pool format is: "id1-id2,id3-id4,..."
        for interval in line_str.split(',') {
            let bounds: Vec<&str> = interval.split('-').collect();
            match bounds.len() {
                // A single CPU ID.
                1 => result.push(CpuInfo::get_value(bounds[0])?),
                // An inclusive "start-end" interval.
                2 => {
                    let start_id = CpuInfo::get_value(bounds[0])?;
                    let end_id = CpuInfo::get_value(bounds[1])?;
                    for cpu_id in start_id..=end_id {
                        result.push(cpu_id);
                    }
                }
                _ => {
                    return Err(new_nitro_cli_failure!(
                        &format!("Invalid CPU ID interval ({interval})"),
                        NitroCliErrorEnum::CpuError
                    ))
                }
            }
        }
        Ok(result)
    }
    /// Parse the CPU pool and build the list of off-line CPUs.
    fn get_cpu_info<B: BufRead>(reader: B) -> NitroCliResult<Vec<u32>> {
        let mut result: Vec<u32> = Vec::new();
        for line in reader.lines() {
            let line_str = line.map_err(|e| {
                new_nitro_cli_failure!(
                    &format!("Failed to read line from CPU pool file: {e}"),
                    NitroCliErrorEnum::FileOperationFailure
                )
                .add_info(vec![POOL_FILENAME, "Read"])
            })?;
            // Skip blank lines (e.g. an empty pool file).
            if line_str.trim().is_empty() {
                continue;
            }
            result.append(&mut CpuInfo::parse_cpu_pool_line(&line_str)?);
        }
        Ok(result)
    }
    /// Get a comma-separated list of the CPU IDs which appear more than once.
    ///
    /// Fix: the IDs are now joined with ", " instead of being concatenated
    /// directly, which made the error info ambiguous (duplicates 1 and 3
    /// previously rendered as "13", indistinguishable from CPU 13).
    fn get_duplicate_cpus(uniques: &BTreeSet<&u32>, cpu_ids: &[u32]) -> String {
        let mut duplicates: Vec<String> = Vec::new();
        for unique_cpu_id in uniques {
            if cpu_ids.iter().filter(|x| x == unique_cpu_id).count() > 1 {
                duplicates.push(unique_cpu_id.to_string());
            }
        }
        duplicates.join(", ")
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::common::construct_error_message;
    // Pool-line parsing: single IDs, inclusive ranges, and rejection of
    // malformed separators or non-numeric bounds.
    #[test]
    fn test_parse_cpu_pool_line() {
        let result0 = CpuInfo::parse_cpu_pool_line("1-3,4-6,7-9");
        assert!(result0.is_ok());
        assert_eq!(result0.unwrap(), vec![1, 2, 3, 4, 5, 6, 7, 8, 9]);
        let result1 = CpuInfo::parse_cpu_pool_line("1-4,7");
        assert!(result1.is_ok());
        assert_eq!(result1.unwrap(), vec![1, 2, 3, 4, 7]);
        let result2 = CpuInfo::parse_cpu_pool_line("3,5,7,9");
        assert!(result2.is_ok());
        assert_eq!(result2.unwrap(), vec![3, 5, 7, 9]);
        let result3 = CpuInfo::parse_cpu_pool_line("3+5,7-10");
        assert!(result3.is_err());
        let result4 = CpuInfo::parse_cpu_pool_line("3-a,7-b");
        assert!(result4.is_err());
    }
    // `get_value` must tolerate arbitrary surrounding whitespace.
    #[test]
    fn test_get_value_correct_format() {
        let result0 = CpuInfo::get_value("\t3");
        assert!(result0.is_ok());
        assert_eq!(result0.unwrap(), 3);
        let result1 = CpuInfo::get_value(" \t 4");
        assert!(result1.is_ok());
        assert_eq!(result1.unwrap(), 4);
        let result2 = CpuInfo::get_value(" \n 12");
        assert!(result2.is_ok());
        assert_eq!(result2.unwrap(), 12);
    }
    // Negative numbers, hex literals and plain words are all malformed IDs.
    #[test]
    fn test_get_value_incorrect_format() {
        let result0 = CpuInfo::get_value("\t-2");
        assert!(result0.is_err());
        if let Err(err_info) = result0 {
            let err_str = construct_error_message(&err_info);
            assert!(err_str.contains("Malformed CPU ID error"));
        }
        let result1 = CpuInfo::get_value("\n\n0x06");
        assert!(result1.is_err());
        if let Err(err_info) = result1 {
            let err_str = construct_error_message(&err_info);
            assert!(err_str.contains("Malformed CPU ID error"));
        }
        let result2 = CpuInfo::get_value(" processor");
        assert!(result2.is_err());
        if let Err(err_info) = result2 {
            let err_str = construct_error_message(&err_info);
            assert!(err_str.contains("Malformed CPU ID error"));
        }
    }
    // With a pool holding only CPU 1, an oversized count or unknown IDs must
    // fail with the matching error messages.
    #[test]
    fn test_get_cpu_config_invalid_input() {
        let cpu_info = CpuInfo::from_reader("1".as_bytes()).unwrap();
        let mut run_args = RunEnclavesArgs {
            eif_path: String::new(),
            enclave_cid: None,
            memory_mib: 0,
            debug_mode: false,
            attach_console: false,
            cpu_ids: None,
            cpu_count: Some(343),
            enclave_name: Some("testName".to_string()),
        };
        let mut result = cpu_info.get_cpu_config(&run_args);
        assert!(result.is_err());
        if let Err(err_info) = result {
            let err_str = construct_error_message(&err_info);
            assert!(err_str.contains("Insufficient CPUs available"));
        }
        run_args.cpu_count = None;
        run_args.cpu_ids = Some(vec![1, 2, 3, 4, 5, 6, 7]);
        result = cpu_info.get_cpu_config(&run_args);
        assert!(result.is_err());
        if let Err(err_info) = result {
            let err_str = construct_error_message(&err_info);
            assert!(err_str.contains("No such CPU available in the pool"));
        }
    }
    // Both a valid count and a valid explicit list must map to the
    // corresponding `EnclaveCpuConfig` variant.
    #[test]
    fn test_get_cpu_config_valid_input() {
        let cpu_info = CpuInfo::from_reader("1,3".as_bytes()).unwrap();
        let mut run_args = RunEnclavesArgs {
            eif_path: String::new(),
            enclave_cid: None,
            memory_mib: 0,
            debug_mode: false,
            attach_console: false,
            cpu_ids: None,
            cpu_count: Some(2),
            enclave_name: Some("testName".to_string()),
        };
        let mut result = cpu_info.get_cpu_config(&run_args);
        assert!(result.is_ok());
        assert!(result.unwrap() == EnclaveCpuConfig::Count(2));
        run_args.cpu_count = None;
        run_args.cpu_ids = Some(vec![1, 3]);
        result = cpu_info.get_cpu_config(&run_args);
        assert!(result.is_ok());
        assert!(result.unwrap() == EnclaveCpuConfig::List(vec![1, 3]));
    }
    // A non-empty pool must yield a non-empty candidate list.
    #[test]
    fn test_get_cpu_candidates() {
        let cpu_info = CpuInfo::from_reader("1".as_bytes()).unwrap();
        let candidate_cpus = cpu_info.get_cpu_candidates();
        assert!(!candidate_cpus.is_empty());
    }
    // Duplicate IDs and IDs outside the pool are rejected; valid subsets pass.
    #[test]
    fn test_check_cpu_ids() {
        let cpu_info = CpuInfo::from_reader("1,3".as_bytes()).unwrap();
        let mut cpu_ids: Vec<u32> = vec![1];
        let mut result = cpu_info.check_cpu_ids(&cpu_ids);
        assert!(result.is_ok());
        cpu_ids = vec![1, 1];
        result = cpu_info.check_cpu_ids(&cpu_ids);
        assert!(result.is_err());
        if let Err(err_info) = result {
            let err_str = construct_error_message(&err_info);
            assert!(err_str.contains("Invalid CPU configuration"));
        }
        cpu_ids = vec![1, 3];
        result = cpu_info.check_cpu_ids(&cpu_ids);
        assert!(result.is_ok());
        cpu_ids = vec![1, 3, 5];
        result = cpu_info.check_cpu_ids(&cpu_ids);
        assert!(result.is_err());
        if let Err(err_info) = result {
            let err_str = construct_error_message(&err_info);
            assert!(err_str.contains("No such CPU available in the pool"));
        }
    }
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/src/enclave_proc/socket.rs | src/enclave_proc/socket.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(missing_docs)]
#![deny(warnings)]
use inotify::{EventMask, Inotify, WatchMask};
use log::{debug, warn};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{self, JoinHandle};
use crate::common::get_socket_path;
use crate::common::{ExitGracefully, NitroCliErrorEnum, NitroCliFailure, NitroCliResult};
use crate::new_nitro_cli_failure;
/// The structure which manages the Unix socket that an enclave process listens on for commands.
#[derive(Default)]
pub struct EnclaveProcSock {
    /// The socket's file-system path.
    socket_path: PathBuf,
    /// The thread which listens for external events which delete the socket from the file-system.
    /// `None` until `start_monitoring` is called (and in clones, which never own the thread).
    remove_listener_thread: Option<JoinHandle<()>>,
    /// A flag indicating if socket removal was requested.
    /// Shared with the listener thread so it can tell an intended removal
    /// apart from an external deletion.
    requested_remove: Arc<AtomicBool>,
}
/// The enclave process socket must allow cloning, since that is needed by the socket-listening thread.
impl Clone for EnclaveProcSock {
fn clone(&self) -> Self {
// Actually clone only what's relevant for the listening thread.
EnclaveProcSock {
socket_path: self.socket_path.clone(),
remove_listener_thread: None,
requested_remove: self.requested_remove.clone(),
}
}
}
impl Drop for EnclaveProcSock {
    fn drop(&mut self) {
        // Remove the socket file and join the monitoring thread; a failure
        // here exits the process with errno.
        self.close_mut()
            .ok_or_exit_with_errno(Some("Failed to drop socket"));
    }
}
impl EnclaveProcSock {
    /// Create a new `EnclaveProcSock` instance from a given enclave ID.
    pub fn new(enclave_id: &str) -> NitroCliResult<Self> {
        let socket_path = get_socket_path(enclave_id).map_err(|_| {
            new_nitro_cli_failure!(
                "Failed to create enclave process socket",
                NitroCliErrorEnum::SocketPathNotFound
            )
        })?;
        Ok(EnclaveProcSock {
            socket_path,
            remove_listener_thread: None,
            requested_remove: Arc::new(AtomicBool::new(false)),
        })
    }
    /// Get the path to the managed Unix socket.
    pub fn get_path(&self) -> &Path {
        self.socket_path.as_path()
    }
    /// Set the path of the managed Unix socket.
    pub fn set_path(&mut self, socket_path: PathBuf) {
        self.socket_path = socket_path;
    }
    /// Start monitoring the Unix socket's state using `inotify`.
    ///
    /// Spawns a background thread which blocks on inotify events for the
    /// socket file; if `exit_on_delete` is set, the thread reacts to an
    /// external deletion of the socket.
    pub fn start_monitoring(&mut self, exit_on_delete: bool) -> NitroCliResult<()> {
        let path_clone = self.socket_path.clone();
        let requested_remove_clone = self.requested_remove.clone();
        let socket_inotify = Inotify::init().map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to initialize socket notifications: {e:?}"),
                NitroCliErrorEnum::InotifyError
            )
        })?;
        // Relevant events to listen for are:
        // - IN_DELETE_SELF: triggered when the socket file inode gets removed.
        // - IN_ATTRIB: triggered when the reference count of the file inode changes.
        socket_inotify
            .watches()
            .add(
                self.socket_path.as_path(),
                WatchMask::ATTRIB | WatchMask::DELETE_SELF,
            )
            .map_err(|e| {
                new_nitro_cli_failure!(
                    &format!("Failed to add watch to inotify: {e:?}"),
                    NitroCliErrorEnum::InotifyError
                )
            })?;
        self.remove_listener_thread = Some(thread::spawn(move || {
            socket_removal_listener(
                path_clone,
                requested_remove_clone,
                socket_inotify,
                exit_on_delete,
            )
        }));
        Ok(())
    }
    /// Remove the managed Unix socket and clean up after it. This is called with a mutable self-reference.
    fn close_mut(&mut self) -> NitroCliResult<()> {
        // Delete the socket from the disk. Also mark that this operation is intended, so that the
        // socket file monitoring thread doesn't exit forcefully when notifying the deletion.
        self.requested_remove.store(true, Ordering::SeqCst);
        if self.socket_path.exists() {
            std::fs::remove_file(&self.socket_path).map_err(|e| {
                new_nitro_cli_failure!(
                    &format!(
                        "Failed to remove socket file {:?} from disk: {:?}",
                        self.socket_path, e
                    ),
                    NitroCliErrorEnum::FileOperationFailure
                )
                .add_info(vec![
                    self.socket_path
                        .to_str()
                        .unwrap_or("Invalid unicode socket file name"),
                    "Remove",
                ])
            })?;
        }
        // Since the socket file has been deleted, we also wait for the event listener thread to finish.
        // (`take()` both tests for and moves out the handle, replacing the
        // previous `is_some()` + `take().unwrap()` pattern.)
        if let Some(listener_thread) = self.remove_listener_thread.take() {
            listener_thread.join().map_err(|e| {
                new_nitro_cli_failure!(
                    &format!("Failed to join socket notification thread: {e:?}"),
                    NitroCliErrorEnum::ThreadJoinFailure
                )
            })?;
        }
        Ok(())
    }
    /// Remove the managed Unix socket and clean up after it.
    pub fn close(mut self) -> NitroCliResult<()> {
        self.close_mut()
            .map_err(|e| e.add_subaction("Close socket".to_string()))
    }
}
/// Listen for an `inotify` event when the socket gets deleted from the disk.
///
/// Runs until a deletion of `socket_path` is observed. A deletion that was
/// announced via `requested_remove` ends the loop gracefully; an external
/// deletion is logged and, when `exit_on_delete` is set, terminates the
/// whole process.
fn socket_removal_listener(
    socket_path: PathBuf,
    requested_remove: Arc<AtomicBool>,
    mut socket_inotify: Inotify,
    exit_on_delete: bool,
) {
    let mut event_buf = [0u8; 4096];
    let mut finished = false;
    debug!("Socket file event listener started for {:?}.", socket_path);
    while !finished {
        // Block until at least one inotify event is available.
        let events = socket_inotify
            .read_events_blocking(&mut event_buf)
            .map_err(|e| {
                new_nitro_cli_failure!(
                    &format!("Socket removal listener error: {e:?}"),
                    NitroCliErrorEnum::InotifyError
                )
                .set_action("Run Enclave".to_string())
            })
            .ok_or_exit_with_errno(Some("Failed to read inotify events"));
        for event in events {
            // DELETE_SELF fires when the inode is no longer referenced by
            // anybody; ATTRIB fires whenever the inode's reference count
            // changes. Either one only counts as a real deletion if the
            // socket file is actually gone from the file-system.
            let removal_candidate = event.mask.contains(EventMask::ATTRIB)
                || event.mask.contains(EventMask::DELETE_SELF);
            if !removal_candidate || socket_path.exists() {
                continue;
            }
            if requested_remove.load(Ordering::SeqCst) {
                // Intentional shutdown: the socket removed itself and flagged
                // the operation beforehand, so leave the loop gracefully.
                debug!("The enclave process socket has deleted itself.");
                finished = true;
            } else {
                // External deletion: without the socket no CLI instance can
                // tell this enclave process to terminate anymore, so exit
                // forcefully when configured to do so.
                warn!("The enclave process socket has been deleted!");
                if exit_on_delete {
                    std::process::exit(1);
                }
                finished = true;
            }
        }
    }
    debug!("Enclave process socket monitoring is done.");
}
#[cfg(test)]
mod tests {
use super::*;
use std::os::unix::net::UnixListener;
use std::process::Command;
const DUMMY_ENCLAVE_ID: &str = "i-0000000000000000-enc0123456789012345";
const THREADS_STR: &str = "Threads:";
const WAIT_REMOVE_MILLIS: u64 = 10;
/// Inspects the content of /proc/<PID>/status in order to
/// retrieve the number of threads running in the context of
/// process <PID>.
fn get_num_threads_from_status_output(status_str: String) -> u32 {
let start_idx = status_str.find(THREADS_STR);
let mut iter = status_str.chars();
iter.by_ref().nth(start_idx.unwrap() + THREADS_STR.len()); // skip "Threads:\t"
let slice = iter.as_str();
let new_str = slice.to_string();
let end_idx = new_str.find('\n'); // skip after the first '\n'
let substr = &slice[..end_idx.unwrap()];
substr.parse().unwrap()
}
/// Tests that the initial values of the EnclaveProcSock attributes match the
/// expected ones.
#[test]
fn test_enclaveprocsock_init() {
let socket = EnclaveProcSock::new(DUMMY_ENCLAVE_ID);
assert!(socket.is_ok());
if let Ok(socket) = socket {
assert!(socket
.socket_path
.as_path()
.to_str()
.unwrap()
.contains("0123456789012345"));
assert!(socket.remove_listener_thread.is_none());
assert!(!socket.requested_remove.load(Ordering::SeqCst));
}
}
/// Tests that removing the socket file by means other than `close()` does not
/// trigger a `socket.requested_remove` change.
#[test]
fn test_start_monitoring() {
let socket = EnclaveProcSock::new(DUMMY_ENCLAVE_ID);
assert!(socket.is_ok());
if let Ok(mut socket) = socket {
UnixListener::bind(socket.get_path())
.map_err(|e| {
new_nitro_cli_failure!(
&format!("Failed to bind to socket: {e:?}"),
NitroCliErrorEnum::SocketError
)
})
.ok_or_exit_with_errno(Some("Error binding"));
let result = socket.start_monitoring(false);
assert!(result.is_ok());
// Remove socket file and expect `socket.requested_remove` to remain False
std::fs::remove_file(&socket.socket_path).unwrap();
std::thread::sleep(std::time::Duration::from_millis(WAIT_REMOVE_MILLIS));
assert!(!socket.requested_remove.load(Ordering::SeqCst));
}
}
/// Test that calling `close()` changes `socket.requested_remove` to True and
/// that the listener thread joins.
#[test]
fn test_close() {
let socket = EnclaveProcSock::new(DUMMY_ENCLAVE_ID);
assert!(socket.is_ok());
// Get number of running threads before spawning the socket removal listener thread
let out_cmd0 = Command::new("cat")
.arg(format!("/proc/{}/status", std::process::id()))
.output()
.expect("Failed to run cat");
let out0 = std::str::from_utf8(&out_cmd0.stdout).unwrap();
let crt_num_threads0 = get_num_threads_from_status_output(out0.to_string());
if let Ok(mut socket) = socket {
let _ = UnixListener::bind(socket.get_path())
.map_err(|e| {
new_nitro_cli_failure!(
&format!("Failed to bind to socket: {e:?}"),
NitroCliErrorEnum::SocketError
)
})
.ok_or_exit_with_errno(Some("Error binding"));
let result = socket.start_monitoring(true);
assert!(result.is_ok());
// Call `close_mut()` and expect `socket.requested_remove` to change to True
let result = socket.close_mut();
assert!(result.is_ok());
assert!(socket.requested_remove.load(Ordering::SeqCst));
}
// Get number of running threads after closing the socket removal listener thread
let out_cmd1 = Command::new("cat")
.arg(format!("/proc/{}/status", std::process::id()))
.output()
.expect("Failed to run cat");
let out1 = std::str::from_utf8(&out_cmd1.stdout).unwrap();
let crt_num_threads1 = get_num_threads_from_status_output(out1.to_string());
// Check that the number of threads remains the same before and after running the test
assert_eq!(crt_num_threads0, crt_num_threads1);
}
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/src/enclave_proc/connection_listener.rs | src/enclave_proc/connection_listener.rs | // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(missing_docs)]
#![deny(warnings)]
use log::{debug, info, warn};
use nix::sys::epoll::{self, EpollEvent, EpollFlags, EpollOp};
use std::fs::set_permissions;
use std::fs::Permissions;
use std::io;
#[cfg(test)]
use std::os::raw::c_char;
use std::os::unix::fs::PermissionsExt;
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use std::os::unix::net::{UnixListener, UnixStream};
use std::thread::{self, JoinHandle};
use super::connection::Connection;
use super::socket::EnclaveProcSock;
use crate::common::commands_parser::EmptyArgs;
use crate::common::{enclave_proc_command_send_single, receive_from_stream};
use crate::common::{
EnclaveProcessCommandType, ExitGracefully, NitroCliErrorEnum, NitroCliFailure, NitroCliResult,
};
use crate::new_nitro_cli_failure;
/// A listener which waits for incoming connections on the enclave process socket.
///
/// Cloning is customized (see the `Clone` impl in this file) so that the
/// thread handle is never duplicated.
#[derive(Default)]
pub struct ConnectionListener {
    /// The epoll descriptor used to register new connections.
    epoll_fd: RawFd,
    /// A dedicated thread that listens for new connections.
    listener_thread: Option<JoinHandle<()>>,
    /// The Unix socket that the listener binds to.
    socket: EnclaveProcSock,
}
/// The listener must be cloned when launching the listening thread.
impl Clone for ConnectionListener {
fn clone(&self) -> Self {
// Actually clone only what's relevant for the listening thread.
ConnectionListener {
epoll_fd: self.epoll_fd,
listener_thread: None,
socket: self.socket.clone(),
}
}
}
impl ConnectionListener {
    /// Create a new `ConnectionListener` instance.
    ///
    /// Only the epoll descriptor is created here; binding a socket and
    /// spawning the listener thread happen later, in `start`.
    pub fn new() -> NitroCliResult<Self> {
        Ok(ConnectionListener {
            epoll_fd: epoll::epoll_create().map_err(|e| {
                new_nitro_cli_failure!(
                    &format!("Failed to initialize epoll: {e:?}"),
                    NitroCliErrorEnum::EpollError
                )
            })?,
            listener_thread: None,
            socket: EnclaveProcSock::default(),
        })
    }
    /// Expose the `epoll` descriptor.
    pub fn get_epoll_fd(&self) -> RawFd {
        self.epoll_fd
    }
    /// Initialize the connection listener from a specified enclave ID.
    ///
    /// Binds a `UnixListener` to the per-enclave socket, enables credential
    /// passing, arms socket-file monitoring and spawns the accept loop on a
    /// dedicated thread.
    pub fn start(&mut self, enclave_id: &str) -> NitroCliResult<()> {
        // Obtain the socket to listen on.
        self.socket = EnclaveProcSock::new(enclave_id)
            .map_err(|e| e.add_subaction("Failed to create enclave process socket".to_string()))?;
        // Bind the listener to the socket and spawn the listener thread.
        let listener = UnixListener::bind(self.socket.get_path()).map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to bind connection listener: {e:?}"),
                NitroCliErrorEnum::SocketError
            )
        })?;
        self.enable_credentials_passing(&listener);
        self.socket
            .start_monitoring(true)
            .map_err(|e| e.add_subaction("Failed to start monitoring socket".to_string()))?;
        debug!(
            "Connection listener started on socket {:?}.",
            self.socket.get_path()
        );
        // The clone carries the epoll fd and socket but no thread handle.
        let self_clone = self.clone();
        self.listener_thread = Some(thread::spawn(move || {
            self_clone
                .connection_listener_run(listener)
                .map_err(|e| {
                    e.add_subaction("Failed to start the listener thread".to_string())
                        .set_action("Run Enclave".to_string())
                })
                .ok_or_exit_with_errno(None);
        }));
        Ok(())
    }
    /// Add a stream to `epoll`.
    ///
    /// The stream's descriptor is handed over to epoll via `into_raw_fd`
    /// (stored in the event's data field); it is reclaimed later with
    /// `from_raw_fd` in `get_next_connection`.
    pub fn add_stream_to_epoll(&self, stream: UnixStream) -> NitroCliResult<()> {
        let stream_fd = stream.as_raw_fd();
        let mut cli_evt = EpollEvent::new(EpollFlags::EPOLLIN, stream.into_raw_fd() as u64);
        epoll::epoll_ctl(self.epoll_fd, EpollOp::EpollCtlAdd, stream_fd, &mut cli_evt).map_err(
            |e| {
                new_nitro_cli_failure!(
                    &format!("Failed to add stream to epoll: {e:?}"),
                    NitroCliErrorEnum::EpollError
                )
            },
        )?;
        Ok(())
    }
    /// Add the enclave descriptor to `epoll`.
    ///
    /// Errors and hang-ups are monitored in addition to readability.
    pub fn register_enclave_descriptor(&mut self, enc_fd: RawFd) -> NitroCliResult<()> {
        let mut enc_event = EpollEvent::new(
            EpollFlags::EPOLLIN | EpollFlags::EPOLLERR | EpollFlags::EPOLLHUP,
            enc_fd as u64,
        );
        epoll::epoll_ctl(self.epoll_fd, EpollOp::EpollCtlAdd, enc_fd, &mut enc_event).map_err(
            |e| {
                new_nitro_cli_failure!(
                    &format!("Failed to add enclave descriptor to epoll: {e:?}"),
                    NitroCliErrorEnum::EpollError
                )
            },
        )?;
        Ok(())
    }
    /// Handle an incoming connection.
    ///
    /// Reads the command type from the stream and returns it to the caller.
    pub fn handle_new_connection(
        &self,
        mut stream: UnixStream,
    ) -> NitroCliResult<EnclaveProcessCommandType> {
        let cmd_type =
            receive_from_stream::<EnclaveProcessCommandType>(&mut stream).map_err(|e| {
                e.add_subaction("Failed to receive command type from stream".to_string())
            })?;
        // All connections must be registered with epoll, with the exception of the shutdown one.
        if cmd_type != EnclaveProcessCommandType::ConnectionListenerStop {
            self.add_stream_to_epoll(stream)
                .map_err(|e| e.add_subaction("Failed to add stream to epoll".to_string()))?;
        }
        Ok(cmd_type)
    }
    /// Listen for incoming connections and handle them as they appear.
    ///
    /// Returns once a `ConnectionListenerStop` command is received or accepting
    /// fails; in both cases the socket is removed before returning.
    fn connection_listener_run(self, listener: UnixListener) -> NitroCliResult<()> {
        // Accept connections and process them (this is a blocking call).
        for stream in listener.incoming() {
            match stream {
                Ok(stream) => {
                    // Received a new connection. Shut down if required.
                    let cmd = self.handle_new_connection(stream);
                    if let Ok(cmd) = cmd {
                        if cmd == EnclaveProcessCommandType::ConnectionListenerStop {
                            break;
                        }
                    }
                }
                Err(err) => {
                    // Connection failed.
                    warn!("Connection error: {:?}", err);
                    break;
                }
            }
        }
        // Remove the listener's socket.
        self.socket
            .close()
            .map_err(|e| e.add_subaction("Failed to close socket".to_string()))?;
        debug!("Connection listener has finished.");
        Ok(())
    }
    /// Terminate the connection listener.
    ///
    /// Unblocks the accept loop by connecting to our own socket and sending a
    /// `ConnectionListenerStop` command, then joins the listener thread.
    pub fn stop(&mut self) -> NitroCliResult<()> {
        // Nothing to do if the connection listener thread has not been started.
        if self.listener_thread.is_none() {
            return Ok(());
        }
        // Send termination notification to the listener thread.
        let mut self_conn = UnixStream::connect(self.socket.get_path()).map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to connect to listener thread: {e:?}"),
                NitroCliErrorEnum::SocketError
            )
        })?;
        enclave_proc_command_send_single::<EmptyArgs>(
            EnclaveProcessCommandType::ConnectionListenerStop,
            None,
            &mut self_conn,
        )
        .map_err(|e| e.add_subaction("Failed to notify listener thread of shutdown".to_string()))?;
        // Shut the connection down.
        self_conn.shutdown(std::net::Shutdown::Both).map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to close connection: {e:?}"),
                NitroCliErrorEnum::SocketCloseError
            )
        })?;
        // Ensure that the listener thread has terminated.
        // `unwrap` is safe: the `is_none` check above guarantees the handle exists.
        self.listener_thread.take().unwrap().join().map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to join listener thread: {e:?}"),
                NitroCliErrorEnum::ThreadJoinFailure
            )
        })?;
        info!("The connection listener has been stopped.");
        Ok(())
    }
    /// Fetch the next available connection.
    pub fn get_next_connection(&self, enc_fd: Option<RawFd>) -> NitroCliResult<Connection> {
        // Wait on epoll until a valid event is received.
        let mut events = [EpollEvent::empty(); 1];
        loop {
            match epoll::epoll_wait(self.epoll_fd, &mut events, -1) {
                Ok(_) => break,
                // Retry when the wait is interrupted by a signal.
                Err(nix::errno::Errno::EINTR) => continue,
                Err(e) => {
                    return Err(new_nitro_cli_failure!(
                        &format!("Failed to wait on epoll: {e:?}"),
                        NitroCliErrorEnum::EpollError
                    ))
                }
            }
        }
        let fd = events[0].data() as RawFd;
        let input_stream = match enc_fd {
            // This is a connection to an enclave.
            Some(enc_fd) if enc_fd == fd => None,
            // This is a connection to a CLI instance or to ourselves.
            // SAFETY-relevant: ownership of `fd` was transferred to epoll in
            // `add_stream_to_epoll`; it is reclaimed exactly once here.
            _ => Some(unsafe { UnixStream::from_raw_fd(fd) }),
        };
        // Remove the fetched descriptor from epoll. We are doing this here since
        // otherwise the Connection would have to do it when dropped and we prefer
        // the Connection not touch epoll directly.
        epoll::epoll_ctl(self.epoll_fd, EpollOp::EpollCtlDel, fd, None).map_err(|e| {
            new_nitro_cli_failure!(
                &format!("Failed to remove descriptor from epoll: {e:?}"),
                NitroCliErrorEnum::EpollError
            )
        })?;
        Ok(Connection::new(events[0].events(), input_stream))
    }
    /// Enable the sending of credentials from incoming connections.
    ///
    /// Best-effort: failures are logged with `warn!` but never propagated.
    fn enable_credentials_passing(&self, listener: &UnixListener) {
        let val: libc::c_int = 1;
        // SAFETY: setsockopt receives a valid socket fd plus a pointer/length
        // pair describing a live `c_int`; the pointer is not retained.
        let rc = unsafe {
            libc::setsockopt(
                listener.as_raw_fd(),
                libc::SOL_SOCKET,
                libc::SO_PASSCRED,
                &val as *const libc::c_int as *const libc::c_void,
                std::mem::size_of::<libc::c_int>() as libc::socklen_t,
            )
        };
        if rc < 0 {
            warn!(
                "Failed to enable credentials passing on socket listener: {}",
                io::Error::last_os_error()
            );
        }
        // Since access policy is handled within the enclave process explicitly, we
        // allow full access to the socket itself (otherwise other users will not
        // be allowed to connect to the socket in the first place).
        if let Ok(sock_addr) = listener.local_addr() {
            if let Some(sock_path) = sock_addr.as_pathname() {
                let perms = Permissions::from_mode(0o766);
                if let Err(e) = set_permissions(sock_path, perms) {
                    warn!("Failed to update socket permissions: {}", e);
                }
            } else {
                warn!("Failed to get the listener's socket path.");
            }
        } else {
            warn!("Failed to get the socket listener's local address.")
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::common::{get_sockets_dir_path, SOCKETS_DIR_PATH_ENV_VAR};
    use std::env;
    use std::fs;
    use std::path::PathBuf;
    use std::process::Command;
    use std::sync::{Arc, Condvar, Mutex};
    const THREADS_STR: &str = "Threads:";
    const TMP_DIR: &str = "./npe";
    /// Remove `varname` from the process environment.
    fn unset_envvar(varname: &str) {
        // BUGFIX: the previous implementation passed `varname.as_ptr()` straight
        // to libc, but Rust string slices are not NUL-terminated, so `unsetenv`
        // could read past the end of the buffer. Build a proper NUL-terminated
        // C string first.
        let c_name =
            std::ffi::CString::new(varname).expect("environment variable name contains NUL");
        unsafe { libc::unsetenv(c_name.as_ptr() as *const c_char) };
    }
    /// Inspects the content of /proc/<PID>/status in order to
    /// retrieve the number of threads running in the context of
    /// process <PID>.
    fn get_num_threads_from_status_output(status_str: String) -> u32 {
        let start_idx = status_str.find(THREADS_STR);
        let mut iter = status_str.chars();
        iter.by_ref().nth(start_idx.unwrap() + THREADS_STR.len()); // skip "Threads:\t"
        let slice = iter.as_str();
        let new_str = slice.to_string();
        let end_idx = new_str.find('\n'); // skip after the first '\n'
        let substr = &slice[..end_idx.unwrap()];
        substr.parse().unwrap()
    }
    /// Tests that get_epoll_fd() returns the expected epoll_fd.
    #[test]
    fn test_get_epoll_fd() {
        let connection_listener = ConnectionListener::new().unwrap();
        let epoll_fd = connection_listener.epoll_fd;
        assert_eq!(epoll_fd, connection_listener.get_epoll_fd());
    }
    /// Tests that new connections are monitored and that a command
    /// sent through the connection is received correctly.
    #[test]
    fn test_handle_new_connection() {
        let (mut sock0, sock1) = UnixStream::pair().unwrap();
        let connection_listener = ConnectionListener::new().unwrap();
        let cmd = EnclaveProcessCommandType::Describe;
        let _ = enclave_proc_command_send_single::<EmptyArgs>(cmd, None, &mut sock0);
        let result = connection_listener.handle_new_connection(sock1);
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), EnclaveProcessCommandType::Describe);
    }
    /// Test that add_stream_to_epoll registers a sockfd and that next subsequent
    /// attempts to register the same sockfd fail (since the sockfd is already
    /// registered once).
    #[test]
    fn test_add_stream_to_epoll() {
        let (_, sock1) = UnixStream::pair().unwrap();
        let connection_listener = ConnectionListener::new().unwrap();
        let copy_sock1 = sock1.try_clone();
        if let Ok(copy_sock1) = copy_sock1 {
            let mut cli_evt = EpollEvent::new(EpollFlags::EPOLLIN, copy_sock1.into_raw_fd() as u64);
            let _ = epoll::epoll_ctl(
                connection_listener.epoll_fd,
                EpollOp::EpollCtlAdd,
                sock1.as_raw_fd(),
                &mut cli_evt,
            );
            // Second add should return Err(Sys(EEXIST)), as sock1 is already registered
            // with connection_listener.epoll_fd
            let result = epoll::epoll_ctl(
                connection_listener.epoll_fd,
                EpollOp::EpollCtlAdd,
                sock1.as_raw_fd(),
                &mut cli_evt,
            );
            assert!(result.is_err());
        }
    }
    /// Test that connection_listener_run closes a previously-spawned thread when
    /// processing a ConnectionListenerStop command.
    #[test]
    fn test_connection_listener_run_connection_stop() {
        let old_log_path = env::var(SOCKETS_DIR_PATH_ENV_VAR);
        env::set_var(SOCKETS_DIR_PATH_ENV_VAR, TMP_DIR);
        let resources_dir = get_sockets_dir_path();
        let path_existed = resources_dir.as_path().exists();
        let _ = fs::create_dir(resources_dir.as_path());
        let dummy_sock_name = "run_connection_stop.sock";
        let dummy_sock_path = format!(
            "{}/{}",
            resources_dir.as_path().to_str().unwrap(),
            dummy_sock_name
        );
        // Remove pre-existing socket file
        let _ = std::fs::remove_file(&dummy_sock_path);
        let mut connection_listener = ConnectionListener::new().unwrap();
        connection_listener
            .socket
            .set_path(PathBuf::from(&dummy_sock_path));
        // Get number of running threads before spawning the listener thread
        let out_cmd0 = Command::new("cat")
            .arg(format!("/proc/{}/status", std::process::id()))
            .output()
            .expect("Failed to run cat");
        let out0 = std::str::from_utf8(&out_cmd0.stdout).unwrap();
        let crt_num_threads0 = get_num_threads_from_status_output(out0.to_string());
        // Condvar pair used to wait until the spawned thread actually started.
        let pair = Arc::new((Mutex::new(false), Condvar::new()));
        let pair2 = pair.clone();
        let listener_thread = thread::spawn(move || {
            {
                let (lock, cvar) = &*pair2;
                let mut started = lock.lock().unwrap();
                *started = true;
                cvar.notify_one();
            }
            // Bind the listener to the socket and spawn the listener thread.
            let listener = UnixListener::bind(connection_listener.socket.get_path())
                .map_err(|e| format!("Failed to bind connection listener: {e:?}"))
                .unwrap();
            connection_listener.enable_credentials_passing(&listener);
            connection_listener
                .socket
                .start_monitoring(true)
                .map_err(|e| format!("Failed to start socket monitoring: {e:?}"))
                .unwrap();
            let res = connection_listener.connection_listener_run(listener);
            assert!(res.is_ok());
        });
        // Allow thread to finish spawning
        let (lock, cvar) = &*pair;
        let mut started = lock.lock().unwrap();
        while !*started {
            started = cvar.wait(started).unwrap();
        }
        // Check that the listener thread is running
        let out_cmd1 = Command::new("cat")
            .arg(format!("/proc/{}/status", std::process::id()))
            .output()
            .expect("Failed to run cat");
        let out1 = std::str::from_utf8(&out_cmd1.stdout).unwrap();
        let crt_num_threads1 = get_num_threads_from_status_output(out1.to_string());
        assert!(crt_num_threads0 < crt_num_threads1);
        let my_stream = UnixStream::connect(&dummy_sock_path);
        if let Ok(mut my_stream) = my_stream {
            // Close the listener thread
            let cmd = EnclaveProcessCommandType::ConnectionListenerStop;
            let _ = enclave_proc_command_send_single::<EmptyArgs>(cmd, None, &mut my_stream);
        }
        // Wait for thread to join after exiting
        listener_thread
            .join()
            .expect("Failed to join on the associated thread");
        // Check number of threads after closing the listener thread
        let out_cmd2 = Command::new("cat")
            .arg(format!("/proc/{}/status", std::process::id()))
            .output()
            .expect("Failed to run cat");
        let out2 = std::str::from_utf8(&out_cmd2.stdout).unwrap();
        let crt_num_threads2 = get_num_threads_from_status_output(out2.to_string());
        assert_eq!(crt_num_threads0, crt_num_threads2);
        assert!(crt_num_threads2 < crt_num_threads1);
        if !path_existed {
            // Remove whole resources_dir
            let _ = fs::remove_dir_all(resources_dir.as_path().to_str().unwrap());
        } else {
            // Only remove the socket file
            let _ = fs::remove_file(&dummy_sock_path);
        }
        // Restore previous environment variable value
        if let Ok(old_log_path) = old_log_path {
            env::set_var(SOCKETS_DIR_PATH_ENV_VAR, old_log_path);
        } else {
            env::set_var(SOCKETS_DIR_PATH_ENV_VAR, "");
            unset_envvar(&String::from(SOCKETS_DIR_PATH_ENV_VAR));
        }
    }
    /// Test that connection_listener_run keeps serving after a non-stop command
    /// and only shuts down on a ConnectionListenerStop command.
    #[test]
    fn test_connection_listener_run_describe() {
        let old_log_path = env::var(SOCKETS_DIR_PATH_ENV_VAR);
        env::set_var(SOCKETS_DIR_PATH_ENV_VAR, TMP_DIR);
        let resources_dir = get_sockets_dir_path();
        let path_existed = resources_dir.as_path().exists();
        let _ = fs::create_dir(resources_dir.as_path());
        let dummy_sock_name = "run_describe.sock";
        let dummy_sock_path = format!(
            "{}/{}",
            resources_dir.as_path().to_str().unwrap(),
            dummy_sock_name
        );
        // Remove pre-existing socket file
        let _ = std::fs::remove_file(&dummy_sock_path);
        let mut connection_listener = ConnectionListener::new().unwrap();
        connection_listener
            .socket
            .set_path(PathBuf::from(&dummy_sock_path));
        // Get number of running threads before spawning the listener thread
        let out_cmd0 = Command::new("cat")
            .arg(format!("/proc/{}/status", std::process::id()))
            .output()
            .expect("Failed to run cat");
        let out0 = std::str::from_utf8(&out_cmd0.stdout).unwrap();
        let crt_num_threads0 = get_num_threads_from_status_output(out0.to_string());
        let pair = Arc::new((Mutex::new(false), Condvar::new()));
        let pair2 = pair.clone();
        let conn_clone = connection_listener.clone();
        let listener_thread = thread::spawn(move || {
            {
                let (lock, cvar) = &*pair2;
                let mut started = lock.lock().unwrap();
                *started = true;
                cvar.notify_one();
            }
            // Bind the listener to the socket and spawn the listener thread.
            let listener = UnixListener::bind(connection_listener.socket.get_path())
                .map_err(|e| format!("Failed to bind connection listener: {e:?}"))
                .unwrap();
            connection_listener.enable_credentials_passing(&listener);
            connection_listener
                .socket
                .start_monitoring(true)
                .map_err(|e| format!("Failed to start socket monitoring: {e:?}"))
                .unwrap();
            conn_clone.connection_listener_run(listener).unwrap();
        });
        // Allow thread to finish spawning
        let (lock, cvar) = &*pair;
        let mut started = lock.lock().unwrap();
        while !*started {
            started = cvar.wait(started).unwrap();
        }
        // Check that the listener thread is running
        let out_cmd1 = Command::new("cat")
            .arg(format!("/proc/{}/status", std::process::id()))
            .output()
            .expect("Failed to run cat");
        let out1 = std::str::from_utf8(&out_cmd1.stdout).unwrap();
        let crt_num_threads1 = get_num_threads_from_status_output(out1.to_string());
        assert!(crt_num_threads0 < crt_num_threads1);
        let my_stream = UnixStream::connect(&dummy_sock_path);
        if let Ok(mut my_stream) = my_stream {
            // Run a command other than ConnectionListenerStop
            let cmd = EnclaveProcessCommandType::Describe;
            let _ = enclave_proc_command_send_single::<EmptyArgs>(cmd, None, &mut my_stream);
        }
        // Check that the listener thread is still running
        let out_cmd2 = Command::new("cat")
            .arg(format!("/proc/{}/status", std::process::id()))
            .output()
            .expect("Failed to run cat");
        let out2 = std::str::from_utf8(&out_cmd2.stdout).unwrap();
        let crt_num_threads2 = get_num_threads_from_status_output(out2.to_string());
        assert!(crt_num_threads0 < crt_num_threads2);
        let my_stream = UnixStream::connect(&dummy_sock_path);
        if let Ok(mut my_stream) = my_stream {
            // Close the listener thread
            let cmd = EnclaveProcessCommandType::ConnectionListenerStop;
            let _ = enclave_proc_command_send_single::<EmptyArgs>(cmd, None, &mut my_stream);
            // Wait for the thread to join after exiting
            listener_thread
                .join()
                .expect("Failed to join on the associated thread");
        }
        // Check number of threads after closing the listener thread
        let out_cmd3 = Command::new("cat")
            .arg(format!("/proc/{}/status", std::process::id()))
            .output()
            .expect("Failed to run cat");
        let out3 = std::str::from_utf8(&out_cmd3.stdout).unwrap();
        let crt_num_threads3 = get_num_threads_from_status_output(out3.to_string());
        assert_eq!(crt_num_threads0, crt_num_threads3);
        assert!(crt_num_threads3 < crt_num_threads1);
        if !path_existed {
            // Remove whole resources_dir
            let _ = fs::remove_dir_all(resources_dir.as_path().to_str().unwrap());
        } else {
            // Only remove the socket file
            let _ = fs::remove_file(&dummy_sock_path);
        }
        // Restore previous environment variable value
        if let Ok(old_log_path) = old_log_path {
            env::set_var(SOCKETS_DIR_PATH_ENV_VAR, old_log_path);
        } else {
            env::set_var(SOCKETS_DIR_PATH_ENV_VAR, "");
            unset_envvar(&String::from(SOCKETS_DIR_PATH_ENV_VAR));
        }
    }
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/tests/tests.rs | tests/tests.rs | // Copyright 2019-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(warnings)]
#[allow(unused_imports)]
#[cfg(test)]
mod tests {
use nitro_cli::common::commands_parser::{
BuildEnclavesArgs, RunEnclavesArgs, SignEifArgs, TerminateEnclavesArgs,
};
use nitro_cli::common::json_output::EnclaveDescribeInfo;
use nitro_cli::enclave_proc::commands::{describe_enclaves, run_enclaves, terminate_enclaves};
use nitro_cli::enclave_proc::resource_manager::NE_ENCLAVE_DEBUG_MODE;
use nitro_cli::enclave_proc::utils::{
flags_to_string, generate_enclave_id, get_enclave_describe_info,
};
use nitro_cli::utils::{Console, PcrType};
use nitro_cli::{
build_enclaves, build_from_docker, describe_eif, enclave_console, get_file_pcr,
new_enclave_name, sign_eif,
};
use nitro_cli::{CID_TO_CONSOLE_PORT_OFFSET, VMADDR_CID_HYPERVISOR};
use serde_json::json;
use std::convert::TryInto;
use std::fs::{File, OpenOptions};
use std::io::Write;
use tempfile::{tempdir, TempDir};
use openssl::asn1::Asn1Time;
use openssl::ec::{EcGroup, EcKey};
use openssl::hash::MessageDigest;
use openssl::nid::Nid;
use openssl::pkey::{PKey, Private};
use openssl::x509::{X509Name, X509};
// Remote Docker image
const SAMPLE_DOCKER: &str = "public.ecr.aws/aws-nitro-enclaves/hello:v1";
/// Golden PCR values expected from building `SAMPLE_DOCKER` on x86_64.
#[cfg(target_arch = "x86_64")]
mod sample_docker_pcrs {
    /// PCR0
    pub const IMAGE_PCR: &str = "6be47f8386175bc4853c3b821f9e6fa6f65f8bd73492d1df99ba9dd0d734e11c8941e7415d9167f7d0ea6991790566a7";
    /// PCR1
    pub const KERNEL_PCR: &str = "0343b056cd8485ca7890ddd833476d78460aed2aa161548e4e26bedf321726696257d623e8805f3f605946b3d8b0c6aa";
    /// PCR2
    pub const APP_PCR: &str = "dd61366a5424eea46f60c4e9d59e6c645a46420ccf962550ee1f3c109d230f88ec23667617aeaac425a1f50fe8e384d7";
}
/// Golden PCR values expected from building `SAMPLE_DOCKER` on aarch64.
#[cfg(target_arch = "aarch64")]
mod sample_docker_pcrs {
    /// PCR0
    pub const IMAGE_PCR: &str = "fb36ba25ea45c9ce31af266023f8ce55485c6f37c3ad95b08dd32600da7606e5f55ffb050a2ad4732cfc48f5ef9c0e84";
    /// PCR1
    pub const KERNEL_PCR: &str = "745004eab9a0fb4a67973b261c6e7fa5418dc870292927591574385649338e54686cdeb659f3c6c2e72ba11aba2158a8";
    /// PCR2
    pub const APP_PCR: &str = "9397173aa14e47fe087e8aeb63928a233db048e290830de6ce2041f4580f83b599c48432467601bed8a4883e9d94ff10";
}
// Local Docker image
const COMMAND_EXECUTER_DOCKER: &str = "command_executer:eif";
pub const MAX_BOOT_TIMEOUT_SEC: u64 = 9;
use std::convert::TryFrom;
use std::time::Duration;
/// Point NITRO_CLI_BLOBS at the default blob directory unless the caller has
/// already configured it in the environment.
fn setup_env() {
    use std::env;
    if env::var("NITRO_CLI_BLOBS").is_err() {
        env::set_var("NITRO_CLI_BLOBS", "/usr/share/nitro_enclaves/blobs");
    }
}
/// A Docker URI that cannot be pulled must make the enclave build fail.
#[test]
fn build_enclaves_invalid_uri() {
    let dir = tempdir().unwrap();
    let eif_path = dir.path().join("test.eif");
    setup_env();
    let args = BuildEnclavesArgs {
        // Private ECR repository; the image fetch is expected to fail.
        docker_uri: "667861386598.dkr.ecr.us-east-1.amazonaws.com/enclaves-devel".to_string(),
        docker_dir: None,
        output: eif_path.to_str().unwrap().to_string(),
        signing_certificate: None,
        private_key: None,
        img_name: None,
        img_version: None,
        metadata: None,
    };
    assert!(build_enclaves(args).is_err());
}
/// Building the sample Docker image must reproduce the architecture-specific
/// golden PCR values (see `sample_docker_pcrs`).
#[test]
fn build_enclaves_simple_image() {
    let dir = tempdir().unwrap();
    let eif_path = dir.path().join("test.eif");
    setup_env();
    let args = BuildEnclavesArgs {
        docker_uri: SAMPLE_DOCKER.to_string(),
        docker_dir: None,
        output: eif_path.to_str().unwrap().to_string(),
        signing_certificate: None,
        private_key: None,
        img_name: None,
        img_version: None,
        metadata: None,
    };
    // `.1` selects the measurements map from the build result tuple.
    let measurements = build_from_docker(
        &args.docker_uri,
        &args.docker_dir,
        &args.output,
        &args.signing_certificate,
        &args.private_key,
        &args.img_name,
        &args.img_version,
        &args.metadata,
    )
    .expect("Docker build failed")
    .1;
    assert_eq!(
        measurements.get("PCR0").unwrap(),
        sample_docker_pcrs::IMAGE_PCR
    );
    assert_eq!(
        measurements.get("PCR1").unwrap(),
        sample_docker_pcrs::KERNEL_PCR
    );
    assert_eq!(
        measurements.get("PCR2").unwrap(),
        sample_docker_pcrs::APP_PCR
    );
}
/// A plain public image (`hello-world:latest`) must build into an EIF
/// without error; no measurements are checked here.
#[test]
fn build_hello_world() {
    let dir = tempdir().unwrap();
    let eif_path = dir.path().join("test.eif");
    setup_env();
    let args = BuildEnclavesArgs {
        docker_uri: "hello-world:latest".to_string(),
        docker_dir: None,
        output: eif_path.to_str().unwrap().to_string(),
        signing_certificate: None,
        private_key: None,
        img_name: None,
        img_version: None,
        metadata: None,
    };
    build_from_docker(
        &args.docker_uri,
        &args.docker_dir,
        &args.output,
        &args.signing_certificate,
        &args.private_key,
        &args.img_name,
        &args.img_version,
        &args.metadata,
    )
    .expect("Docker build failed");
}
/// The locally-built command-executer image must build into an EIF without
/// error; no measurements are checked here.
#[test]
fn build_enclaves_command_executer() {
    let dir = tempdir().unwrap();
    let eif_path = dir.path().join("test.eif");
    setup_env();
    let args = BuildEnclavesArgs {
        docker_uri: COMMAND_EXECUTER_DOCKER.to_string(),
        docker_dir: None,
        output: eif_path.to_str().unwrap().to_string(),
        signing_certificate: None,
        private_key: None,
        img_name: None,
        img_version: None,
        metadata: None,
    };
    build_from_docker(
        &args.docker_uri,
        &args.docker_dir,
        &args.output,
        &args.signing_certificate,
        &args.private_key,
        &args.img_name,
        &args.img_version,
        &args.metadata,
    )
    .expect("Docker build failed");
}
/// Generate a self-signed ECDSA P-384 certificate and its private key,
/// writing both as PEM to `cert_path` and `key_path` respectively.
fn generate_signing_cert_and_key(cert_path: &str, key_path: &str) {
    // Key material: an EC key on SECP384R1, wrapped into a PKey for signing.
    let group = EcGroup::from_curve_name(Nid::SECP384R1).unwrap();
    let ec_key = EcKey::generate(&group).unwrap();
    let signing_key = PKey::from_ec_key(ec_key.clone()).unwrap();
    // Subject name — also used as the issuer, since the cert is self-signed.
    let mut name_builder = X509Name::builder().unwrap();
    name_builder
        .append_entry_by_nid(Nid::COMMONNAME, "aws.nitro-enclaves")
        .unwrap();
    let subject = name_builder.build();
    // Validity window: from now, for one year.
    let not_before = Asn1Time::days_from_now(0).unwrap();
    let not_after = Asn1Time::days_from_now(365).unwrap();
    let mut cert_builder = X509::builder().unwrap();
    cert_builder.set_version(2).unwrap();
    cert_builder.set_subject_name(&subject).unwrap();
    cert_builder.set_issuer_name(&subject).unwrap();
    cert_builder.set_pubkey(&signing_key).unwrap();
    cert_builder.set_not_before(&not_before).unwrap();
    cert_builder.set_not_after(&not_after).unwrap();
    cert_builder
        .sign(&signing_key, MessageDigest::sha384())
        .unwrap();
    let cert = cert_builder.build();
    // Persist the private key, then the certificate, as PEM files.
    let mut key_file = OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true)
        .open(key_path)
        .unwrap();
    key_file
        .write_all(&ec_key.private_key_to_pem().unwrap())
        .unwrap();
    let mut cert_file = OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true)
        .open(cert_path)
        .unwrap();
    cert_file.write_all(&cert.to_pem().unwrap()).unwrap();
}
#[test]
fn build_enclaves_signed_simple_image() {
    // Build the sample image with a freshly generated signing cert/key and
    // verify PCR0-2 still match the unsigned reference measurements.
    let dir = tempdir().unwrap();
    let dir_path = dir.path().to_str().unwrap();
    let eif_path = format!("{dir_path}/test.eif");
    let cert_path = format!("{dir_path}/cert.pem");
    let key_path = format!("{dir_path}/key.pem");
    generate_signing_cert_and_key(&cert_path, &key_path);
    setup_env();
    let args = BuildEnclavesArgs {
        docker_uri: SAMPLE_DOCKER.to_string(),
        docker_dir: None,
        output: eif_path,
        signing_certificate: Some(cert_path),
        private_key: Some(key_path),
        img_name: None,
        img_version: None,
        metadata: None,
    };
    // `.1` selects the measurements map from the build result tuple.
    let measurements = build_from_docker(
        &args.docker_uri,
        &args.docker_dir,
        &args.output,
        &args.signing_certificate,
        &args.private_key,
        &args.img_name,
        &args.img_version,
        &args.metadata,
    )
    .expect("Docker build failed")
    .1;
    assert_eq!(
        measurements.get("PCR0").unwrap(),
        sample_docker_pcrs::IMAGE_PCR
    );
    assert_eq!(
        measurements.get("PCR1").unwrap(),
        sample_docker_pcrs::KERNEL_PCR
    );
    assert_eq!(
        measurements.get("PCR2").unwrap(),
        sample_docker_pcrs::APP_PCR
    );
}
#[test]
fn run_describe_terminate_simple_docker_image() {
    // Full lifecycle (run -> describe -> terminate) for the plain sample image.
    let dir = tempdir().unwrap();
    let eif_path = dir.path().join("test.eif");
    setup_env();
    let build_args = BuildEnclavesArgs {
        docker_uri: SAMPLE_DOCKER.to_string(),
        docker_dir: None,
        output: eif_path.to_str().unwrap().to_string(),
        signing_certificate: None,
        private_key: None,
        img_name: None,
        img_version: None,
        metadata: None,
    };
    build_from_docker(
        &build_args.docker_uri,
        &build_args.docker_dir,
        &build_args.output,
        &build_args.signing_certificate,
        &build_args.private_key,
        &build_args.img_name,
        &build_args.img_version,
        &build_args.metadata,
    )
    .expect("Docker build failed");
    let args = RunEnclavesArgs {
        enclave_cid: None,
        eif_path: build_args.output,
        cpu_ids: None,
        cpu_count: Some(2),
        memory_mib: 128,
        debug_mode: true,
        attach_console: false,
        enclave_name: Some("testName".to_string()),
    };
    run_describe_terminate(args);
}
#[test]
fn run_describe_terminate_signed_enclave_image() {
    // Same lifecycle for an image that was signed at build time.
    let dir = tempdir().unwrap();
    let dir_path = dir.path().to_str().unwrap();
    let eif_path = format!("{dir_path}/test.eif");
    let cert_path = format!("{dir_path}/cert.pem");
    let key_path = format!("{dir_path}/key.pem");
    generate_signing_cert_and_key(&cert_path, &key_path);
    setup_env();
    let build_args = BuildEnclavesArgs {
        docker_uri: SAMPLE_DOCKER.to_string(),
        docker_dir: None,
        output: eif_path,
        signing_certificate: Some(cert_path),
        private_key: Some(key_path),
        img_name: None,
        img_version: None,
        metadata: None,
    };
    build_from_docker(
        &build_args.docker_uri,
        &build_args.docker_dir,
        &build_args.output,
        &build_args.signing_certificate,
        &build_args.private_key,
        &build_args.img_name,
        &build_args.img_version,
        &build_args.metadata,
    )
    .expect("Docker build failed");
    let args = RunEnclavesArgs {
        enclave_cid: None,
        eif_path: build_args.output,
        cpu_ids: None,
        cpu_count: Some(2),
        memory_mib: 256,
        debug_mode: true,
        attach_console: false,
        enclave_name: Some("testName".to_string()),
    };
    run_describe_terminate(args);
}
#[test]
fn run_describe_terminate_separately_signed_enclave_image() {
    // Same lifecycle, but the EIF is built unsigned and signed afterwards
    // via the standalone `sign-eif` path.
    let dir = tempdir().unwrap();
    let dir_path = dir.path().to_str().unwrap();
    let eif_path = format!("{dir_path}/test.eif");
    let cert_path = format!("{dir_path}/cert.pem");
    let key_path = format!("{dir_path}/key.pem");
    generate_signing_cert_and_key(&cert_path, &key_path);
    setup_env();
    let build_args = BuildEnclavesArgs {
        docker_uri: SAMPLE_DOCKER.to_string(),
        docker_dir: None,
        output: eif_path,
        signing_certificate: None,
        private_key: None,
        img_name: None,
        img_version: None,
        metadata: None,
    };
    build_from_docker(
        &build_args.docker_uri,
        &build_args.docker_dir,
        &build_args.output,
        &build_args.signing_certificate,
        &build_args.private_key,
        &build_args.img_name,
        &build_args.img_version,
        &build_args.metadata,
    )
    .expect("Docker build failed");
    let sign_args = SignEifArgs {
        eif_path: build_args.output.clone(),
        signing_certificate: Some(cert_path),
        private_key: Some(key_path),
    };
    sign_eif(sign_args).expect("Sign EIF failed");
    let args = RunEnclavesArgs {
        enclave_cid: None,
        eif_path: build_args.output,
        cpu_ids: None,
        cpu_count: Some(2),
        memory_mib: 256,
        debug_mode: true,
        attach_console: false,
        enclave_name: Some("testName".to_string()),
    };
    run_describe_terminate(args);
}
#[test]
fn run_describe_terminate_command_executer_docker_image() {
    // Lifecycle for the (larger) command-executer image; note the bigger
    // memory allocation compared to the sample-image tests.
    let dir = tempdir().unwrap();
    let eif_path = dir.path().join("test.eif");
    setup_env();
    let build_args = BuildEnclavesArgs {
        docker_uri: COMMAND_EXECUTER_DOCKER.to_string(),
        docker_dir: None,
        output: eif_path.to_str().unwrap().to_string(),
        signing_certificate: None,
        private_key: None,
        img_name: None,
        img_version: None,
        metadata: None,
    };
    build_from_docker(
        &build_args.docker_uri,
        &build_args.docker_dir,
        &build_args.output,
        &build_args.signing_certificate,
        &build_args.private_key,
        &build_args.img_name,
        &build_args.img_version,
        &build_args.metadata,
    )
    .expect("Docker build failed");
    let args = RunEnclavesArgs {
        enclave_cid: None,
        eif_path: build_args.output,
        cpu_ids: None,
        cpu_count: Some(2),
        memory_mib: 2046,
        debug_mode: true,
        attach_console: false,
        enclave_name: Some("testName".to_string()),
    };
    run_describe_terminate(args);
}
/// Shared driver: boot an enclave from `args`, verify it actually booted via
/// its console log, cross-check `describe` output against the requested
/// resources, then terminate it and verify the handle reads back as empty.
fn run_describe_terminate(args: RunEnclavesArgs) {
    setup_env();
    // Remember what was requested so we can compare against describe output.
    let req_enclave_cid = args.enclave_cid;
    let req_mem_size = args.memory_mib;
    let req_nr_cpus: u64 = args.cpu_count.unwrap().into();
    let debug_mode = args.debug_mode;
    let mut enclave_manager = run_enclaves(&args, None)
        .expect("Run enclaves failed")
        .enclave_manager;
    let enclave_cid = enclave_manager.get_console_resources_enclave_cid().unwrap();
    let enclave_flags = enclave_manager
        .get_console_resources_enclave_flags()
        .unwrap();
    // If the caller pinned a CID, the enclave must have received exactly it.
    if let Some(req_enclave_cid) = req_enclave_cid {
        assert_eq!(req_enclave_cid, enclave_cid);
    }
    // The debug bit in the enclave flags must mirror the request.
    if debug_mode {
        assert_eq!(enclave_flags & NE_ENCLAVE_DEBUG_MODE, NE_ENCLAVE_DEBUG_MODE);
    } else {
        assert_eq!(enclave_flags & NE_ENCLAVE_DEBUG_MODE, 0);
    }
    let cid_copy = enclave_cid;
    // Console is reached over vsock via the hypervisor CID; the port is the
    // enclave CID plus a fixed offset.
    let console = Console::new_nonblocking(
        VMADDR_CID_HYPERVISOR,
        u32::try_from(cid_copy).unwrap() + CID_TO_CONSOLE_PORT_OFFSET,
    )
    .expect("Failed to connect to the console");
    let mut buffer: Vec<u8> = Vec::new();
    let duration: Duration = Duration::from_secs(MAX_BOOT_TIMEOUT_SEC);
    console
        .read_to_buffer(&mut buffer, duration)
        .expect("Failed to check that the enclave booted");
    let contents = String::from_utf8(buffer).unwrap();
    // The NSM module load line in the boot log is used as the "booted" marker.
    let boot = contents.contains("nsm: loading out-of-tree module");
    assert!(boot);
    let info = get_enclave_describe_info(&enclave_manager, false).unwrap();
    let replies: Vec<EnclaveDescribeInfo> = vec![info];
    let reply = &replies[0];
    let flags = &reply.flags;
    // `{ ... }` copies the field out before comparing; presumably to avoid
    // taking a reference to a packed/unaligned field — TODO confirm.
    assert_eq!({ reply.enclave_cid }, enclave_cid);
    assert_eq!(reply.memory_mib, req_mem_size);
    assert_eq!({ reply.cpu_count }, req_nr_cpus);
    assert_eq!(reply.state, "RUNNING");
    if debug_mode {
        assert_eq!(flags, "DEBUG_MODE");
    } else {
        assert_eq!(flags, "NONE");
    }
    let _enclave_id = generate_enclave_id(0).expect("Describe enclaves failed");
    terminate_enclaves(&mut enclave_manager, None).expect("Terminate enclaves failed");
    // After termination, describe must report a zeroed-out enclave.
    let info = get_enclave_describe_info(&enclave_manager, false).unwrap();
    assert_eq!(info.enclave_cid, 0);
    assert_eq!(info.cpu_count, 0);
    assert_eq!(info.memory_mib, 0);
}
#[test]
fn build_run_describe_terminate_simple_eif_image() {
    // End-to-end: build the sample image to an EIF, then run the shared
    // run/describe/terminate lifecycle checks on it.
    let dir = tempdir().unwrap();
    let eif_path = dir.path().join("test.eif");
    setup_env();
    let build_args = BuildEnclavesArgs {
        docker_uri: SAMPLE_DOCKER.to_string(),
        docker_dir: None,
        output: eif_path.to_str().unwrap().to_string(),
        signing_certificate: None,
        private_key: None,
        img_name: None,
        img_version: None,
        metadata: None,
    };
    build_from_docker(
        &build_args.docker_uri,
        &build_args.docker_dir,
        &build_args.output,
        &build_args.signing_certificate,
        &build_args.private_key,
        &build_args.img_name,
        &build_args.img_version,
        &build_args.metadata,
    )
    .expect("Docker build failed");
    let run_args = RunEnclavesArgs {
        enclave_cid: None,
        eif_path: build_args.output,
        cpu_ids: None,
        cpu_count: Some(2),
        memory_mib: 128,
        debug_mode: true,
        attach_console: false,
        enclave_name: Some("testName".to_string()),
    };
    run_describe_terminate(run_args);
}
#[test]
fn console_without_debug_mode() {
    // Attaching to the console of a non-debug enclave must fail.
    let dir = tempdir().unwrap();
    let eif_path = dir.path().join("test.eif");
    setup_env();
    let build_args = BuildEnclavesArgs {
        docker_uri: SAMPLE_DOCKER.to_string(),
        docker_dir: None,
        output: eif_path.to_str().unwrap().to_string(),
        signing_certificate: None,
        private_key: None,
        img_name: None,
        img_version: None,
        metadata: None,
    };
    build_from_docker(
        &build_args.docker_uri,
        &build_args.docker_dir,
        &build_args.output,
        &build_args.signing_certificate,
        &build_args.private_key,
        &build_args.img_name,
        &build_args.img_version,
        &build_args.metadata,
    )
    .expect("Docker build failed");
    let run_args = RunEnclavesArgs {
        enclave_cid: None,
        eif_path: build_args.output,
        cpu_ids: None,
        cpu_count: Some(2),
        memory_mib: 128,
        debug_mode: false,
        attach_console: false,
        enclave_name: Some("testName".to_string()),
    };
    let mut enclave_manager = run_enclaves(&run_args, None)
        .expect("Run enclaves failed")
        .enclave_manager;
    let enclave_cid = enclave_manager.get_console_resources_enclave_cid().unwrap();
    let enclave_flags = enclave_manager
        .get_console_resources_enclave_flags()
        .unwrap();
    // Sanity check: the debug flag bit matches the (false) request.
    if run_args.debug_mode {
        assert_eq!(enclave_flags & NE_ENCLAVE_DEBUG_MODE, NE_ENCLAVE_DEBUG_MODE);
    } else {
        assert_eq!(enclave_flags & NE_ENCLAVE_DEBUG_MODE, 0);
    };
    let info = get_enclave_describe_info(&enclave_manager, false).unwrap();
    let replies: Vec<EnclaveDescribeInfo> = vec![info];
    let _reply = &replies[0];
    // The actual assertion under test: console attach is rejected.
    assert!(enclave_console(enclave_cid, None).is_err());
    terminate_enclaves(&mut enclave_manager, None).expect("Terminate enclaves failed");
}
#[test]
fn console_multiple_connect() {
    // A debug enclave's console can be connected to and dropped repeatedly.
    let dir = tempdir().unwrap();
    let eif_path = dir.path().join("test.eif");
    setup_env();
    let build_args = BuildEnclavesArgs {
        docker_uri: SAMPLE_DOCKER.to_string(),
        docker_dir: None,
        output: eif_path.to_str().unwrap().to_string(),
        signing_certificate: None,
        private_key: None,
        img_name: None,
        img_version: None,
        metadata: None,
    };
    build_from_docker(
        &build_args.docker_uri,
        &build_args.docker_dir,
        &build_args.output,
        &build_args.signing_certificate,
        &build_args.private_key,
        &build_args.img_name,
        &build_args.img_version,
        &build_args.metadata,
    )
    .expect("Docker build failed");
    let run_args = RunEnclavesArgs {
        enclave_cid: None,
        eif_path: build_args.output,
        cpu_ids: None,
        cpu_count: Some(2),
        memory_mib: 128,
        debug_mode: true,
        attach_console: false,
        enclave_name: Some("testName".to_string()),
    };
    let mut enclave_manager = run_enclaves(&run_args, None)
        .expect("Run enclaves failed")
        .enclave_manager;
    let enclave_cid = enclave_manager.get_console_resources_enclave_cid().unwrap();
    let enclave_flags = enclave_manager
        .get_console_resources_enclave_flags()
        .unwrap();
    if run_args.debug_mode {
        assert_eq!(enclave_flags & NE_ENCLAVE_DEBUG_MODE, NE_ENCLAVE_DEBUG_MODE);
    } else {
        assert_eq!(enclave_flags & NE_ENCLAVE_DEBUG_MODE, 0);
    }
    let info = get_enclave_describe_info(&enclave_manager, false).unwrap();
    let replies: Vec<EnclaveDescribeInfo> = vec![info];
    let _reply = &replies[0];
    for _ in 0..3 {
        // Connect, drop, then give the connection a moment to tear down
        // before reconnecting.
        let console = Console::new(
            VMADDR_CID_HYPERVISOR,
            u32::try_from(enclave_cid).unwrap() + CID_TO_CONSOLE_PORT_OFFSET,
        )
        .expect("Failed to connect to the console");
        drop(console);
        std::thread::sleep(std::time::Duration::from_secs(2));
    }
    terminate_enclaves(&mut enclave_manager, None).expect("Terminate enclaves failed");
}
#[test]
fn run_describe_terminate_simple_docker_image_loop() {
    // Stress: repeat the simple-image lifecycle back to back.
    for _ in 0..5 {
        run_describe_terminate_simple_docker_image();
    }
}
#[test]
fn run_describe_terminate_loop() {
    // Stress: interleave different image lifecycles to surface any state
    // leaked between successive enclave runs.
    for _ in 0..3 {
        run_describe_terminate_command_executer_docker_image();
        run_describe_terminate_simple_docker_image();
        run_describe_terminate_signed_enclave_image();
        run_describe_terminate_command_executer_docker_image();
        run_describe_terminate_signed_enclave_image();
    }
}
#[test]
fn build_run_save_pcrs_describe() {
    // Run the sample enclave and verify the PCRs captured by the background
    // describe thread match the known-good measurements.
    let dir = tempdir().unwrap();
    let eif_path = dir.path().join("test.eif");
    setup_env();
    let args = BuildEnclavesArgs {
        docker_uri: SAMPLE_DOCKER.to_string(),
        docker_dir: None,
        output: eif_path.to_str().unwrap().to_string(),
        signing_certificate: None,
        private_key: None,
        img_name: None,
        img_version: None,
        metadata: None,
    };
    build_from_docker(
        &args.docker_uri,
        &args.docker_dir,
        &args.output,
        &args.signing_certificate,
        &args.private_key,
        &args.img_name,
        &args.img_version,
        &args.metadata,
    )
    .expect("Docker build failed");
    setup_env();
    let run_args = RunEnclavesArgs {
        enclave_cid: None,
        eif_path: args.output,
        cpu_ids: None,
        cpu_count: Some(2),
        memory_mib: 128,
        debug_mode: true,
        attach_console: false,
        enclave_name: Some("testName".to_string()),
    };
    let run_result = run_enclaves(&run_args, None).expect("Run enclaves failed");
    let mut enclave_manager = run_result.enclave_manager;
    let mut describe_thread = run_result.describe_thread;
    // `run_enclaves` spawns a thread whose result carries the measurements;
    // join it and publish them on the enclave manager.
    assert!(describe_thread.is_some());
    let thread_result = describe_thread
        .take()
        .unwrap()
        .join()
        .expect("Failed to join thread.")
        .expect("Failed to save PCRs.");
    enclave_manager
        .set_measurements(thread_result.measurements)
        .expect("Failed to set measurements inside enclave handle.");
    get_enclave_describe_info(&enclave_manager, false).unwrap();
    let build_info = enclave_manager.get_measurements().unwrap();
    let measurements = build_info.measurements;
    assert_eq!(
        measurements.get("PCR0").unwrap(),
        sample_docker_pcrs::IMAGE_PCR
    );
    assert_eq!(
        measurements.get("PCR1").unwrap(),
        sample_docker_pcrs::KERNEL_PCR
    );
    assert_eq!(
        measurements.get("PCR2").unwrap(),
        sample_docker_pcrs::APP_PCR
    );
    let _enclave_id = generate_enclave_id(0).expect("Describe enclaves failed");
    terminate_enclaves(&mut enclave_manager, None).expect("Terminate enclaves failed");
}
/// Write a sample custom-metadata file named `meta.json` into `dir`.
///
/// The keys written here are the ones asserted on when the metadata is read
/// back after a build (`AppVersion`, `TestField`, `CustomField`).
fn create_metadata_json(dir: &TempDir) {
    let file_path = dir.path().join("meta.json");
    let meta_file = File::create(file_path).unwrap();
    let content = json!({
        "AppVersion": "3.2",
        "TestField": "Some info",
        "CustomField": "Added by user",
    });
    // Serialize straight into the file: equivalent to `to_vec` + `write_all`
    // (including the redundant `&bytes[..]` re-slice), minus the extra buffer.
    serde_json::to_writer(meta_file, &content).unwrap();
}
#[test]
fn build_with_metadata_run_describe() {
    // Build with a custom metadata file plus explicit name/version, run the
    // enclave, and check the describe-time metadata carries the expected
    // build info, docker info and user-supplied custom info.
    let dir = tempdir().unwrap();
    create_metadata_json(&dir);
    let eif_path = dir.path().join("test.eif");
    let meta_path = dir.path().join("meta.json");
    setup_env();
    let args = BuildEnclavesArgs {
        docker_uri: SAMPLE_DOCKER.to_string(),
        docker_dir: None,
        output: eif_path.to_str().unwrap().to_string(),
        signing_certificate: None,
        private_key: None,
        img_name: Some("TestName".to_string()),
        img_version: Some("1.0".to_string()),
        metadata: Some(meta_path.to_str().unwrap().to_string()),
    };
    build_from_docker(
        &args.docker_uri,
        &args.docker_dir,
        &args.output,
        &args.signing_certificate,
        &args.private_key,
        &args.img_name,
        &args.img_version,
        &args.metadata,
    )
    .expect("Docker build failed");
    setup_env();
    let run_args = RunEnclavesArgs {
        enclave_cid: None,
        eif_path: args.output,
        cpu_ids: None,
        cpu_count: Some(2),
        memory_mib: 128,
        debug_mode: true,
        attach_console: false,
        enclave_name: Some("testName".to_string()),
    };
    let run_result = run_enclaves(&run_args, None).expect("Run enclaves failed");
    let mut enclave_manager = run_result.enclave_manager;
    let mut describe_thread = run_result.describe_thread;
    // Join the background describe thread and publish its measurements and
    // metadata on the enclave manager.
    assert!(describe_thread.is_some());
    let thread_result = describe_thread
        .take()
        .unwrap()
        .join()
        .expect("Failed to join thread.")
        .expect("Failed to save PCRs.");
    enclave_manager
        .set_measurements(thread_result.measurements)
        .expect("Failed to set measurements inside enclave handle.");
    let metadata = thread_result.metadata.expect("Failed to fetch metadata");
    enclave_manager
        .set_metadata(metadata.clone())
        .expect("Failed to set metadata inside enclave handle.");
    // Build info produced by the CLI itself.
    assert_eq!(metadata.build_info.img_os, "Linux");
    assert_eq!(metadata.build_info.build_tool, env!("CARGO_PKG_NAME"));
    assert_eq!(
        metadata.build_info.build_tool_version,
        env!("CARGO_PKG_VERSION")
    );
    // Docker info copied from the source image.
    assert_eq!(
        *metadata
            .docker_info
            .get("RepoTags")
            .unwrap()
            .get(0)
            .unwrap(),
        json!("public.ecr.aws/aws-nitro-enclaves/hello:v1")
    );
    // Custom info taken verbatim from the user-supplied meta.json.
    assert_eq!(
        *metadata.custom_info.get("AppVersion").unwrap(),
        json!("3.2")
    );
    assert_eq!(
        *metadata.custom_info.get("TestField").unwrap(),
        json!("Some info")
    );
    assert_eq!(
        *metadata.custom_info.get("CustomField").unwrap(),
        json!("Added by user")
    );
    let _enclave_id = generate_enclave_id(0).expect("Describe enclaves failed");
    terminate_enclaves(&mut enclave_manager, None).expect("Terminate enclaves failed");
}
#[test]
fn build_run_default_enclave_name() {
    // With no explicit name, the enclave name is derived from the EIF file
    // ("test" for "test.eif").
    let dir = tempdir().unwrap();
    let eif_path = dir.path().join("test.eif");
    setup_env();
    let args = BuildEnclavesArgs {
        docker_uri: SAMPLE_DOCKER.to_string(),
        docker_dir: None,
        output: eif_path.to_str().unwrap().to_string(),
        signing_certificate: None,
        private_key: None,
        img_name: None,
        img_version: None,
        metadata: None,
    };
    build_from_docker(
        &args.docker_uri,
        &args.docker_dir,
        &args.output,
        &args.signing_certificate,
        &args.private_key,
        &args.img_name,
        &args.img_version,
        &args.metadata,
    )
    .expect("Docker build failed");
    setup_env();
    let mut run_args = RunEnclavesArgs {
        enclave_cid: None,
        eif_path: args.output,
        cpu_ids: None,
        cpu_count: Some(2),
        memory_mib: 128,
        debug_mode: true,
        attach_console: false,
        enclave_name: None,
    };
    // Derive a default name; the empty list means no names are in use yet.
    let names = Vec::new();
    run_args.enclave_name =
        Some(new_enclave_name(run_args.clone(), names).expect("Failed to set new name."));
    let run_result = run_enclaves(&run_args, None).expect("Run enclaves failed");
    let mut enclave_manager = run_result.enclave_manager;
    get_enclave_describe_info(&enclave_manager, false).unwrap();
    let enclave_name = enclave_manager.enclave_name.clone();
    // Assert that EIF name has been set
    assert_eq!(enclave_name, "test");
    terminate_enclaves(&mut enclave_manager, None).expect("Terminate enclaves failed");
}
#[test]
fn new_enclave_names() {
let dir = tempdir().unwrap();
let eif_path = dir.path().join("test.eif");
let mut run_args = RunEnclavesArgs {
enclave_cid: None,
eif_path: eif_path.to_str().unwrap().to_string(),
cpu_ids: None,
cpu_count: Some(2),
memory_mib: 128,
debug_mode: true,
attach_console: false,
enclave_name: Some("enclaveName".to_string()),
};
let mut names = Vec::new();
let name =
new_enclave_name(run_args.clone(), names.clone()).expect("Failed to set new name.");
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | true |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/tests/test_nitro_cli_args.rs | tests/test_nitro_cli_args.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(warnings)]
#[cfg(test)]
mod test_nitro_cli_args {
use clap::{Arg, Command};
use nitro_cli::create_app;
#[test]
fn terminate_enclave_enclave_id_arg_is_required() {
    // Bare `terminate-enclave` with no identifier must be rejected.
    let parsed = create_app!().try_get_matches_from(["nitro cli", "terminate-enclave"]);
    assert!(parsed.is_err())
}
#[test]
fn terminate_enclave_enclave_id_takes_value() {
    // `--enclave-id` without a value is invalid.
    let parsed =
        create_app!().try_get_matches_from(["nitro cli", "terminate-enclave", "--enclave-id"]);
    assert!(parsed.is_err())
}
#[test]
fn terminate_enclave_enclave_id_takes_one_value() {
    // Exactly one value after `--enclave-id` parses fine.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "terminate-enclave",
        "--enclave-id",
        "i-1234_enc123",
    ]);
    assert!(parsed.is_ok())
}
#[test]
fn terminate_enclave_enclave_id_takes_multiple_values() {
    // A trailing second value after the id must be rejected.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "terminate-enclave",
        "--enclave-id",
        "1234",
        "135",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn terminate_enclave_name() {
    // Terminating by `--enclave-name` is supported.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "terminate-enclave",
        "--enclave-name",
        "testName",
    ]);
    assert!(parsed.is_ok())
}
#[test]
fn terminate_enclave_name_is_required() {
    // `--enclave-name` without a value is invalid.
    let parsed =
        create_app!().try_get_matches_from(["nitro cli", "terminate-enclave", "--enclave-name"]);
    assert!(parsed.is_err())
}
#[test]
fn terminate_enclave_name_takes_multiple_values() {
    // Only a single name may follow `--enclave-name`.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "terminate-enclave",
        "--enclave-name",
        "name1",
        "name2",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn describe_enclaves_correct_command() {
    // Plain `describe-enclaves` needs no extra arguments.
    let parsed = create_app!().try_get_matches_from(["nitro cli", "describe-enclaves"]);
    assert!(parsed.is_ok())
}
#[test]
fn describe_enclaves_request_metadata_correct() {
    // The optional `--metadata` flag is accepted.
    let parsed =
        create_app!().try_get_matches_from(["nitro cli", "describe-enclaves", "--metadata"]);
    assert!(parsed.is_ok())
}
#[test]
fn describe_eif_correct_command() {
    // `describe-eif` with an EIF path parses fine.
    let parsed = create_app!()
        .try_get_matches_from(["nitro cli", "describe-eif", "--eif-path", "dir/image.eif"]);
    assert!(parsed.is_ok())
}
#[test]
fn describe_eif_without_path_arg() {
    // `--eif-path` must be followed by a value.
    let parsed = create_app!().try_get_matches_from(["nitro cli", "describe-eif", "--eif-path"]);
    assert!(parsed.is_err())
}
#[test]
fn console_without_enclave_id_arg_is_required() {
    // `console` needs an enclave identifier.
    let parsed = create_app!().try_get_matches_from(["nitro cli", "console"]);
    assert!(parsed.is_err())
}
#[test]
fn console_enclave_id_takes_value() {
    // `--enclave-id` without a value is invalid.
    let parsed = create_app!().try_get_matches_from(["nitro cli", "console", "--enclave-id"]);
    assert!(parsed.is_err())
}
#[test]
fn console_correct_command() {
    // Minimal valid console invocation.
    let parsed = create_app!()
        .try_get_matches_from(["nitro cli", "console", "--enclave-id", "i-1234_enc123"]);
    assert!(parsed.is_ok())
}
#[test]
fn console_enclave_id_takes_one_value() {
    // A trailing extra value after the id must be rejected.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "console",
        "--enclave-id",
        "i-1234_enc123",
        "135",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn console_enclave_name() {
    // Selecting the console target by name is supported.
    let parsed =
        create_app!().try_get_matches_from(["nitro cli", "console", "--enclave-name", "testName"]);
    assert!(parsed.is_ok())
}
#[test]
fn console_enclave_name_is_required() {
    // `--enclave-name` without a value is invalid.
    let parsed = create_app!().try_get_matches_from(["nitro cli", "console", "--enclave-name"]);
    assert!(parsed.is_err())
}
#[test]
fn console_enclave_name_takes_multiple_values() {
    // Only one name may follow `--enclave-name`.
    let parsed = create_app!()
        .try_get_matches_from(["nitro cli", "console", "--enclave-name", "name1", "name2"]);
    assert!(parsed.is_err())
}
#[test]
fn console_correct_disconnect_timeout_command() {
    // `--disconnect-timeout` combines with `--enclave-id`.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "console",
        "--enclave-id",
        "i-1234_enc123",
        "--disconnect-timeout",
        "10",
    ]);
    assert!(parsed.is_ok())
}
#[test]
fn console_correct_disconnect_timeout_command_with_name() {
    // `--disconnect-timeout` combines with `--enclave-name` as well.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "console",
        "--enclave-name",
        "testName",
        "--disconnect-timeout",
        "10",
    ]);
    assert!(parsed.is_ok())
}
#[test]
fn console_disconnect_timeout_takes_value() {
    // `--disconnect-timeout` without a value is invalid.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "console",
        "--enclave-id",
        "i-1234_enc123",
        "--disconnect-timeout",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn build_enclave_docker_uri_arg_is_required() {
    // `build-enclave` without `--docker-uri` must be rejected.
    let parsed = create_app!()
        .try_get_matches_from(["nitro cli", "build-enclave", "--output-file", "image.eif"]);
    assert!(parsed.is_err())
}
#[test]
fn build_enclave_docker_dir_arg_is_not_required() {
    // `--docker-dir` is optional.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "build-enclave",
        "--docker-uri",
        "dkr.ecr.us-east-1.amazonaws.com/stronghold-develss",
        "--output-file",
        "image.eif",
    ]);
    assert!(parsed.is_ok())
}
#[test]
fn build_enclave_output_arg_is_required() {
    // `--output-file` is mandatory.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "build-enclave",
        "--docker-uri",
        "dkr.ecr.us-east-1.amazonaws.com/stronghold-develss",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn build_enclave_correct_command() {
    // Fully specified unsigned build parses fine.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "build-enclave",
        "--docker-uri",
        "dkr.ecr.us-east-1.amazonaws.com/stronghold-develss",
        "--docker-dir",
        "dir/",
        "--output-file",
        "image.eif",
    ]);
    assert!(parsed.is_ok())
}
#[test]
fn build_signed_enclave_correct_command() {
    // Signed build: certificate and key supplied together.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "build-enclave",
        "--docker-uri",
        "dkr.ecr.us-east-1.amazonaws.com/stronghold-develss",
        "--docker-dir",
        "dir/",
        "--output-file",
        "image.eif",
        "--signing-certificate",
        "cert.pem",
        "--private-key",
        "key.pem",
    ]);
    assert!(parsed.is_ok())
}
#[test]
fn build_signed_enclave_correct_command_kms_arn() {
    // A KMS key ARN is accepted in place of a local key file.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "build-enclave",
        "--docker-uri",
        "dkr.ecr.us-east-1.amazonaws.com/stronghold-develss",
        "--docker-dir",
        "dir/",
        "--output-file",
        "image.eif",
        "--signing-certificate",
        "cert.pem",
        "--private-key",
        "arn:aws:kms:us-west-2:111122223333:key/12345678-abcd-bcde-9876-ab0987654321",
    ]);
    assert!(parsed.is_ok())
}
#[test]
fn build_signed_enclave_missing_certificate() {
    // Providing a key without its certificate must be rejected.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "build-enclave",
        "--docker-uri",
        "dkr.ecr.us-east-1.amazonaws.com/stronghold-develss",
        "--docker-dir",
        "dir/",
        "--output-file",
        "image.eif",
        "--private-key",
        "key.pem",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn sign_enclave_correct_command() {
    // `sign-eif` needs the EIF path plus both signing materials.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "sign-eif",
        "--eif-path",
        "image.eif",
        "--signing-certificate",
        "cert.pem",
        "--private-key",
        "key.pem",
    ]);
    assert!(parsed.is_ok())
}
#[test]
fn sign_enclave_missing_certificate() {
    // Key without certificate must be rejected.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "sign-eif",
        "--eif-path",
        "image.eif",
        "--private-key",
        "key.pem",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn sign_enclave_missing_key() {
    // Certificate without key must be rejected.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "sign-eif",
        "--eif-path",
        "image.eif",
        "--signing-certificate",
        "cert.pem",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn build_enclave_with_metadata_correct_command() {
    // Build with name/version/metadata parses fine.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "build-enclave",
        "--docker-uri",
        "dkr.ecr.us-east-1.amazonaws.com/stronghold-develss",
        "--docker-dir",
        "dir/",
        "--output-file",
        "image.eif",
        "--name",
        "TestName",
        "--version",
        "4.0",
        "--metadata",
        "meta.json",
    ]);
    assert!(parsed.is_ok())
}
#[test]
fn build_enclave_with_metadata_file_is_required() {
    // `--metadata` without a file path is invalid.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "build-enclave",
        "--docker-uri",
        "dkr.ecr.us-east-1.amazonaws.com/stronghold-develss",
        "--docker-dir",
        "dir/",
        "--output-file",
        "image.eif",
        "--name",
        "TestName",
        "--version",
        "4.0",
        "--metadata",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn run_enclave_correct_command_with_eif_path() {
    // Fully specified run invocation parses fine.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "run-enclave",
        "--cpu-ids",
        "10000",
        "10001",
        "--memory",
        "512",
        "--eif-path",
        "dir/image.eif",
        "--enclave-cid",
        "1234",
        "--debug-mode",
    ]);
    assert!(parsed.is_ok())
}
#[test]
fn run_enclave_cpu_ids_arg_is_required() {
    // A CPU specification is mandatory.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "run-enclave",
        "--memory",
        "512",
        "--eif-path",
        "dir/image.eif",
        "--enclave-cid",
        "12345",
        "--debug-mode",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn run_enclave_cpu_ids_takes_value() {
    // `--cpu-ids` immediately followed by another flag is invalid.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "run-enclave",
        "--cpu-ids",
        "--memory",
        "512",
        "--eif-path",
        "dir/image.eif",
        "--enclave-cid",
        "12345",
        "--debug-mode",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn run_enclave_cpu_ids_takes_multiple_values() {
    // Multiple CPU ids after `--cpu-ids` are accepted.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "run-enclave",
        "--cpu-ids",
        "10000",
        "10001",
        "--memory",
        "512",
        "--eif-path",
        "dir/image.eif",
        "--enclave-cid",
        "12345",
        "--debug-mode",
    ]);
    assert!(parsed.is_ok())
}
#[test]
fn run_enclave_memory_arg_is_required() {
    // `--memory` is mandatory.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "run-enclave",
        "--cpu-ids",
        "10000",
        "--eif-path",
        "dir/image.eif",
        "--enclave-cid",
        "12345",
        "--debug-mode",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn run_enclave_memory_takes_value() {
    // `--memory` immediately followed by another flag is invalid.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "run-enclave",
        "--cpu-ids",
        "10000",
        "--memory",
        "--eif-path",
        "dir/image.eif",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn run_enclave_enclave_cid_takes_value() {
    // `--enclave-cid` without a value is invalid.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "run-enclave",
        "--cpu-ids",
        "10000",
        "--memory",
        "512",
        "--eif-path",
        "dir/image.eif",
        "--enclave-cid",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn run_enclave_console_does_not_take_value() {
    // `--debug-mode` is a flag; a trailing value must be rejected.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "run-enclave",
        "--cpu-ids",
        "10000",
        "--memory",
        "512",
        "--eif-path",
        "dir/image.eif",
        "--enclave-cid",
        "12345",
        "--debug-mode",
        "123",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn run_enclave_console_eif_path_is_required() {
    // An EIF path (or config) is mandatory.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "run-enclave",
        "--cpu-ids",
        "10000",
        "--memory",
        "512",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn run_enclave_eif_path_takes_value() {
    // `--eif-path` without a value is invalid.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "run-enclave",
        "--cpu-ids",
        "10000",
        "10001",
        "--memory",
        "512",
        "--eif-path",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn run_enclave_config_does_not_take_value() {
    // `--config` without a file path is invalid.
    let parsed = create_app!().try_get_matches_from(["nitro cli", "run-enclave", "--config"]);
    assert!(parsed.is_err())
}
#[test]
fn run_enclave_config_takes_multiple_values() {
    // Only one config file may be supplied.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "run-enclave",
        "--config",
        "config1.json",
        "config2.json",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn run_enclave_try_to_overwrite_config() {
    // `--config` conflicts with explicit resource arguments.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "run-enclave",
        "--config",
        "config.json",
        "--cpu-count",
        "2",
        "--memory",
        "1024",
        "--eif-path",
        "dir/image.eif",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn run_enclave_correct_command_with_name() {
    // A full run invocation with `--enclave-name` parses fine.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "run-enclave",
        "--cpu-ids",
        "10000",
        "10001",
        "--memory",
        "512",
        "--eif-path",
        "dir/image.eif",
        "--enclave-cid",
        "1234",
        "--debug-mode",
        "--enclave-name",
        "testName",
    ]);
    assert!(parsed.is_ok())
}
#[test]
fn run_enclave_name_takes_value() {
    // `--enclave-name` without a value is invalid.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "run-enclave",
        "--cpu-ids",
        "10000",
        "10001",
        "--memory",
        "512",
        "--eif-path",
        "dir/image.eif",
        "--enclave-cid",
        "1234",
        "--debug-mode",
        "--enclave-name",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn pcr_input_takes_value() {
    // `pcr --input` without a value is invalid.
    let parsed = create_app!().try_get_matches_from(["nitro cli", "pcr", "--input"]);
    assert!(parsed.is_err())
}
#[test]
fn pcr_certificate_takes_value() {
    // `pcr --signing-certificate` without a value is invalid.
    let parsed = create_app!().try_get_matches_from(["nitro cli", "pcr", "--signing-certificate"]);
    assert!(parsed.is_err())
}
#[test]
fn pcr_conflicting_arguments() {
    // `--signing-certificate` and `--input` are mutually exclusive.
    let parsed = create_app!().try_get_matches_from([
        "nitro cli",
        "pcr",
        "--signing-certificate",
        "cert.pem",
        "--input",
        "test.bin",
    ]);
    assert!(parsed.is_err())
}
#[test]
fn pcr_certificate_correct() {
    // PCR computation from a certificate alone parses fine.
    let parsed =
        create_app!().try_get_matches_from(["nitro cli", "pcr", "--signing-certificate", "cert.pem"]);
    assert!(parsed.is_ok())
}
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/tests/test_dev_driver.rs | tests/test_dev_driver.rs | // Copyright 2020-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(warnings)]
use libc::{VMADDR_CID_HOST, VMADDR_CID_LOCAL};
use std::fs::File;
use std::os::unix::io::{AsRawFd, RawFd};
use std::process::Command;
use driver_bindings::bindings::ne_enclave_start_info;
use nitro_cli::common::{NitroCliErrorEnum, NitroCliFailure, NitroCliResult};
use nitro_cli::enclave_proc::cpu_info::CpuInfo;
use nitro_cli::enclave_proc::resource_manager::{
EnclaveStartInfo, MemoryRegion, NE_ADD_VCPU, NE_CREATE_VM, NE_SET_USER_MEMORY_REGION,
NE_START_ENCLAVE,
};
use nitro_cli::enclave_proc::utils::MiB;
// Number of 2 MiB huge-page chunks used to back the test enclave's memory (48 * 2 MiB = 96 MiB).
const ENCLAVE_MEM_2MB_CHUNKS: u64 = 48;
// On aarch64, up to this many 32 MiB huge pages are tried first; the remainder
// is topped up with 2 MiB pages (see test_enclave_start / test_enclave_multiple_start).
#[cfg(target_arch = "aarch64")]
const ENCLAVE_MEM_32MB_CHUNKS: u64 = 3;
// Device node exposed by the nitro_enclaves kernel driver.
pub const NE_DEVICE_PATH: &str = "/dev/nitro_enclaves";
/// This is similar to `MemoryRegion`, except it doesn't implement `Drop`,
/// so tests can describe arbitrary (including deliberately invalid) regions
/// without any cleanup running when the value goes out of scope.
#[allow(dead_code)]
pub struct EnclaveMemoryRegion {
    /// Flags to determine the usage for the memory region.
    flags: u64,
    /// The region's size in bytes.
    mem_size: u64,
    /// The region's virtual address.
    mem_addr: u64,
}
impl EnclaveMemoryRegion {
fn new(flags: u64, mem_addr: u64, mem_size: u64) -> Self {
EnclaveMemoryRegion {
flags,
mem_size,
mem_addr,
}
}
fn new_from(region: &MemoryRegion) -> Self {
EnclaveMemoryRegion {
flags: 0,
mem_size: region.mem_size(),
mem_addr: region.mem_addr(),
}
}
}
/// Class that covers communication with the NE driver.
pub struct NitroEnclavesDeviceDriver {
    // NE device file (handle to /dev/nitro_enclaves); ioctls are issued on it.
    file: File,
}
impl NitroEnclavesDeviceDriver {
    /// Open the file descriptor for communicating with the NE driver.
    ///
    /// Fails with `FileOperationFailure` if the device node cannot be opened
    /// (driver not loaded, or insufficient permissions).
    pub fn new() -> NitroCliResult<Self> {
        Ok(NitroEnclavesDeviceDriver {
            file: File::open(NE_DEVICE_PATH).map_err(|e| {
                NitroCliFailure::new()
                    .add_subaction(format!("Could not open {NE_DEVICE_PATH}: {e}"))
                    .set_error_code(NitroCliErrorEnum::FileOperationFailure)
                    .set_file_and_line(file!(), line!())
                    .add_info(vec![NE_DEVICE_PATH, "Open"])
            })?,
        })
    }
    /// Allocate an enclave slot and return an enclave fd.
    ///
    /// NE_CREATE_VM returns the new enclave's descriptor and writes the slot
    /// id into `slot_uid`; both are validated before wrapping the fd.
    pub fn create_enclave(&mut self) -> NitroCliResult<NitroEnclave> {
        let mut slot_uid: u64 = 0;
        // This is safe because we are providing valid values.
        let enc_fd =
            unsafe { libc::ioctl(self.file.as_raw_fd(), NE_CREATE_VM as _, &mut slot_uid) };
        if enc_fd < 0 {
            return Err(NitroCliFailure::new()
                .add_subaction(format!("Could not create an enclave descriptor: {enc_fd}"))
                .set_error_code(NitroCliErrorEnum::IoctlFailure)
                .set_file_and_line(file!(), line!()));
        }
        if slot_uid == 0 {
            return Err(NitroCliFailure::new()
                .add_subaction("Obtained invalid slot ID".to_string())
                .set_error_code(NitroCliErrorEnum::IoctlFailure)
                .set_file_and_line(file!(), line!()));
        }
        // NitroEnclave::new is currently infallible, so this unwrap cannot fail.
        Ok(NitroEnclave::new(enc_fd).unwrap())
    }
}
/// Class for managing a Nitro Enclave provided by NitroEnclavesDeviceDriver.
pub struct NitroEnclave {
    // Enclave descriptor returned by NE_CREATE_VM; closed on drop.
    enc_fd: RawFd,
}
impl NitroEnclave {
    /// Wrap an enclave descriptor obtained from the driver.
    pub fn new(enc_fd: RawFd) -> NitroCliResult<Self> {
        Ok(NitroEnclave { enc_fd })
    }
    /// Close the enclave descriptor; panics if the close fails, since the
    /// tests must not silently leak enclave slots.
    fn release(&mut self) {
        // Close enclave descriptor.
        let rc = unsafe { libc::close(self.enc_fd) };
        if rc < 0 {
            panic!("Could not close enclave descriptor: {}.", rc)
        }
    }
    /// Register a user memory region with the enclave via
    /// NE_SET_USER_MEMORY_REGION. The driver validates alignment, size and
    /// flags; failures surface as `IoctlSetMemoryRegionFailure`.
    pub fn add_mem_region(&mut self, mem_region: EnclaveMemoryRegion) -> NitroCliResult<()> {
        let rc = unsafe { libc::ioctl(self.enc_fd, NE_SET_USER_MEMORY_REGION as _, &mem_region) };
        if rc < 0 {
            return Err(NitroCliFailure::new()
                .add_subaction(format!("Could not add memory region: {rc}"))
                .set_error_code(NitroCliErrorEnum::IoctlSetMemoryRegionFailure)
                .set_file_and_line(file!(), line!()));
        }
        Ok(())
    }
    /// Add a vCPU via NE_ADD_VCPU. A `cpu_id` of 0 lets the driver auto-pick
    /// from the CPU pool (see test_enclave_vcpu); the ioctl may rewrite the
    /// id, hence the mutable local copy.
    pub fn add_cpu(&mut self, cpu_id: u32) -> NitroCliResult<()> {
        let mut actual_cpu_id: u32 = cpu_id;
        let rc = unsafe { libc::ioctl(self.enc_fd, NE_ADD_VCPU as _, &mut actual_cpu_id) };
        if rc < 0 {
            return Err(NitroCliFailure::new()
                .add_subaction(format!("Could not add vCPU: {rc}"))
                .set_error_code(NitroCliErrorEnum::IoctlAddVcpuFailure)
                .set_file_and_line(file!(), line!()));
        }
        Ok(())
    }
    /// Boot the enclave via NE_START_ENCLAVE with the given start parameters.
    pub fn start(&mut self, start_info: EnclaveStartInfo) -> NitroCliResult<()> {
        let rc = unsafe { libc::ioctl(self.enc_fd, NE_START_ENCLAVE as _, &start_info) };
        if rc < 0 {
            return Err(NitroCliFailure::new()
                .add_subaction(format!("Could not start enclave: {rc}"))
                .set_error_code(NitroCliErrorEnum::IoctlEnclaveStartFailure)
                .set_file_and_line(file!(), line!()));
        }
        Ok(())
    }
}
impl Drop for NitroEnclave {
    fn drop(&mut self) {
        // Only release descriptors that are actually valid (non-negative).
        if self.enc_fd >= 0 {
            self.release();
        }
    }
}
// Class for checking the dmesg logs.
pub struct CheckDmesg {
    // Number of dmesg lines seen at the last record_current_line() call;
    // expect_no_changes() only inspects lines after this index.
    recorded_line: usize,
}
impl CheckDmesg {
    /// Create a checker with no recorded baseline yet.
    pub fn new() -> NitroCliResult<Self> {
        Ok(CheckDmesg { recorded_line: 0 })
    }
    /// Obtain the log lines from dmesg.
    ///
    /// Spawns the `dmesg` binary and splits its stdout on '\n'; panics if the
    /// process cannot be run or its output is not valid UTF-8.
    fn get_dmesg_lines(&mut self) -> NitroCliResult<Vec<String>> {
        let dmesg = Command::new("dmesg")
            .output()
            .expect("Failed to execute dmesg process");
        let message = String::from_utf8(dmesg.stdout).unwrap();
        let lines: Vec<String> = message.split('\n').map(|s| s.to_string()).collect();
        Ok(lines)
    }
    /// Record the current number of lines from dmesg.
    pub fn record_current_line(&mut self) -> NitroCliResult<()> {
        self.recorded_line = self.get_dmesg_lines().unwrap().len();
        Ok(())
    }
    /// Verify if dmesg number of lines changed from the last recorded line.
    ///
    /// Fails if any line appended since the baseline contains (case-insensitively)
    /// one of the suspicious keywords below.
    pub fn expect_no_changes(&mut self) -> NitroCliResult<()> {
        let checks = [
            "WARNING",
            "BUG",
            "ERROR",
            "FAILURE",
            "nitro_enclaves",
            // NE PCI device identifier
            "pci 0000:00:02.0",
        ];
        let lines = self.get_dmesg_lines().unwrap();
        for line in lines.iter().skip(self.recorded_line) {
            let upper_line = line.to_uppercase();
            for word in checks.iter() {
                if upper_line.contains(&word.to_uppercase()) {
                    return Err(NitroCliFailure::new()
                        .add_subaction(format!("Dmesg line: {line} contains: {word}"))
                        .set_error_code(NitroCliErrorEnum::IoctlFailure)
                        .set_file_and_line(file!(), line!()));
                }
            }
        }
        Ok(())
    }
}
#[cfg(test)]
mod test_dev_driver {
use super::*;
#[test]
pub fn test_ne_dev_open() {
let mut driver = NitroEnclavesDeviceDriver::new().expect("Failed to open NE device");
let enc_fd = unsafe { libc::ioctl(driver.file.as_raw_fd(), NE_CREATE_VM as _, 0) };
assert!(
enc_fd < 0,
"Should not have been able to create enclave descriptor"
);
// Test unexpected ioctl.
let enc_fd =
unsafe { libc::ioctl(driver.file.as_raw_fd(), NE_SET_USER_MEMORY_REGION as _, 0) };
assert!(
enc_fd < 0,
"Should not have been able to create enclave with invalid ioctl"
);
let mut slot_alloc_num: u64 = 1;
if let Ok(value) = std::env::var("NE_SLOT_ALLOC_NUM") {
if let Ok(value) = value.parse::<u64>() {
slot_alloc_num = value;
}
}
let mut check_dmesg = CheckDmesg::new().expect("Failed to obtain dmesg object");
check_dmesg
.record_current_line()
.expect("Failed to record current line");
for _i in 0..slot_alloc_num {
// Allocate Nitro Enclave slot and free it.
let _enclave = driver.create_enclave().unwrap();
}
check_dmesg.expect_no_changes().unwrap();
}
    /// Exercises NE_SET_USER_MEMORY_REGION validation: bad address/size/flags
    /// must be rejected, a valid hugetlbfs region must be accepted exactly once.
    #[test]
    pub fn test_enclave_memory() {
        let mut driver = NitroEnclavesDeviceDriver::new().expect("Failed to open NE device");
        let mut enclave = driver.create_enclave().unwrap();
        // Add invalid memory region.
        let result = enclave.add_mem_region(EnclaveMemoryRegion::new(0, 0, 2 * MiB));
        assert!(result.is_err());
        // Create a memory region using hugetlbfs.
        let region = MemoryRegion::new(libc::MAP_HUGE_2MB).unwrap();
        // Add unaligned memory region.
        let result = enclave.add_mem_region(EnclaveMemoryRegion::new(
            0,
            region.mem_addr() + 1,
            region.mem_size(),
        ));
        assert!(result.is_err());
        // Add wrongly sized memory region of 1 MiB.
        let result = enclave.add_mem_region(EnclaveMemoryRegion::new(
            0,
            region.mem_addr(),
            region.mem_size() / 2,
        ));
        assert!(result.is_err());
        // Add wrongly sized memory region of double the memory size.
        let result = enclave.add_mem_region(EnclaveMemoryRegion::new(
            0,
            region.mem_addr(),
            region.mem_size() * 2,
        ));
        assert!(result.is_err());
        // Add wrongly sized memory region of max value multiple of 2 MiB.
        let result = enclave.add_mem_region(EnclaveMemoryRegion::new(
            0,
            region.mem_addr(),
            u64::MAX - (2 * 1024 * 1024) + 1,
        ));
        assert!(result.is_err());
        // Add wrong memory region with address out of range.
        let result = enclave.add_mem_region(EnclaveMemoryRegion::new(
            0,
            region.mem_addr() + region.mem_size(),
            region.mem_size(),
        ));
        assert!(result.is_err());
        let mut check_dmesg = CheckDmesg::new().expect("Failed to obtain dmesg object");
        check_dmesg
            .record_current_line()
            .expect("Failed to record current line");
        // Correctly add the memory region.
        let region = MemoryRegion::new(libc::MAP_HUGE_2MB).unwrap();
        let result = enclave.add_mem_region(EnclaveMemoryRegion::new_from(&region));
        assert!(result.is_ok());
        check_dmesg.expect_no_changes().unwrap();
        // Add the same memory region twice.
        let result = enclave.add_mem_region(EnclaveMemoryRegion::new_from(&region));
        assert!(result.is_err());
        // Add a memory region with invalid flags.
        let region = MemoryRegion::new(libc::MAP_HUGE_2MB).unwrap();
        let result = enclave.add_mem_region(EnclaveMemoryRegion::new(
            1024,
            region.mem_addr(),
            region.mem_size(),
        ));
        assert!(result.is_err());
    }
#[test]
pub fn test_enclave_vcpu() {
let mut driver = NitroEnclavesDeviceDriver::new().expect("Failed to open NE device");
let mut enclave = driver.create_enclave().unwrap();
let cpu_info = CpuInfo::new().expect("Failed to obtain CpuInfo.");
// Add an invalid cpu id.
let result = enclave.add_cpu(u32::MAX);
assert!(result.is_err());
let mut candidates = cpu_info.get_cpu_candidates();
// Instance does not have the appropriate number of cpus.
if candidates.is_empty() {
return;
}
let cpu_id = candidates.pop().unwrap();
let mut check_dmesg = CheckDmesg::new().expect("Failed to obtain dmesg object");
check_dmesg
.record_current_line()
.expect("Failed to record current line");
// Insert the first valid cpu id.
let result = enclave.add_cpu(cpu_id);
assert!(result.is_ok());
check_dmesg.expect_no_changes().unwrap();
// Try inserting the cpu twice.
let result = enclave.add_cpu(cpu_id);
assert!(result.is_err());
check_dmesg
.record_current_line()
.expect("Failed to record current line");
// Add all remaining cpus.
for cpu in &candidates {
let result = enclave.add_cpu(*cpu);
assert!(result.is_ok());
}
check_dmesg.expect_no_changes().unwrap();
// Clear the enclave.
drop(enclave);
let mut enclave = driver.create_enclave().unwrap();
check_dmesg
.record_current_line()
.expect("Failed to record current line");
// Add an auto-chosen cpu from the pool.
let result = enclave.add_cpu(0);
assert!(result.is_ok());
check_dmesg.expect_no_changes().unwrap();
check_dmesg
.record_current_line()
.expect("Failed to record current line");
// Add all remaining auto-chosen cpus.
for _i in 0..candidates.len() {
let result = enclave.add_cpu(0);
assert!(result.is_ok());
}
check_dmesg.expect_no_changes().unwrap();
// Add one more cpu than the maximum available in the pool.
let result = enclave.add_cpu(0);
assert!(result.is_err());
}
#[test]
pub fn test_enclave_start() {
let mut mem_regions = Vec::new();
let mut driver = NitroEnclavesDeviceDriver::new().expect("Failed to open NE device");
let mut enclave = driver.create_enclave().unwrap();
// Start enclave without resources.
let result = enclave.start(EnclaveStartInfo::default());
assert!(result.is_err());
// Allocate memory for the enclave.
#[cfg(target_arch = "x86_64")]
for _i in 0..ENCLAVE_MEM_2MB_CHUNKS {
mem_regions.push(MemoryRegion::new(libc::MAP_HUGE_2MB).unwrap());
}
#[cfg(target_arch = "aarch64")]
{
let mut mem_2mb_chunks = ENCLAVE_MEM_2MB_CHUNKS;
for _i in 0..ENCLAVE_MEM_32MB_CHUNKS {
let region = MemoryRegion::new(libc::MAP_HUGE_32MB);
if region.is_err() {
break;
}
mem_regions.push(region.unwrap());
mem_2mb_chunks = mem_2mb_chunks - (32 / 2);
}
for _i in 0..mem_2mb_chunks {
mem_regions.push(MemoryRegion::new(libc::MAP_HUGE_2MB).unwrap());
}
}
// Add memory to the enclave.
for region in &mut mem_regions {
let result = enclave.add_mem_region(EnclaveMemoryRegion::new_from(region));
assert!(result.is_ok());
}
// Start the enclave without cpus.
let result = enclave.start(EnclaveStartInfo::default());
assert!(result.is_err());
let cpu_info = CpuInfo::new().expect("Failed to obtain CpuInfo.");
let candidates = cpu_info.get_cpu_candidates();
// Instance does not have the appropriate number of cpus.
if candidates.len() < 2 {
return;
}
// Clear the enclave.
drop(enclave);
let mut enclave = driver.create_enclave().unwrap();
for cpu in &candidates {
let result = enclave.add_cpu(*cpu);
assert!(result.is_ok());
}
// Start enclave without memory.
let result = enclave.start(EnclaveStartInfo::default());
assert!(result.is_err());
drop(enclave);
let mut enclave = driver.create_enclave().unwrap();
// Add memory to the enclave.
for region in &mut mem_regions {
let result = enclave.add_mem_region(EnclaveMemoryRegion::new_from(region));
assert!(result.is_ok());
}
// Add the first available cpu.
let result = enclave.add_cpu(candidates[0]);
assert!(result.is_ok());
// Start without cpu pair.
#[cfg(target_arch = "aarch64")]
let mut check_dmesg = CheckDmesg::new().expect("Failed to obtain dmesg object");
#[cfg(target_arch = "aarch64")]
check_dmesg
.record_current_line()
.expect("Failed to record current line");
let result = enclave.start(EnclaveStartInfo::default());
#[cfg(target_arch = "x86_64")]
assert!(result.is_err());
#[cfg(target_arch = "aarch64")]
assert_eq!(result.is_err(), false);
#[cfg(target_arch = "aarch64")]
check_dmesg.expect_no_changes().unwrap();
#[cfg(target_arch = "aarch64")]
drop(enclave);
#[cfg(target_arch = "aarch64")]
let mut enclave = driver.create_enclave().unwrap();
// Add memory to the enclave.
#[cfg(target_arch = "aarch64")]
for region in &mut mem_regions {
let result = enclave.add_mem_region(EnclaveMemoryRegion::new_from(region));
assert_eq!(result.is_err(), false);
}
// Add the first available cpu.
#[cfg(target_arch = "aarch64")]
let result = enclave.add_cpu(candidates[0]);
#[cfg(target_arch = "aarch64")]
assert_eq!(result.is_err(), false);
// Add the first cpu pair.
let result = enclave.add_cpu(candidates[1]);
assert!(result.is_ok());
// Start with an invalid flag.
let enclave_start_info = ne_enclave_start_info {
flags: 1234,
..Default::default()
};
let result = enclave.start(enclave_start_info);
assert!(result.is_err());
// Start with an invalid CID.
let mut enclave_start_info = ne_enclave_start_info {
enclave_cid: VMADDR_CID_LOCAL as u64,
..Default::default()
};
let result = enclave.start(enclave_start_info);
assert!(result.is_err());
enclave_start_info.enclave_cid = VMADDR_CID_HOST as u64;
let result = enclave.start(enclave_start_info);
assert!(result.is_err());
enclave_start_info.enclave_cid = u32::MAX as u64;
let result = enclave.start(enclave_start_info);
assert!(result.is_err());
enclave_start_info.enclave_cid = u32::MAX as u64 + 1234_u64;
let result = enclave.start(enclave_start_info);
assert!(result.is_err());
let mut check_dmesg = CheckDmesg::new().expect("Failed to obtain dmesg object");
check_dmesg
.record_current_line()
.expect("Failed to record current line");
// Start the enclave.
let result = enclave.start(EnclaveStartInfo::default());
assert!(result.is_ok());
check_dmesg.expect_no_changes().unwrap();
// Try starting an already running enclave.
let result = enclave.start(EnclaveStartInfo::default());
assert!(result.is_err());
// Try adding an already added memory region
// after the enclave start.
let result = enclave.add_mem_region(EnclaveMemoryRegion::new_from(&mem_regions[0]));
assert!(result.is_err());
// Try adding a new memory region after the enclave start.
let result = enclave.add_mem_region(EnclaveMemoryRegion::new_from(
&MemoryRegion::new(libc::MAP_HUGE_2MB).unwrap(),
));
assert!(result.is_err());
// Try adding an already added vcpu after enclave start.
let result = enclave.add_cpu(candidates[0]);
assert!(result.is_err());
// Try adding a new vcpu after enclave start.
if candidates.len() >= 3 {
let result = enclave.add_cpu(candidates[2]);
assert!(result.is_err());
}
}
#[test]
pub fn test_enclave_multiple_start() {
let mut mem_regions = Vec::new();
let mut driver = NitroEnclavesDeviceDriver::new().expect("Failed to open NE device");
// Allocate memory for the enclave.
#[cfg(target_arch = "x86_64")]
for _i in 0..ENCLAVE_MEM_2MB_CHUNKS {
mem_regions.push(MemoryRegion::new(libc::MAP_HUGE_2MB).unwrap());
}
#[cfg(target_arch = "aarch64")]
{
let mut mem_2mb_chunks = ENCLAVE_MEM_2MB_CHUNKS;
for _i in 0..ENCLAVE_MEM_32MB_CHUNKS {
let region = MemoryRegion::new(libc::MAP_HUGE_32MB);
if region.is_err() {
break;
}
mem_regions.push(region.unwrap());
mem_2mb_chunks = mem_2mb_chunks - (32 / 2);
}
for _i in 0..mem_2mb_chunks {
mem_regions.push(MemoryRegion::new(libc::MAP_HUGE_2MB).unwrap());
}
}
let cpu_info = CpuInfo::new().expect("Failed to obtain CpuInfo.");
let candidates = cpu_info.get_cpu_candidates();
// Instance does not have the appropriate number of cpus.
if candidates.len() < 2 {
return;
}
let mut start_num: u64 = 1;
if let Ok(value) = std::env::var("NE_MULTIPLE_START_NUM") {
if let Ok(value) = value.parse::<u64>() {
start_num = value;
}
}
for _i in 0..start_num {
let mut enclave = driver.create_enclave().unwrap();
// Add memory to the enclave.
for region in &mut mem_regions {
let result = enclave.add_mem_region(EnclaveMemoryRegion::new_from(region));
assert!(result.is_ok());
}
// Add cpus to the enclave.
for cpu in &candidates {
let result = enclave.add_cpu(*cpu);
assert!(result.is_ok());
}
// Start and stop the enclave
let result = enclave.start(EnclaveStartInfo::default());
assert!(result.is_ok());
}
}
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/samples/command_executer/src/protocol_helpers.rs | samples/command_executer/src/protocol_helpers.rs | use byteorder::{ByteOrder, LittleEndian};
use nix::sys::socket::MsgFlags;
use nix::sys::socket::{recv, send};
use std::convert::TryInto;
use std::os::unix::io::RawFd;
/// Send `val` to the peer as a little-endian u64 inside a 9-byte frame.
///
/// NOTE(review): the frame is 9 bytes even though a u64 only needs 8 — the
/// trailing byte is always zero. `recv_u64` reads the same 9-byte frame, so
/// both ends agree; this is part of the wire format and must not be changed
/// on one side only.
pub fn send_u64(fd: RawFd, val: u64) -> Result<(), String> {
    let mut buf = [0u8; 9];
    LittleEndian::write_u64(&mut buf, val);
    send_loop(fd, &buf, 9)?;
    Ok(())
}
/// Receive a 9-byte frame from the peer and decode its first 8 bytes as a
/// little-endian u64 (the 9th byte is padding; see `send_u64`).
pub fn recv_u64(fd: RawFd) -> Result<u64, String> {
    let mut buf = [0u8; 9];
    recv_loop(fd, &mut buf, 9)?;
    let val = LittleEndian::read_u64(&buf);
    Ok(val)
}
pub fn send_i32(fd: RawFd, val: i32) -> Result<(), String> {
let mut buf = [0u8; 4];
LittleEndian::write_i32(&mut buf, val);
send_loop(fd, &buf, 4)?;
Ok(())
}
pub fn recv_i32(fd: RawFd) -> Result<i32, String> {
let mut buf = [0u8; 4];
recv_loop(fd, &mut buf, 4)?;
let val = LittleEndian::read_i32(&buf);
Ok(val)
}
/// Write exactly `len` bytes of `buf` to `fd`, retrying on partial sends
/// and on EINTR; any other socket error aborts the transfer.
pub fn send_loop(fd: RawFd, buf: &[u8], len: u64) -> Result<(), String> {
    let total: usize = len.try_into().map_err(|err| format!("{err:?}"))?;
    let mut sent = 0usize;
    while sent < total {
        match send(fd, &buf[sent..total], MsgFlags::empty()) {
            Ok(n) => sent += n,
            // Interrupted by a signal: retry without having made progress.
            Err(nix::errno::Errno::EINTR) => {}
            Err(err) => return Err(format!("{err:?}")),
        }
    }
    Ok(())
}
/// Read exactly `len` bytes from `fd` into `buf`, retrying on partial reads
/// and on EINTR; a zero-byte read means the peer closed the connection.
pub fn recv_loop(fd: RawFd, buf: &mut [u8], len: u64) -> Result<(), String> {
    let total: usize = len.try_into().map_err(|err| format!("{err:?}"))?;
    let mut received = 0usize;
    while received < total {
        match recv(fd, &mut buf[received..total], MsgFlags::empty()) {
            Ok(0) => return Err(format!("{:?}", "Peer closed connection")),
            Ok(n) => received += n,
            // Interrupted by a signal: retry without having made progress.
            Err(nix::errno::Errno::EINTR) => {}
            Err(err) => return Err(format!("{err:?}")),
        }
    }
    Ok(())
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/samples/command_executer/src/lib.rs | samples/command_executer/src/lib.rs | pub mod command_parser;
pub mod protocol_helpers;
pub mod utils;
use command_parser::{CommandOutput, FileArgs, ListenArgs, RunArgs};
use protocol_helpers::{recv_loop, recv_u64, send_loop, send_u64};
use nix::sys::socket::listen as listen_vsock;
use nix::sys::socket::{accept, bind, connect, shutdown, socket};
use nix::sys::socket::{AddressFamily, Shutdown, SockFlag, SockType, VsockAddr};
use nix::unistd::close;
use num_derive::FromPrimitive;
use num_traits::FromPrimitive;
use std::cmp::min;
use std::convert::TryInto;
use std::fs::File;
use std::io::{Read, Write};
use std::os::unix::io::{AsRawFd, RawFd};
use std::process::Command;
// Wildcard CID used when binding the vsock listener.
pub const VMADDR_CID_ANY: u32 = 0xFFFFFFFF;
// Size of the scratch buffer used for socket and file transfers.
pub const BUF_MAX_LEN: usize = 8192;
// Maximum number of pending connections on the listening socket.
pub const BACKLOG: usize = 128;
// Connect retries before giving up (exponential backoff between attempts).
const MAX_CONNECTION_ATTEMPTS: usize = 5;
// Wire-protocol command identifiers, sent as the first u64 of every request.
// The discriminant values ARE the protocol: do not reorder the variants.
#[derive(Debug, Clone, FromPrimitive)]
enum CmdId {
    RunCmd = 0,
    RecvFile,
    SendFile,
    RunCmdNoWait,
}
// RAII wrapper around a raw vsock descriptor; shutdown+close happen on drop.
struct VsockSocket {
    socket_fd: RawFd,
}
impl VsockSocket {
fn new(socket_fd: RawFd) -> Self {
VsockSocket { socket_fd }
}
}
impl Drop for VsockSocket {
    fn drop(&mut self) {
        // Shut down both directions, then release the descriptor. Failures
        // are only logged: Drop cannot propagate errors.
        shutdown(self.socket_fd, Shutdown::Both)
            .unwrap_or_else(|e| eprintln!("Failed to shut socket down: {e:?}"));
        close(self.socket_fd).unwrap_or_else(|e| eprintln!("Failed to close socket: {e:?}"));
    }
}
impl AsRawFd for VsockSocket {
    /// Expose the underlying vsock descriptor without giving up ownership.
    fn as_raw_fd(&self) -> RawFd {
        self.socket_fd
    }
}
/// Connect to `cid:port` over vsock, retrying up to MAX_CONNECTION_ATTEMPTS
/// times with exponential backoff (1, 2, 4, 8, 16 seconds). Returns the last
/// connect error message if every attempt fails.
fn vsock_connect(cid: u32, port: u32) -> Result<VsockSocket, String> {
    let sockaddr = VsockAddr::new(cid, port);
    let mut err_msg = String::new();
    for i in 0..MAX_CONNECTION_ATTEMPTS {
        // A fresh socket per attempt; the RAII wrapper closes it on failure.
        let vsocket = VsockSocket::new(
            socket(
                AddressFamily::Vsock,
                SockType::Stream,
                SockFlag::empty(),
                None,
            )
            .map_err(|err| format!("Failed to create the socket: {err:?}"))?,
        );
        match connect(vsocket.as_raw_fd(), &sockaddr) {
            Ok(_) => return Ok(vsocket),
            Err(e) => err_msg = format!("Failed to connect: {e}"),
        }
        // Note: this also sleeps after the final failed attempt.
        std::thread::sleep(std::time::Duration::from_secs(1 << i));
    }
    Err(err_msg)
}
/// Handle a `RunCmd`/`RunCmdNoWait` request: read a shell command from the
/// peer, execute it via `sh -c`, and reply with a JSON-serialized
/// `CommandOutput`. With `no_wait` the command is only spawned and an empty
/// zero-exit-code output is reported immediately.
fn run_server(fd: RawFd, no_wait: bool) -> Result<(), String> {
    // recv command
    let len = recv_u64(fd)?;
    // Guard against oversized lengths from the peer: recv_loop slices the
    // fixed buffer with this value and would panic past BUF_MAX_LEN.
    if len > BUF_MAX_LEN as u64 {
        return Err(format!("Command length {len} exceeds {BUF_MAX_LEN} bytes"));
    }
    let mut buf = [0u8; BUF_MAX_LEN];
    recv_loop(fd, &mut buf, len)?;
    let len_usize = len.try_into().map_err(|err| format!("{err:?}"))?;
    let command = std::str::from_utf8(&buf[0..len_usize]).map_err(|err| format!("{err:?}"))?;
    // execute command
    let command_output = if no_wait {
        // Fire-and-forget: report only whether the spawn itself succeeded.
        #[rustfmt::skip]
        let output = Command::new("sh")
            .arg("-c")
            .arg(command)
            .spawn();
        if output.is_err() {
            CommandOutput::new(
                String::new(),
                format!("Could not execute the command {command}"),
                1,
            )
        } else {
            CommandOutput::new(String::new(), String::new(), 0)
        }
    } else {
        let output = Command::new("sh")
            .arg("-c")
            .arg(command)
            .output()
            .map_err(|err| format!("Could not execute the command {command}: {err:?}"))?;
        CommandOutput::new_from(output)?
    };
    // send output
    let json_output = serde_json::to_string(&command_output)
        .map_err(|err| format!("Could not serialize the output: {err:?}"))?;
    let buf = json_output.as_bytes();
    let len: u64 = buf.len().try_into().map_err(|err| format!("{err:?}"))?;
    send_u64(fd, len)?;
    send_loop(fd, buf, len)?;
    Ok(())
}
/// Handle a `RecvFile` request: read the requested path from the peer, send
/// back the file's size, then stream its contents in BUF_MAX_LEN chunks.
/// (Named from the client's perspective — the client *receives* this file.)
fn recv_file_server(fd: RawFd) -> Result<(), String> {
    // recv file path
    let len = recv_u64(fd)?;
    // Reject path lengths the scratch buffer cannot hold; recv_loop would
    // otherwise panic on the out-of-range slice.
    if len > BUF_MAX_LEN as u64 {
        return Err(format!("Path length {len} exceeds {BUF_MAX_LEN} bytes"));
    }
    let mut buf = [0u8; BUF_MAX_LEN];
    recv_loop(fd, &mut buf, len)?;
    let len_usize = len.try_into().map_err(|err| format!("{err:?}"))?;
    let path = std::str::from_utf8(&buf[0..len_usize]).map_err(|err| format!("{err:?}"))?;
    let mut file = File::open(path).map_err(|err| format!("Could not open file {err:?}"))?;
    let filesize = file
        .metadata()
        .map_err(|err| format!("Could not get file metadata {err:?}"))?
        .len();
    send_u64(fd, filesize)?;
    println!("Sending file {path} - size {filesize}");
    let mut progress: u64 = 0;
    let mut tmpsize: u64;
    while progress < filesize {
        // Send min(buffer size, remaining bytes) per iteration.
        tmpsize = buf.len().try_into().map_err(|err| format!("{err:?}"))?;
        tmpsize = min(tmpsize, filesize - progress);
        file.read_exact(&mut buf[..tmpsize.try_into().map_err(|err| format!("{err:?}"))?])
            .map_err(|err| format!("Could not read {err:?}"))?;
        send_loop(fd, &buf, tmpsize)?;
        progress += tmpsize
    }
    Ok(())
}
/// Handle a `SendFile` request: read the destination path and the file size
/// from the peer, then receive the contents in BUF_MAX_LEN chunks and write
/// them to the local file. (Named from the client's perspective.)
fn send_file_server(fd: RawFd) -> Result<(), String> {
    // recv file path
    let len = recv_u64(fd)?;
    // Reject path lengths the scratch buffer cannot hold; recv_loop would
    // otherwise panic on the out-of-range slice.
    if len > BUF_MAX_LEN as u64 {
        return Err(format!("Path length {len} exceeds {BUF_MAX_LEN} bytes"));
    }
    let mut buf = [0u8; BUF_MAX_LEN];
    recv_loop(fd, &mut buf, len)?;
    let len_usize = len.try_into().map_err(|err| format!("{err:?}"))?;
    let path = std::str::from_utf8(&buf[0..len_usize]).map_err(|err| format!("{err:?}"))?;
    let mut file = File::create(path).map_err(|err| format!("Could not open file {err:?}"))?;
    // Receive filesize
    let filesize = recv_u64(fd)?;
    println!("Receiving file {path} - size {filesize}");
    let mut progress: u64 = 0;
    let mut tmpsize: u64;
    while progress < filesize {
        // Receive min(buffer size, remaining bytes) per iteration.
        tmpsize = buf.len().try_into().map_err(|err| format!("{err:?}"))?;
        tmpsize = min(tmpsize, filesize - progress);
        recv_loop(fd, &mut buf, tmpsize)?;
        file.write_all(&buf[..tmpsize.try_into().map_err(|err| format!("{err:?}"))?])
            .map_err(|err| format!("Could not write {err:?}"))?;
        progress += tmpsize
    }
    Ok(())
}
pub fn listen(args: ListenArgs) -> Result<(), String> {
let socket_fd = socket(
AddressFamily::Vsock,
SockType::Stream,
SockFlag::empty(),
None,
)
.map_err(|err| format!("Create socket failed: {err:?}"))?;
let sockaddr = VsockAddr::new(VMADDR_CID_ANY, args.port);
bind(socket_fd, &sockaddr).map_err(|err| format!("Bind failed: {err:?}"))?;
listen_vsock(socket_fd, BACKLOG).map_err(|err| format!("Listen failed: {err:?}"))?;
loop {
let fd = accept(socket_fd).map_err(|err| format!("Accept failed: {err:?}"))?;
//cmd id
let cmdid = match recv_u64(fd) {
Ok(id_u64) => match CmdId::from_u64(id_u64) {
Some(c) => c,
_ => {
eprintln!("Error no such command");
continue;
}
},
Err(e) => {
eprintln!("Error {e}");
continue;
}
};
match cmdid {
CmdId::RunCmd => {
if let Err(e) = run_server(fd, false) {
eprintln!("Error {e}");
}
}
CmdId::RecvFile => {
if let Err(e) = recv_file_server(fd) {
eprintln!("Error {e}");
}
}
CmdId::SendFile => {
if let Err(e) = send_file_server(fd) {
eprintln!("Error {e}");
}
}
CmdId::RunCmdNoWait => {
if let Err(e) = run_server(fd, true) {
eprintln!("Error {e}");
}
}
}
}
}
/// Send a command to the enclave-side listener, print the command's stdout
/// and stderr locally, and return its exit code (0 when unknown).
pub fn run(args: RunArgs) -> Result<i32, String> {
    let vsocket = vsock_connect(args.cid, args.port)?;
    let socket_fd = vsocket.as_raw_fd();
    // Send command id
    if args.no_wait {
        send_u64(socket_fd, CmdId::RunCmdNoWait as u64)?;
    } else {
        send_u64(socket_fd, CmdId::RunCmd as u64)?;
    }
    // send command
    let buf = args.command.as_bytes();
    let len: u64 = buf.len().try_into().map_err(|err| format!("{err:?}"))?;
    send_u64(socket_fd, len)?;
    send_loop(socket_fd, buf, len)?;
    // recv output
    let mut buf = [0u8; BUF_MAX_LEN];
    let len = recv_u64(socket_fd)?;
    // Accumulate the raw bytes and decode once at the end: decoding each
    // chunk separately failed whenever a multi-byte UTF-8 sequence straddled
    // a BUF_MAX_LEN chunk boundary.
    let mut raw_output: Vec<u8> = Vec::new();
    let mut to_recv = len;
    while to_recv > 0 {
        let recv_len = min(BUF_MAX_LEN as u64, to_recv);
        recv_loop(socket_fd, &mut buf, recv_len)?;
        to_recv -= recv_len;
        let chunk_len: usize = recv_len.try_into().map_err(|err| format!("{err:?}"))?;
        raw_output.extend_from_slice(&buf[0..chunk_len]);
    }
    let json_output = String::from_utf8(raw_output).map_err(|err| format!("{err:?}"))?;
    let output: CommandOutput = serde_json::from_str(json_output.as_str())
        .map_err(|err| format!("Could not deserialize the output: {err:?}"))?;
    print!("{}", output.stdout);
    eprint!("{}", output.stderr);
    Ok(output.rc.unwrap_or_default())
}
/// Client side of `RecvFile`: ask the listener for `args.remotefile` and
/// write the streamed contents to `args.localfile`.
pub fn recv_file(args: FileArgs) -> Result<(), String> {
    let mut file =
        File::create(&args.localfile).map_err(|err| format!("Could not open localfile {err:?}"))?;
    let vsocket = vsock_connect(args.cid, args.port)?;
    let socket_fd = vsocket.as_raw_fd();
    // Send command id
    send_u64(socket_fd, CmdId::RecvFile as u64)?;
    // send remotefile path
    let buf = args.remotefile.as_bytes();
    let len: u64 = buf.len().try_into().map_err(|err| format!("{err:?}"))?;
    send_u64(socket_fd, len)?;
    send_loop(socket_fd, buf, len)?;
    // Receive filesize
    let filesize = recv_u64(socket_fd)?;
    println!(
        "Receiving file {}(saving to {}) - size {}",
        &args.remotefile,
        &args.localfile[..],
        filesize
    );
    let mut progress: u64 = 0;
    let mut tmpsize: u64;
    let mut buf = [0u8; BUF_MAX_LEN];
    while progress < filesize {
        // Receive min(buffer size, remaining bytes) per iteration.
        tmpsize = buf.len().try_into().map_err(|err| format!("{err:?}"))?;
        tmpsize = min(tmpsize, filesize - progress);
        recv_loop(socket_fd, &mut buf, tmpsize)?;
        file.write_all(&buf[..tmpsize.try_into().map_err(|err| format!("{err:?}"))?])
            .map_err(|err| format!("Could not write {err:?}"))?;
        progress += tmpsize
    }
    Ok(())
}
/// Client side of `SendFile`: stream `args.localfile` to the listener, which
/// writes it to `args.remotefile`.
pub fn send_file(args: FileArgs) -> Result<(), String> {
    let mut file =
        File::open(&args.localfile).map_err(|err| format!("Could not open localfile {err:?}"))?;
    let vsocket = vsock_connect(args.cid, args.port)?;
    let socket_fd = vsocket.as_raw_fd();
    // Send command id
    send_u64(socket_fd, CmdId::SendFile as u64)?;
    // send remotefile path
    let buf = args.remotefile.as_bytes();
    let len: u64 = buf.len().try_into().map_err(|err| format!("{err:?}"))?;
    send_u64(socket_fd, len)?;
    send_loop(socket_fd, buf, len)?;
    let filesize = file
        .metadata()
        // Fixed error-message typo: "metadate" -> "metadata".
        .map_err(|err| format!("Could not get file metadata {err:?}"))?
        .len();
    send_u64(socket_fd, filesize)?;
    println!(
        "Sending file {}(sending to {}) - size {}",
        &args.localfile,
        &args.remotefile[..],
        filesize
    );
    let mut buf = [0u8; BUF_MAX_LEN];
    let mut progress: u64 = 0;
    let mut tmpsize: u64;
    while progress < filesize {
        // Send min(buffer size, remaining bytes) per iteration.
        tmpsize = buf.len().try_into().map_err(|err| format!("{err:?}"))?;
        tmpsize = min(tmpsize, filesize - progress);
        file.read_exact(&mut buf[..tmpsize.try_into().map_err(|err| format!("{err:?}"))?])
            .map_err(|err| format!("Could not read {err:?}"))?;
        send_loop(socket_fd, &buf, tmpsize)?;
        progress += tmpsize
    }
    Ok(())
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/samples/command_executer/src/command_parser.rs | samples/command_executer/src/command_parser.rs | use serde::{Deserialize, Serialize};
use std::process::Output;
use clap::ArgMatches;
/// Arguments for the `listen` subcommand.
#[derive(Debug, Clone)]
pub struct ListenArgs {
    // vsock port to listen on.
    pub port: u32,
}
impl ListenArgs {
    /// Build `ListenArgs` from parsed CLI matches.
    pub fn new_with(args: &ArgMatches) -> Result<Self, String> {
        let port = parse_port(args)?;
        Ok(ListenArgs { port })
    }
}
/// Arguments for the `run` subcommand.
#[derive(Debug, Clone)]
pub struct RunArgs {
    // Target vsock CID.
    pub cid: u32,
    // Target vsock port.
    pub port: u32,
    // Shell command to execute remotely.
    pub command: String,
    // When true, spawn without waiting for the command's output.
    pub no_wait: bool,
}
impl RunArgs {
    /// Build `RunArgs` from parsed CLI matches.
    pub fn new_with(args: &ArgMatches) -> Result<Self, String> {
        let cid = parse_cid(args)?;
        let port = parse_port(args)?;
        let command = parse_command(args)?;
        let no_wait = parse_no_wait(args);
        Ok(RunArgs {
            cid,
            port,
            command,
            no_wait,
        })
    }
}
/// Arguments for the `recv-file` / `send-file` subcommands.
#[derive(Debug, Clone)]
pub struct FileArgs {
    // Target vsock CID.
    pub cid: u32,
    // Target vsock port.
    pub port: u32,
    // Path on this side of the connection.
    pub localfile: String,
    // Path on the remote side of the connection.
    pub remotefile: String,
}
impl FileArgs {
    /// Build `FileArgs` from parsed CLI matches.
    pub fn new_with(args: &ArgMatches) -> Result<Self, String> {
        let cid = parse_cid(args)?;
        let port = parse_port(args)?;
        let localfile = parse_localfile(args)?;
        let remotefile = parse_remotefile(args)?;
        Ok(FileArgs {
            cid,
            port,
            localfile,
            remotefile,
        })
    }
}
/// Result of a remotely executed command, serialized as JSON on the wire.
#[derive(Serialize, Deserialize, Debug)]
pub struct CommandOutput {
    // Captured standard output.
    pub stdout: String,
    // Captured standard error.
    pub stderr: String,
    // Exit code; None when the process was terminated without one.
    pub rc: Option<i32>,
}
impl CommandOutput {
    /// Build an output record from explicit stdout/stderr/exit-code values.
    pub fn new(stdout: String, stderr: String, code: i32) -> Self {
        Self {
            stdout,
            stderr,
            rc: Some(code),
        }
    }

    /// Convert a finished `std::process::Output`; fails if either stream is
    /// not valid UTF-8.
    pub fn new_from(output: Output) -> Result<Self, String> {
        let stdout = String::from_utf8(output.stdout).map_err(|err| format!("{err:?}"))?;
        let stderr = String::from_utf8(output.stderr).map_err(|err| format!("{err:?}"))?;
        Ok(CommandOutput {
            stdout,
            stderr,
            rc: output.status.code(),
        })
    }
}
/// Extract and parse the `cid` argument.
fn parse_cid(args: &ArgMatches) -> Result<u32, String> {
    let raw = args
        .get_one::<String>("cid")
        .ok_or("Could not find cid argument")?;
    raw.parse()
        .map_err(|_err| "cid is not a number".to_string())
}
/// Extract and parse the `port` argument.
fn parse_port(args: &ArgMatches) -> Result<u32, String> {
    let raw = args
        .get_one::<String>("port")
        .ok_or("Could not find port argument")?;
    raw.parse()
        .map_err(|_err| "port is not a number".to_string())
}
/// Extract the `command` argument as an owned string.
fn parse_command(args: &ArgMatches) -> Result<String, String> {
    match args.get_one::<String>("command") {
        Some(cmd) => Ok(cmd.clone()),
        None => Err("Could not find command argument".to_string()),
    }
}
/// Whether the `--no-wait` flag was passed (spawn without collecting output).
fn parse_no_wait(args: &ArgMatches) -> bool {
    args.get_flag("no-wait")
}
/// Read the `--localpath` argument as an owned string.
fn parse_localfile(args: &ArgMatches) -> Result<String, String> {
    match args.get_one::<String>("localpath") {
        Some(path) => Ok(path.clone()),
        None => Err("Could not find localpath".to_string()),
    }
}
/// Read the `--remotepath` argument as an owned string.
fn parse_remotefile(args: &ArgMatches) -> Result<String, String> {
    match args.get_one::<String>("remotepath") {
        Some(path) => Ok(path.clone()),
        None => Err("Could not find remotepath".to_string()),
    }
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/samples/command_executer/src/utils.rs | samples/command_executer/src/utils.rs | use log::error;
/// Unwrap a `Result`, logging `message` and terminating the process on error
/// instead of panicking.
pub trait ExitGracefully<T, E> {
    /// Return the success value, or log `message` alongside the error and exit.
    fn ok_or_exit(self, message: &str) -> T;
}
impl<T, E: std::fmt::Debug> ExitGracefully<T, E> for Result<T, E> {
    /// Log the error together with `message` and exit(1); otherwise yield the value.
    fn ok_or_exit(self, message: &str) -> T {
        self.unwrap_or_else(|err| {
            error!("{:?}: {}", err, message);
            // `exit` never returns, so this closure coerces to `T`.
            std::process::exit(1);
        })
    }
}
/// Construct the clap `Command` tree for the vsock command-executer CLI.
///
/// Defined as a macro (rather than a function) so the caller's crate provides
/// the `Command`/`Arg`/`ArgAction` imports. Subcommands: `listen`, `run`,
/// `recv-file`, `send-file`.
#[macro_export]
macro_rules! create_app {
    () => {
        Command::new("Vsock Tool")
            .about("Tool that runs commands inside the enclave")
            .arg_required_else_help(true)
            .version(env!("CARGO_PKG_VERSION"))
            // Server side: wait for requests on a vsock port.
            .subcommand(
                Command::new("listen")
                    .about("Listen on a given port")
                    .arg(Arg::new("port").long("port").help("port").required(true)),
            )
            // Client side: execute a command remotely.
            .subcommand(
                Command::new("run")
                    .about("Run a command inside the enclave")
                    .arg(Arg::new("port").long("port").help("port").required(true))
                    .arg(Arg::new("cid").long("cid").help("cid").required(true))
                    .arg(
                        Arg::new("command")
                            .long("command")
                            .help("command")
                            .required(true),
                    )
                    .arg(
                        Arg::new("no-wait")
                            .long("no-wait")
                            .help("command-executer won't wait the command's result")
                            .action(ArgAction::SetTrue),
                    ),
            )
            // File transfer, enclave -> host.
            .subcommand(
                Command::new("recv-file")
                    .about("Receive a file from the enclave")
                    .arg(Arg::new("port").long("port").help("port").required(true))
                    .arg(Arg::new("cid").long("cid").help("cid").required(true))
                    .arg(
                        Arg::new("localpath")
                            .long("localpath")
                            .help("localpath")
                            .required(true),
                    )
                    .arg(
                        Arg::new("remotepath")
                            .long("remotepath")
                            .help("remotepath")
                            .required(true),
                    ),
            )
            // File transfer, host -> enclave.
            .subcommand(
                Command::new("send-file")
                    .about("Send a file to the enclave")
                    .arg(Arg::new("port").long("port").help("port").required(true))
                    .arg(Arg::new("cid").long("cid").help("cid").required(true))
                    .arg(
                        Arg::new("localpath")
                            .long("localpath")
                            .help("localpath")
                            .required(true),
                    )
                    .arg(
                        Arg::new("remotepath")
                            .long("remotepath")
                            .help("remotepath")
                            .required(true),
                    ),
            )
    };
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/samples/command_executer/src/main.rs | samples/command_executer/src/main.rs | use clap::{Arg, ArgAction, Command};
use command_executer::command_parser::{FileArgs, ListenArgs, RunArgs};
use command_executer::create_app;
use command_executer::{listen, recv_file, run, send_file};
/// CLI entry point: parse arguments and dispatch to the matching subcommand.
fn main() {
    let matches = create_app!().get_matches();
    match matches.subcommand() {
        Some(("listen", sub)) => {
            let parsed = ListenArgs::new_with(sub).unwrap();
            listen(parsed).unwrap();
        }
        Some(("run", sub)) => {
            let parsed = RunArgs::new_with(sub).unwrap();
            // Propagate the remote command's exit code to our own process.
            std::process::exit(run(parsed).unwrap());
        }
        Some(("recv-file", sub)) => {
            let parsed = FileArgs::new_with(sub).unwrap();
            recv_file(parsed).unwrap();
        }
        Some(("send-file", sub)) => {
            let parsed = FileArgs::new_with(sub).unwrap();
            send_file(parsed).unwrap();
        }
        _ => {}
    }
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/enclave_build/src/lib.rs | enclave_build/src/lib.rs | // Copyright 2019-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![allow(clippy::too_many_arguments)]
use std::fs::File;
use std::path::Path;
use std::process::Command;
mod docker;
mod utils;
mod yaml_generator;
use aws_nitro_enclaves_image_format::defs::{EifBuildInfo, EifIdentityInfo, EIF_HDR_ARCH_ARM64};
use aws_nitro_enclaves_image_format::utils::identity::parse_custom_metadata;
use aws_nitro_enclaves_image_format::utils::{EifBuilder, SignKeyData};
use docker::DockerUtil;
use serde_json::json;
use sha2::Digest;
use std::collections::BTreeMap;
use yaml_generator::YamlGenerator;
pub const DEFAULT_TAG: &str = "1.0";
/// Builder that converts a Docker image into an Enclave Image File (EIF).
/// The lifetime ties the builder to the caller-owned output file handle.
pub struct Docker2Eif<'a> {
    /// Docker image tag to convert.
    docker_image: String,
    /// Handle used for all Docker daemon interactions.
    docker: DockerUtil,
    /// Path to the init binary placed in the bootstrap ramfs.
    init_path: String,
    /// Path to the NSM kernel driver (nsm.ko).
    nsm_path: String,
    /// Path to the kernel image to embed.
    kernel_img_path: String,
    /// Kernel command line.
    cmdline: String,
    /// Path to the linuxkit executable used to build the ramdisks.
    linuxkit_path: String,
    /// Directory prefix where intermediate ramdisk artifacts are written.
    artifacts_prefix: String,
    /// Destination file for the EIF image.
    output: &'a mut File,
    /// Optional signing material; `None` when the image is unsigned.
    sign_info: Option<SignKeyData>,
    /// Optional image name override (defaults derived from the Docker URI).
    img_name: Option<String>,
    /// Optional image version override (defaults to the Docker image hash).
    img_version: Option<String>,
    /// Optional path to a JSON file with user-provided metadata.
    metadata_path: Option<String>,
    /// Build information recorded in the EIF identity section.
    build_info: EifBuildInfo,
}
/// Failure modes of the Docker-image-to-EIF conversion pipeline.
#[derive(Debug, PartialEq, Eq)]
pub enum Docker2EifError {
    /// A Docker daemon operation (connect/build/pull/inspect) failed.
    DockerError,
    /// The supplied Dockerfile directory does not exist.
    DockerfilePathError,
    ImagePullError,
    /// `init_path` is not a file.
    InitPathError,
    /// `nsm_path` is not a file.
    NsmPathError,
    /// `kernel_img_path` is not a file.
    KernelPathError,
    /// linuxkit failed to run or reported an error.
    LinuxkitExecError,
    /// `linuxkit_path` is not a file.
    LinuxkitPathError,
    /// `metadata_path` is not a file.
    MetadataPathError,
    /// Metadata could not be gathered or parsed; carries a description.
    MetadataError(String),
    /// `artifacts_prefix` is not a directory.
    ArtifactsPrefixError,
    /// Ramfs YAML configuration could not be generated.
    RamfsError,
    RemoveFileError,
    SignImageError(String),
    SignArgsError,
    /// The Docker image targets an architecture other than amd64/arm64.
    UnsupportedArchError,
}
impl<'a> Docker2Eif<'a> {
    /// Validate every input path and assemble a builder instance.
    ///
    /// Fails fast when any artifact path is missing. Signing material is
    /// attached only when both `private_key` and `certificate_path` are
    /// given; an unreadable key is reported on stderr and then ignored
    /// (the image will simply be built unsigned).
    pub fn new(
        docker_image: String,
        init_path: String,
        nsm_path: String,
        kernel_img_path: String,
        cmdline: String,
        linuxkit_path: String,
        output: &'a mut File,
        artifacts_prefix: String,
        certificate_path: &Option<String>,
        private_key: &Option<String>,
        img_name: Option<String>,
        img_version: Option<String>,
        metadata_path: Option<String>,
        build_info: EifBuildInfo,
    ) -> Result<Self, Docker2EifError> {
        let docker = DockerUtil::new(docker_image.clone()).map_err(|e| {
            eprintln!("Docker error: {e:?}");
            Docker2EifError::DockerError
        })?;
        // Each required artifact gets its own error variant so the caller
        // can report precisely which path was wrong.
        if !Path::new(&init_path).is_file() {
            return Err(Docker2EifError::InitPathError);
        } else if !Path::new(&nsm_path).is_file() {
            return Err(Docker2EifError::NsmPathError);
        } else if !Path::new(&kernel_img_path).is_file() {
            return Err(Docker2EifError::KernelPathError);
        } else if !Path::new(&linuxkit_path).is_file() {
            return Err(Docker2EifError::LinuxkitPathError);
        } else if !Path::new(&artifacts_prefix).is_dir() {
            return Err(Docker2EifError::ArtifactsPrefixError);
        }
        if let Some(ref path) = metadata_path {
            if !Path::new(path).is_file() {
                return Err(Docker2EifError::MetadataPathError);
            }
        }
        // Both halves of the signing pair are required; otherwise no signing.
        let sign_info = match (private_key, certificate_path) {
            (Some(key), Some(cert)) => SignKeyData::new(key, Path::new(&cert)).map_or_else(
                |e| {
                    eprintln!("Could not read signing info: {e:?}");
                    None
                },
                Some,
            ),
            _ => None,
        };
        Ok(Docker2Eif {
            docker_image,
            docker,
            init_path,
            nsm_path,
            kernel_img_path,
            cmdline,
            linuxkit_path,
            output,
            artifacts_prefix,
            sign_info,
            img_name,
            img_version,
            metadata_path,
            build_info,
        })
    }
    /// Pull the configured Docker image from its registry.
    pub fn pull_docker_image(&self) -> Result<(), Docker2EifError> {
        self.docker.pull_image().map_err(|e| {
            eprintln!("Docker error: {e:?}");
            Docker2EifError::DockerError
        })?;
        Ok(())
    }
    /// Build the configured Docker image from a directory containing a Dockerfile.
    pub fn build_docker_image(&self, dockerfile_dir: String) -> Result<(), Docker2EifError> {
        if !Path::new(&dockerfile_dir).is_dir() {
            return Err(Docker2EifError::DockerfilePathError);
        }
        self.docker.build_image(dockerfile_dir).map_err(|e| {
            eprintln!("Docker error: {e:?}");
            Docker2EifError::DockerError
        })?;
        Ok(())
    }
    /// Gather the identity section for the EIF: image name/version (with
    /// user overrides applied), `docker inspect` output and custom metadata.
    fn generate_identity_info(&self) -> Result<EifIdentityInfo, Docker2EifError> {
        let docker_info = self
            .docker
            .inspect_image()
            .map_err(|e| Docker2EifError::MetadataError(format!("Docker inspect error: {e:?}")))?;
        let uri_split: Vec<&str> = self.docker_image.split(':').collect();
        if uri_split.is_empty() {
            return Err(Docker2EifError::MetadataError(
                "Wrong image name specified".to_string(),
            ));
        }
        // Image hash is used by default in case image version is not provided.
        // It's taken from JSON generated by `docker inspect` and a bit fragile.
        // May be later we should change it to fetching this data
        // from a specific struct and not JSON
        let img_hash = docker_info
            .get("Id")
            .and_then(|val| val.as_str())
            .and_then(|str| str.strip_prefix("sha256:"))
            .ok_or_else(|| {
                Docker2EifError::MetadataError(
                    "Image info must contain string Id field".to_string(),
                )
            })?;
        let img_name = self
            .img_name
            .clone()
            .unwrap_or_else(|| uri_split[0].to_string());
        let img_version = self
            .img_version
            .clone()
            .unwrap_or_else(|| img_hash.to_string());
        let mut custom_info = json!(null);
        if let Some(ref path) = self.metadata_path {
            custom_info = parse_custom_metadata(path).map_err(Docker2EifError::MetadataError)?
        }
        Ok(EifIdentityInfo {
            img_name,
            img_version,
            build_info: self.build_info.clone(),
            docker_info,
            custom_info,
        })
    }
    /// Run the full pipeline: extract cmd/env from the image, build the
    /// bootstrap and customer ramdisks with linuxkit, then assemble and
    /// write the EIF. Returns the measurement map produced by the builder.
    pub fn create(&mut self) -> Result<BTreeMap<String, String>, Docker2EifError> {
        // Temp files holding the image's CMD/ENTRYPOINT and ENV lines.
        let (cmd_file, env_file) = self.docker.load().map_err(|e| {
            eprintln!("Docker error: {e:?}");
            Docker2EifError::DockerError
        })?;
        let yaml_generator = YamlGenerator::new(
            self.docker_image.clone(),
            self.init_path.clone(),
            self.nsm_path.clone(),
            cmd_file.path().to_str().unwrap().to_string(),
            env_file.path().to_str().unwrap().to_string(),
        );
        let ramfs_config_file = yaml_generator.get_bootstrap_ramfs().map_err(|e| {
            eprintln!("Ramfs error: {e:?}");
            Docker2EifError::RamfsError
        })?;
        let ramfs_with_rootfs_config_file = yaml_generator.get_customer_ramfs().map_err(|e| {
            eprintln!("Ramfs error: {e:?}");
            Docker2EifError::RamfsError
        })?;
        let bootstrap_ramfs = format!("{}/bootstrap-initrd.img", self.artifacts_prefix);
        let customer_ramfs = format!("{}/customer-initrd.img", self.artifacts_prefix);
        let output = Command::new(&self.linuxkit_path)
            .args([
                "build",
                "--name",
                &bootstrap_ramfs,
                "--format",
                "kernel+initrd-nogz",
                "--no-sbom",
                ramfs_config_file.path().to_str().unwrap(),
            ])
            .output()
            .map_err(|_| Docker2EifError::LinuxkitExecError)?;
        if !output.status.success() {
            eprintln!(
                "Linuxkit reported an error while creating the bootstrap ramfs: {:?}",
                String::from_utf8_lossy(&output.stderr)
            );
            return Err(Docker2EifError::LinuxkitExecError);
        }
        // Prefix the docker image filesystem, as expected by init
        let output = Command::new(&self.linuxkit_path)
            .args([
                "build",
                "--docker",
                "--name",
                &customer_ramfs,
                "--format",
                "kernel+initrd-nogz",
                "--no-sbom",
                ramfs_with_rootfs_config_file.path().to_str().unwrap(),
            ])
            .output()
            .map_err(|_| Docker2EifError::LinuxkitExecError)?;
        if !output.status.success() {
            eprintln!(
                "Linuxkit reported an error while creating the customer ramfs: {:?}",
                String::from_utf8_lossy(&output.stderr)
            );
            return Err(Docker2EifError::LinuxkitExecError);
        }
        // Translate the Docker architecture string into EIF header flags.
        let arch = self.docker.architecture().map_err(|e| {
            eprintln!("Docker error: {e:?}");
            Docker2EifError::DockerError
        })?;
        let flags = match arch.as_str() {
            docker::DOCKER_ARCH_ARM64 => EIF_HDR_ARCH_ARM64,
            docker::DOCKER_ARCH_AMD64 => 0,
            _ => return Err(Docker2EifError::UnsupportedArchError),
        };
        // We cannot clone `sign_info` because it might contain a KmsKey object
        // which is not copyable. Since `create` is the last method called, we can
        // move it out of the struct.
        let sign_info = self.sign_info.take();
        let mut build = EifBuilder::new(
            Path::new(&self.kernel_img_path),
            self.cmdline.clone(),
            sign_info,
            sha2::Sha384::new(),
            flags,
            self.generate_identity_info()?,
        );
        // Linuxkit adds -initrd.img sufix to the file names.
        let bootstrap_ramfs = format!("{bootstrap_ramfs}-initrd.img");
        let customer_ramfs = format!("{customer_ramfs}-initrd.img");
        build.add_ramdisk(Path::new(&bootstrap_ramfs));
        build.add_ramdisk(Path::new(&customer_ramfs));
        Ok(build.write_to(self.output))
    }
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/enclave_build/src/docker.rs | enclave_build/src/docker.rs | // Copyright 2019-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use crate::docker::DockerError::CredentialsError;
use crate::utils::handle_stream_output;
use base64::{engine::general_purpose, Engine as _};
use bollard::auth::DockerCredentials;
use bollard::image::{BuildImageOptions, CreateImageOptions};
use bollard::secret::ImageInspect;
use bollard::Docker;
use flate2::{write::GzEncoder, Compression};
use log::{debug, error};
use serde_json::{json, Value};
use std::fs::File;
use std::io::Write;
use std::path::Path;
use tempfile::NamedTempFile;
use tokio::runtime::Runtime;
use url::Url;
// Architecture strings as reported by `docker inspect`.
/// 64-bit ARM image architecture.
pub const DOCKER_ARCH_ARM64: &str = "arm64";
/// 64-bit x86 image architecture.
pub const DOCKER_ARCH_AMD64: &str = "amd64";
/// Errors surfaced by [`DockerUtil`] operations.
#[derive(Debug, PartialEq, Eq)]
pub enum DockerError {
    /// Could not connect to the Docker daemon.
    ConnectionError,
    /// `docker build` failed or the build context could not be archived.
    BuildError,
    /// `docker inspect` failed (e.g. image not present locally).
    InspectError,
    /// Pulling the image from the registry failed.
    PullError,
    /// A tokio runtime could not be created.
    RuntimeError,
    /// A temporary file could not be created or written.
    TempfileError,
    /// Credential lookup/parsing failed; carries a description.
    CredentialsError(String),
    /// The image declares neither CMD nor ENTRYPOINT.
    UnsupportedEntryPoint,
}
/// Struct exposing the Docker functionalities to the EIF builder
pub struct DockerUtil {
    /// Connection handle to the Docker daemon.
    docker: Docker,
    /// Fully-qualified image tag (a `:latest` suffix is added if missing).
    docker_image: String,
}
impl DockerUtil {
    /// Constructor that takes as argument a tag for the docker image to be used.
    /// Untagged references are normalized to `:latest` so credential lookup
    /// and inspect always see a fully-qualified tag.
    pub fn new(docker_image: String) -> Result<Self, DockerError> {
        let mut docker_image = docker_image;
        if !docker_image.contains(':') {
            docker_image.push_str(":latest");
        }
        // DOCKER_HOST environment variable is parsed inside
        // if docker daemon address needs to be substituted.
        // By default, it tries to connect to 'unix:///var/run/docker.sock'
        let docker = Docker::connect_with_defaults().map_err(|e| {
            error!("{:?}", e);
            DockerError::ConnectionError
        })?;
        Ok(DockerUtil {
            docker,
            docker_image,
        })
    }
    /// Extract the registry host from an image URI, tolerating URIs that
    /// omit the scheme (e.g. `registry.example.com/repo:tag`).
    fn parse_docker_host(docker_image: &str) -> Option<String> {
        if let Ok(uri) = Url::parse(docker_image) {
            uri.host().map(|s| s.to_string())
        } else {
            // Some Docker URIs don't have the protocol included, so just use
            // a dummy one to trick Url that it's a properly defined Uri.
            let uri = format!("dummy://{docker_image}");
            if let Ok(uri) = Url::parse(&uri) {
                uri.host().map(|s| s.to_string())
            } else {
                None
            }
        }
    }
    /// Returns the credentials by reading ${HOME}/.docker/config.json or ${DOCKER_CONFIG}
    ///
    /// config.json doesn't seem to have a schema that we could use to validate
    /// we are parsing it correctly, so the parsing mechanism had been infered by
    /// reading a config.json created by:
    // Docker version 19.03.2
    fn get_credentials(&self) -> Result<DockerCredentials, DockerError> {
        let host = match Self::parse_docker_host(&self.docker_image) {
            Some(host) => host,
            None => return Err(CredentialsError("Invalid docker image URI!".to_string())),
        };
        let config_file = self.get_config_file()?;
        let config_json: serde_json::Value = serde_json::from_reader(&config_file)
            .map_err(|err| CredentialsError(format!("JSON was not well-formatted: {err}")))?;
        let auths = config_json.get("auths").ok_or_else(|| {
            CredentialsError("Could not find auths key in config JSON".to_string())
        })?;
        if let Value::Object(auths) = auths {
            for (registry_name, registry_auths) in auths.iter() {
                // Substring match: registry entries may carry a scheme and
                // trailing path around the bare host name.
                if !registry_name.contains(&host) {
                    continue;
                }
                // `as_str` yields the raw string; going through `to_string`
                // would wrap the value in JSON quotes.
                let auth = registry_auths
                    .get("auth")
                    .and_then(Value::as_str)
                    .ok_or_else(|| {
                        CredentialsError("Could not find auth key in config JSON".to_string())
                    })?;
                let decoded = general_purpose::STANDARD.decode(auth).map_err(|err| {
                    CredentialsError(format!("Invalid Base64 encoding for auth: {err}"))
                })?;
                let decoded = std::str::from_utf8(&decoded).map_err(|err| {
                    CredentialsError(format!("Invalid utf8 encoding for auth: {err}"))
                })?;
                // Split at the LAST ':' so usernames containing ':' survive.
                if let Some(index) = decoded.rfind(':') {
                    let (user, after_user) = decoded.split_at(index);
                    let (_, password) = after_user.split_at(1);
                    return Ok(DockerCredentials {
                        username: Some(user.to_string()),
                        password: Some(password.to_string()),
                        ..Default::default()
                    });
                }
            }
        }
        Err(CredentialsError(
            "No credentials found for the current image".to_string(),
        ))
    }
    /// Open the Docker config file: `$DOCKER_CONFIG` if set, otherwise
    /// `$HOME/.docker/config.json` when it exists.
    fn get_config_file(&self) -> Result<File, DockerError> {
        // Explicit override takes precedence; failure to open it is an error
        // (we do not fall back to the default location).
        if let Ok(file) = std::env::var("DOCKER_CONFIG") {
            return File::open(&file).map_err(|err| {
                DockerError::CredentialsError(format!(
                    "Could not open file pointed by env DOCKER_CONFIG: {err}"
                ))
            });
        }
        if let Ok(home_dir) = std::env::var("HOME") {
            let default_config_path = format!("{home_dir}/.docker/config.json");
            let config_path = Path::new(&default_config_path);
            if config_path.exists() {
                return File::open(config_path).map_err(|err| {
                    DockerError::CredentialsError(format!(
                        "Could not open file {}: {}",
                        config_path.display(),
                        err
                    ))
                });
            }
        }
        Err(DockerError::CredentialsError(
            "Config file not present, please set env \
             DOCKER_CONFIG accordingly"
                .to_string(),
        ))
    }
    /// Pull the image, with the tag provided in constructor, from the Docker registry
    pub fn pull_image(&self) -> Result<(), DockerError> {
        // Check if the Docker image is locally available.
        // If available, early exit.
        if self.inspect().is_ok() {
            eprintln!("Using the locally available Docker image...");
            return Ok(());
        }
        let runtime = Runtime::new().map_err(|_| DockerError::RuntimeError)?;
        runtime.block_on(async {
            let create_image_options = CreateImageOptions {
                from_image: self.docker_image.clone(),
                ..Default::default()
            };
            let credentials = match self.get_credentials() {
                Ok(auth) => Some(auth),
                // It is not mandatory to have the credentials set, but this is
                // the most likely reason for failure when pulling, so log the
                // error.
                Err(err) => {
                    debug!("WARNING!! Credential could not be set {:?}", err);
                    None
                }
            };
            let stream = self
                .docker
                .create_image(Some(create_image_options), None, credentials);
            handle_stream_output(stream, DockerError::PullError).await
        })
    }
    /// Gzip-compress a tar archive of `dockerfile_dir` to use as build context.
    fn build_tarball(dockerfile_dir: String) -> Result<Vec<u8>, DockerError> {
        let encoder = GzEncoder::new(Vec::default(), Compression::best());
        let mut archive = tar::Builder::new(encoder);
        archive.append_dir_all(".", &dockerfile_dir).map_err(|e| {
            error!("{:?}", e);
            DockerError::BuildError
        })?;
        archive.into_inner().and_then(|c| c.finish()).map_err(|e| {
            error!("{:?}", e);
            DockerError::BuildError
        })
    }
    /// Build an image locally, with the tag provided in constructor, using a
    /// directory that contains a Dockerfile
    pub fn build_image(&self, dockerfile_dir: String) -> Result<(), DockerError> {
        let runtime = Runtime::new().map_err(|_| DockerError::RuntimeError)?;
        runtime.block_on(async move {
            let stream = self.docker.build_image(
                BuildImageOptions {
                    dockerfile: "Dockerfile".to_string(),
                    t: self.docker_image.clone(),
                    ..Default::default()
                },
                None,
                Some(Self::build_tarball(dockerfile_dir)?.into()),
            );
            handle_stream_output(stream, DockerError::BuildError).await
        })
    }
    /// Run `docker inspect` on the configured image.
    fn inspect(&self) -> Result<ImageInspect, DockerError> {
        let runtime = Runtime::new().map_err(|_| DockerError::RuntimeError)?;
        let image_future = self.docker.inspect_image(&self.docker_image);
        runtime.block_on(async {
            match image_future.await {
                Ok(image) => Ok(image),
                Err(e) => {
                    error!("{:?}", e);
                    Err(DockerError::InspectError)
                }
            }
        })
    }
    /// Inspect docker image and return its description as a json String
    pub fn inspect_image(&self) -> Result<serde_json::Value, DockerError> {
        match self.inspect() {
            Ok(image) => Ok(json!(image)),
            Err(e) => {
                error!("{:?}", e);
                Err(DockerError::InspectError)
            }
        }
    }
    /// Extract the command and environment configured in the image,
    /// preferring CMD and falling back to ENTRYPOINT.
    fn extract_image(&self) -> Result<(Vec<String>, Vec<String>), DockerError> {
        // First try to find CMD parameters (together with potential ENV bindings)
        let image = self.inspect()?;
        let config = image.config.ok_or(DockerError::UnsupportedEntryPoint)?;
        if let Some(cmd) = &config.cmd {
            let env = config.env.unwrap_or_default();
            return Ok((cmd.clone(), env));
        }
        // If no CMD instructions are found, try to locate an ENTRYPOINT command
        if let Some(entrypoint) = &config.entrypoint {
            let env = config.env.unwrap_or_default();
            return Ok((entrypoint.clone(), env));
        }
        Err(DockerError::UnsupportedEntryPoint)
    }
    /// The main function of this struct. This needs to be called in order to
    /// extract the necessary configuration values from the docker image with
    /// the tag provided in the constructor
    pub fn load(&self) -> Result<(NamedTempFile, NamedTempFile), DockerError> {
        let (cmd, env) = self.extract_image()?;
        let cmd_file = write_config(cmd)?;
        let env_file = write_config(env)?;
        Ok((cmd_file, env_file))
    }
    /// Fetch architecture information from an image
    pub fn architecture(&self) -> Result<String, DockerError> {
        let image = self.inspect()?;
        Ok(image.architecture.unwrap_or_default())
    }
}
/// Write each config entry on its own line into a fresh temporary file.
fn write_config(config: Vec<String>) -> Result<NamedTempFile, DockerError> {
    let mut file = NamedTempFile::new().map_err(|_| DockerError::TempfileError)?;
    for entry in &config {
        writeln!(file, "{entry}").map_err(|_| DockerError::TempfileError)?;
    }
    Ok(file)
}
#[cfg(test)]
mod tests {
    use super::*;
    use base64::engine::general_purpose;
    use std::{env, io::Read};
    /// Test extracted configuration is as expected
    /// NOTE(review): integration test — requires a reachable Docker daemon
    /// and network access to public.ecr.aws.
    #[test]
    fn test_config() {
        let docker = DockerUtil::new(String::from("public.ecr.aws/aws-nitro-enclaves/hello:v1"));
        let (cmd_file, env_file) = docker.unwrap().load().unwrap();
        let mut cmd_file = File::open(cmd_file.path()).unwrap();
        let mut env_file = File::open(env_file.path()).unwrap();
        let mut cmd = String::new();
        cmd_file.read_to_string(&mut cmd).unwrap();
        assert_eq!(cmd, "/bin/hello.sh\n");
        let mut env = String::new();
        env_file.read_to_string(&mut env).unwrap();
        assert_eq!(
            env,
            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\n\
             HELLO=Hello from the enclave side!\n"
        );
    }
    // Verifies the `:latest` normalization done by the constructor.
    #[test]
    fn test_new() {
        let docker = DockerUtil::new(String::from("alpine")).unwrap();
        assert_eq!(docker.docker_image, "alpine:latest");
        let docker = DockerUtil::new(String::from("nginx:1.19")).unwrap();
        assert_eq!(docker.docker_image, "nginx:1.19");
    }
    // Exercises credential parsing against a synthetic config.json pointed
    // to via DOCKER_CONFIG. Mutates process environment, so it can race with
    // other env-dependent tests if run in parallel.
    #[test]
    fn test_get_credentials() {
        let test_user = "test_user";
        let test_password = "test_password";
        let auth = format!("{test_user}:{test_password}");
        let encoded_auth = general_purpose::STANDARD.encode(auth);
        let config = format!(
            r#"{{
                "auths": {{
                    "https://public.ecr.aws/aws-nitro-enclaves/hello/v1/": {{
                        "auth": "{encoded_auth}"
                    }},
                    "https://registry.example.com": {{
                        "auth": "b3RoZXJfdXNlcjpvdGhlcl9wYXNzd29yZA=="
                    }}
                }}
            }}"#
        );
        // Create a temporary file
        let mut temp_file = NamedTempFile::new().expect("Failed to create temporary file.");
        // Write the config to the temporary file
        write!(temp_file, "{config}").expect("Failed to write to temporary file.");
        // Set the DOCKER_CONFIG environment variable to point to the temporary file's path
        let temp_file_path = temp_file.path().to_string_lossy().to_string();
        env::set_var("DOCKER_CONFIG", temp_file_path);
        let docker =
            DockerUtil::new(String::from("public.ecr.aws/aws-nitro-enclaves/hello:v1")).unwrap();
        let creds = docker.get_credentials().unwrap();
        assert_eq!(creds.username, Some(test_user.to_string()));
        assert_eq!(creds.password, Some(test_password.to_string()));
        temp_file.close().unwrap();
    }
    /// NOTE(review): integration test — pulls a real image, requires Docker
    /// daemon and network access.
    #[test]
    fn test_architecture() {
        #[cfg(target_arch = "x86_64")]
        {
            let docker =
                DockerUtil::new(String::from("public.ecr.aws/aws-nitro-enclaves/hello:v1"))
                    .unwrap();
            docker.pull_image().unwrap();
            let arch = docker.architecture().unwrap();
            assert_eq!(arch, "amd64");
        }
        #[cfg(target_arch = "aarch64")]
        {
            let docker = DockerUtil::new(String::from("arm64v8/alpine")).unwrap();
            docker.pull_image().unwrap();
            let arch = docker.architecture().unwrap();
            assert_eq!(arch, "arm64");
        }
    }
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/enclave_build/src/utils.rs | enclave_build/src/utils.rs | // Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use bollard::errors::Error;
use bollard::secret::{BuildInfo, CreateImageInfo};
use futures::stream::StreamExt;
use futures::Stream;
use log::{error, info};
/// Common accessor for progress items that may carry an embedded error
/// message (bollard's `CreateImageInfo` / `BuildInfo`).
pub trait StreamItem {
    /// The item's error message, if it reports one.
    fn error(&self) -> Option<String>;
}
// Implement StreamItem for CreateImageInfo
impl StreamItem for CreateImageInfo {
    fn error(&self) -> Option<String> {
        match &self.error {
            Some(msg) => Some(msg.clone()),
            None => None,
        }
    }
}
// Implement StreamItem for BuildInfo
impl StreamItem for BuildInfo {
    fn error(&self) -> Option<String> {
        match &self.error {
            Some(msg) => Some(msg.clone()),
            None => None,
        }
    }
}
pub async fn handle_stream_output<T, U>(
mut stream: impl Stream<Item = Result<T, Error>> + Unpin,
error_type: U,
) -> Result<(), U>
where
T: StreamItem + std::fmt::Debug,
{
while let Some(item) = stream.next().await {
match item {
Ok(output) => {
if let Some(err_msg) = output.error() {
error!("{:?}", err_msg);
return Err(error_type);
} else {
info!("{:?}", output);
}
}
Err(e) => {
error!("{:?}", e);
return Err(error_type);
}
}
}
Ok(())
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/enclave_build/src/yaml_generator.rs | enclave_build/src/yaml_generator.rs | // Copyright 2019-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use serde::{Deserialize, Serialize};
use std::io::Write;
use tempfile::NamedTempFile;
/// YAML layout for the bootstrap ramfs: /dev, the init binary and nsm.ko.
/// A fixed-arity tuple keeps the serialized entry order stable.
#[derive(Debug, Serialize, Deserialize)]
struct BootstrapRamfsTemplate {
    files: (DirTemplate, FileTemplate, FileTemplate),
}
/// YAML layout for the customer ramfs: the application rootfs directories
/// plus the cmd/env files. A fixed-arity tuple keeps the entry order stable.
#[derive(Debug, Serialize, Deserialize)]
struct CustomerRamfsTemplate {
    // Path prefix under which the image filesystem is placed ("rootfs/").
    prefix: String,
    // Docker image(s) whose filesystem seeds the ramfs.
    init: Vec<String>,
    files: (
        DirTemplate,
        DirTemplate,
        DirTemplate,
        DirTemplate,
        DirTemplate,
        DirTemplate,
        FileTemplate,
        FileTemplate,
    ),
}
/// A single file entry in a linuxkit YAML config.
#[derive(Debug, Serialize, Deserialize)]
struct FileTemplate {
    // Destination path inside the ramfs.
    path: String,
    // Source path on the build host.
    source: String,
    // Octal permission string, e.g. "0755".
    mode: String,
}
/// A single directory entry in a linuxkit YAML config.
#[derive(Debug, Serialize, Deserialize)]
struct DirTemplate {
    // Destination path inside the ramfs.
    path: String,
    // Always true; marks the entry as a directory for linuxkit.
    directory: bool,
    // Octal permission string, e.g. "0755".
    mode: String,
}
/// Errors produced while emitting linuxkit YAML configs.
#[derive(Debug, PartialEq, Eq)]
pub enum YamlGeneratorError {
    /// A temporary file could not be created or written.
    TempfileError,
}
/// Produces the linuxkit YAML configs for the bootstrap and customer ramdisks.
pub struct YamlGenerator {
    /// Docker image whose filesystem seeds the customer ramfs.
    docker_image: String,
    /// Path to the init binary included in the bootstrap ramfs.
    init_path: String,
    /// Path to the nsm.ko driver included in the bootstrap ramfs.
    nsm_path: String,
    /// Path to the file holding the image's CMD/ENTRYPOINT lines.
    cmd_path: String,
    /// Path to the file holding the image's ENV lines.
    env_path: String,
}
impl YamlGenerator {
    /// Store the artifact paths used by both YAML generators.
    pub fn new(
        docker_image: String,
        init_path: String,
        nsm_path: String,
        cmd_path: String,
        env_path: String,
    ) -> Self {
        YamlGenerator {
            docker_image,
            init_path,
            nsm_path,
            cmd_path,
            env_path,
        }
    }
    /// Emit the linuxkit YAML for the bootstrap ramfs (/dev, init, nsm.ko)
    /// into a fresh temporary file and return its handle.
    /// The entry layout here is pinned by the unit test below.
    pub fn get_bootstrap_ramfs(&self) -> Result<NamedTempFile, YamlGeneratorError> {
        let ramfs = BootstrapRamfsTemplate {
            files: (
                DirTemplate {
                    path: String::from("dev"),
                    directory: true,
                    mode: String::from("0755"),
                },
                FileTemplate {
                    path: String::from("init"),
                    source: self.init_path.clone(),
                    mode: String::from("0755"),
                },
                FileTemplate {
                    path: String::from("nsm.ko"),
                    source: self.nsm_path.clone(),
                    mode: String::from("0755"),
                },
            ),
        };
        let yaml = serde_yaml::to_string(&ramfs);
        let mut file = NamedTempFile::new().map_err(|_| YamlGeneratorError::TempfileError)?;
        // Serialization of these plain structs cannot realistically fail,
        // hence the unwrap; I/O errors are mapped to TempfileError.
        file.write_all(yaml.unwrap().as_bytes())
            .map_err(|_| YamlGeneratorError::TempfileError)?;
        Ok(file)
    }
    /// Emit the linuxkit YAML for the customer ramfs (application rootfs
    /// directories plus cmd/env files) into a fresh temporary file.
    /// The entry layout here is pinned by the unit test below.
    pub fn get_customer_ramfs(&self) -> Result<NamedTempFile, YamlGeneratorError> {
        let ramfs = CustomerRamfsTemplate {
            prefix: "rootfs/".to_string(),
            init: vec![self.docker_image.clone()],
            // Each directory must stay under rootfs, as expected by init
            files: (
                DirTemplate {
                    path: String::from("rootfs/dev"),
                    directory: true,
                    mode: String::from("0755"),
                },
                DirTemplate {
                    path: String::from("rootfs/run"),
                    directory: true,
                    mode: String::from("0755"),
                },
                DirTemplate {
                    path: String::from("rootfs/sys"),
                    directory: true,
                    mode: String::from("0755"),
                },
                DirTemplate {
                    path: String::from("rootfs/var"),
                    directory: true,
                    mode: String::from("0755"),
                },
                DirTemplate {
                    path: String::from("rootfs/proc"),
                    directory: true,
                    mode: String::from("0755"),
                },
                DirTemplate {
                    path: String::from("rootfs/tmp"),
                    directory: true,
                    mode: String::from("0755"),
                },
                FileTemplate {
                    path: String::from("cmd"),
                    source: self.cmd_path.clone(),
                    mode: String::from("0644"),
                },
                FileTemplate {
                    path: String::from("env"),
                    source: self.env_path.clone(),
                    mode: String::from("0644"),
                },
            ),
        };
        let yaml = serde_yaml::to_string(&ramfs);
        let mut file = NamedTempFile::new().map_err(|_| YamlGeneratorError::TempfileError)?;
        // Serialization of these plain structs cannot realistically fail,
        // hence the unwrap; I/O errors are mapped to TempfileError.
        file.write_all(yaml.unwrap().as_bytes())
            .map_err(|_| YamlGeneratorError::TempfileError)?;
        Ok(file)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::fs::File;
    use std::io::Read;
    /// Test YAML config files are the same as the ones written by hand
    // The expected strings use `\`-newline continuations, so each `\n` escape
    // is literal content and the surrounding indentation is not.
    #[test]
    fn test_ramfs() {
        let yaml_generator = YamlGenerator::new(
            String::from("docker_image"),
            String::from("path_to_init"),
            String::from("path_to_nsm"),
            String::from("path_to_cmd"),
            String::from("path_to_env"),
        );
        let mut bootstrap_data = String::new();
        let bootstrap_ramfs = yaml_generator.get_bootstrap_ramfs().unwrap();
        let mut bootstrap_ramfs = File::open(bootstrap_ramfs.path()).unwrap();
        bootstrap_ramfs.read_to_string(&mut bootstrap_data).unwrap();
        assert_eq!(
            bootstrap_data,
            "---\
             \nfiles:\
             \n  - path: dev\
             \n    directory: true\
             \n    mode: \"0755\"\
             \n  - path: init\
             \n    source: path_to_init\
             \n    mode: \"0755\"\
             \n  - path: nsm.ko\
             \n    source: path_to_nsm\
             \n    mode: \"0755\"\
             \n\
             "
        );
        let mut customer_data = String::new();
        let customer_ramfs = yaml_generator.get_customer_ramfs().unwrap();
        let mut customer_ramfs = File::open(customer_ramfs.path()).unwrap();
        customer_ramfs.read_to_string(&mut customer_data).unwrap();
        assert_eq!(
            customer_data,
            "---\
             \nprefix: rootfs/\
             \ninit:\
             \n  - docker_image\
             \nfiles:\
             \n  - path: rootfs/dev\
             \n    directory: true\
             \n    mode: \"0755\"\
             \n  - path: rootfs/run\
             \n    directory: true\
             \n    mode: \"0755\"\
             \n  - path: rootfs/sys\
             \n    directory: true\
             \n    mode: \"0755\"\
             \n  - path: rootfs/var\
             \n    directory: true\
             \n    mode: \"0755\"\
             \n  - path: rootfs/proc\
             \n    directory: true\
             \n    mode: \"0755\"\
             \n  - path: rootfs/tmp\
             \n    directory: true\
             \n    mode: \"0755\"\
             \n  - path: cmd\
             \n    source: path_to_cmd\
             \n    mode: \"0644\"\
             \n  - path: env\
             \n    source: path_to_env\
             \n    mode: \"0644\"\
             \n\
             "
        );
    }
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/enclave_build/src/main.rs | enclave_build/src/main.rs | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use clap::{Arg, ArgAction, Command};
use std::fs::OpenOptions;
use aws_nitro_enclaves_image_format::generate_build_info;
use enclave_build::Docker2Eif;
/// CLI entry point: parse arguments, optionally build or pull the Docker
/// image, then convert it to an EIF at the requested output path.
fn main() {
    let matches = Command::new("Docker2Eif builder")
        .about("Generate consistent EIF image from a Docker image")
        .arg(
            Arg::new("docker_image")
                .short('t')
                .long("tag")
                .help("Docker image tag")
                .required(true),
        )
        .arg(
            Arg::new("init_path")
                .short('i')
                .long("init")
                .help("Path to a binary representing the init process for the enclave")
                .required(true),
        )
        .arg(
            Arg::new("nsm_path")
                .short('n')
                .long("nsm")
                .help("Path to the NitroSecureModule Kernel Driver")
                .required(true),
        )
        .arg(
            Arg::new("kernel_img_path")
                .short('k')
                .long("kernel")
                .help("Path to a bzImage/Image file for x86_64/aarch64 linux kernel")
                .required(true),
        )
        .arg(
            Arg::new("kernel_cfg_path")
                .long("kernel_config")
                .help("Path to a bzImage.config/Image.config file for x86_64/aarch64 linux kernel config")
                .required(true),
        )
        .arg(
            Arg::new("cmdline")
                .short('c')
                .long("cmdline")
                .help("Cmdline for kernel")
                .required(true),
        )
        .arg(
            Arg::new("linuxkit_path")
                .short('l')
                .long("linuxkit")
                .help("Linuxkit executable path")
                .required(true),
        )
        .arg(
            Arg::new("output")
                .short('o')
                .long("output")
                .help("Output file for EIF image")
                .required(true),
        )
        // Signing is optional, but key and certificate must come together.
        .arg(
            Arg::new("signing-certificate")
                .long("signing-certificate")
                .help("Specify the path to the signing certificate")
                .requires("private-key"),
        )
        .arg(
            Arg::new("private-key")
                .long("private-key")
                .help("Specify KMS key ARN or the path to the private key file")
                .requires("signing-certificate"),
        )
        // --build and --pull are mutually exclusive image-acquisition modes.
        .arg(
            Arg::new("build")
                .short('b')
                .long("build")
                .help("Build image from Dockerfile")
                .conflicts_with("pull"),
        )
        .arg(
            Arg::new("pull")
                .short('p')
                .long("pull")
                .help("Pull the Docker image before generating EIF")
                .action(ArgAction::SetTrue)
                .conflicts_with("build"),
        )
        .arg(
            Arg::new("image_name")
                .long("name")
                .help("Name for enclave image"),
        )
        .arg(
            Arg::new("image_version")
                .long("version")
                .help("Version of the enclave image"),
        )
        .arg(
            Arg::new("metadata")
                .long("metadata")
                .help("Path to JSON containing the custom metadata provided by the user"),
        )
        .get_matches();
    // All required arguments were validated by clap, so unwrap is safe here.
    let docker_image = matches.get_one::<String>("docker_image").unwrap();
    let init_path = matches.get_one::<String>("init_path").unwrap();
    let nsm_path = matches.get_one::<String>("nsm_path").unwrap();
    let kernel_img_path = matches.get_one::<String>("kernel_img_path").unwrap();
    let kernel_cfg_path = matches.get_one::<String>("kernel_cfg_path").unwrap();
    let cmdline = matches.get_one::<String>("cmdline").unwrap();
    let linuxkit_path = matches.get_one::<String>("linuxkit_path").unwrap();
    let output = matches.get_one::<String>("output").unwrap();
    let signing_certificate = matches
        .get_one::<String>("signing-certificate")
        .map(String::from);
    let private_key = matches.get_one::<String>("private-key").map(String::from);
    let img_name = matches.get_one::<String>("image_name").map(String::from);
    let img_version = matches.get_one::<String>("image_version").map(String::from);
    let metadata = matches.get_one::<String>("metadata").map(String::from);
    let mut output = OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .truncate(true)
        .open(output)
        .expect("Failed to create output file");
    let mut img = Docker2Eif::new(
        docker_image.to_string(),
        init_path.to_string(),
        nsm_path.to_string(),
        kernel_img_path.to_string(),
        cmdline.to_string(),
        linuxkit_path.to_string(),
        &mut output,
        ".".to_string(),
        &signing_certificate,
        &private_key,
        img_name,
        img_version,
        metadata,
        generate_build_info!(kernel_cfg_path).expect("Can not generate build info"),
    )
    .unwrap();
    // Acquire the image if requested; otherwise assume it is already local.
    if let Some(dockerfile_dir) = matches.get_one::<String>("build") {
        img.build_docker_image(dockerfile_dir.to_string()).unwrap();
    } else if matches.get_flag("pull") {
        img.pull_docker_image().unwrap();
    }
    img.create().unwrap();
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/driver-bindings/src/bindings.rs | driver-bindings/src/bindings.rs | /* automatically generated by rust-bindgen 0.62.0 */
pub const NE_ERR_VCPU_ALREADY_USED: u32 = 256;
pub const NE_ERR_VCPU_NOT_IN_CPU_POOL: u32 = 257;
pub const NE_ERR_VCPU_INVALID_CPU_CORE: u32 = 258;
pub const NE_ERR_INVALID_MEM_REGION_SIZE: u32 = 259;
pub const NE_ERR_INVALID_MEM_REGION_ADDR: u32 = 260;
pub const NE_ERR_UNALIGNED_MEM_REGION_ADDR: u32 = 261;
pub const NE_ERR_MEM_REGION_ALREADY_USED: u32 = 262;
pub const NE_ERR_MEM_NOT_HUGE_PAGE: u32 = 263;
pub const NE_ERR_MEM_DIFFERENT_NUMA_NODE: u32 = 264;
pub const NE_ERR_MEM_MAX_REGIONS: u32 = 265;
pub const NE_ERR_NO_MEM_REGIONS_ADDED: u32 = 266;
pub const NE_ERR_NO_VCPUS_ADDED: u32 = 267;
pub const NE_ERR_ENCLAVE_MEM_MIN_SIZE: u32 = 268;
pub const NE_ERR_FULL_CORES_NOT_USED: u32 = 269;
pub const NE_ERR_NOT_IN_INIT_STATE: u32 = 270;
pub const NE_ERR_INVALID_VCPU: u32 = 271;
pub const NE_ERR_NO_CPUS_AVAIL_IN_POOL: u32 = 272;
pub const NE_ERR_INVALID_PAGE_SIZE: u32 = 273;
pub const NE_ERR_INVALID_FLAG_VALUE: u32 = 274;
pub const NE_ERR_INVALID_ENCLAVE_CID: u32 = 275;
// Kernel-compatible 64-bit unsigned integer, as emitted by bindgen.
pub type __u64 = ::std::os::raw::c_ulonglong;
// NOTE(review): bindgen-generated mirror of the Nitro Enclaves kernel driver
// ABI. Regenerate with bindgen rather than hand-editing so the layout stays
// in sync with the C header.
#[doc = " struct ne_image_load_info - Info necessary for in-memory enclave image"]
#[doc = "\t\t\t loading (in / out)."]
#[doc = " @flags:\t\tFlags to determine the enclave image type"]
#[doc = "\t\t\t(e.g. Enclave Image Format - EIF) (in)."]
#[doc = " @memory_offset:\tOffset in enclave memory where to start placing the"]
#[doc = "\t\t\tenclave image (out)."]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct ne_image_load_info {
    pub flags: __u64,
    pub memory_offset: __u64,
}
#[test]
fn bindgen_test_layout_ne_image_load_info() {
    // bindgen-generated layout test: checks that the Rust struct matches the
    // C ABI (16 bytes total, 8-byte alignment, fields at offsets 0 and 8).
    const UNINIT: ::std::mem::MaybeUninit<ne_image_load_info> = ::std::mem::MaybeUninit::uninit();
    // Pointer into uninitialized storage — used only for address arithmetic,
    // never dereferenced, so this is sound.
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::std::mem::size_of::<ne_image_load_info>(),
        16usize,
        concat!("Size of: ", stringify!(ne_image_load_info))
    );
    assert_eq!(
        ::std::mem::align_of::<ne_image_load_info>(),
        8usize,
        concat!("Alignment of ", stringify!(ne_image_load_info))
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(ne_image_load_info),
            "::",
            stringify!(flags)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).memory_offset) as usize - ptr as usize },
        8usize,
        concat!(
            "Offset of field: ",
            stringify!(ne_image_load_info),
            "::",
            stringify!(memory_offset)
        )
    );
}
#[doc = " struct ne_user_memory_region - Memory region to be set for an enclave (in)."]
#[doc = " @flags:\t\tFlags to determine the usage for the memory region (in)."]
#[doc = " @memory_size:\tThe size, in bytes, of the memory region to be set for"]
#[doc = "\t\t\tan enclave (in)."]
#[doc = " @userspace_addr:\tThe start address of the userspace allocated memory of"]
#[doc = "\t\t\tthe memory region to set for an enclave (in)."]
// NOTE(review): bindgen-generated; regenerate rather than hand-edit.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct ne_user_memory_region {
    pub flags: __u64,
    pub memory_size: __u64,
    pub userspace_addr: __u64,
}
#[test]
fn bindgen_test_layout_ne_user_memory_region() {
    // bindgen-generated layout test: checks the Rust struct against the C ABI
    // (24 bytes total, 8-byte alignment, fields at offsets 0, 8 and 16).
    const UNINIT: ::std::mem::MaybeUninit<ne_user_memory_region> =
        ::std::mem::MaybeUninit::uninit();
    // Pointer into uninitialized storage — address arithmetic only, never
    // dereferenced, so this is sound.
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::std::mem::size_of::<ne_user_memory_region>(),
        24usize,
        concat!("Size of: ", stringify!(ne_user_memory_region))
    );
    assert_eq!(
        ::std::mem::align_of::<ne_user_memory_region>(),
        8usize,
        concat!("Alignment of ", stringify!(ne_user_memory_region))
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(ne_user_memory_region),
            "::",
            stringify!(flags)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).memory_size) as usize - ptr as usize },
        8usize,
        concat!(
            "Offset of field: ",
            stringify!(ne_user_memory_region),
            "::",
            stringify!(memory_size)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).userspace_addr) as usize - ptr as usize },
        16usize,
        concat!(
            "Offset of field: ",
            stringify!(ne_user_memory_region),
            "::",
            stringify!(userspace_addr)
        )
    );
}
#[doc = " struct ne_enclave_start_info - Setup info necessary for enclave start (in / out)."]
#[doc = " @flags:\t\tFlags for the enclave to start with (e.g. debug mode) (in)."]
#[doc = " @enclave_cid:\tContext ID (CID) for the enclave vsock device. If 0 as"]
#[doc = "\t\t\tinput, the CID is autogenerated by the hypervisor and"]
#[doc = "\t\t\treturned back as output by the driver (in / out)."]
// NOTE(review): bindgen-generated; regenerate rather than hand-edit.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct ne_enclave_start_info {
    pub flags: __u64,
    pub enclave_cid: __u64,
}
#[test]
fn bindgen_test_layout_ne_enclave_start_info() {
    // bindgen-generated layout test: checks the Rust struct against the C ABI
    // (16 bytes total, 8-byte alignment, fields at offsets 0 and 8).
    const UNINIT: ::std::mem::MaybeUninit<ne_enclave_start_info> =
        ::std::mem::MaybeUninit::uninit();
    // Pointer into uninitialized storage — address arithmetic only, never
    // dereferenced, so this is sound.
    let ptr = UNINIT.as_ptr();
    assert_eq!(
        ::std::mem::size_of::<ne_enclave_start_info>(),
        16usize,
        concat!("Size of: ", stringify!(ne_enclave_start_info))
    );
    assert_eq!(
        ::std::mem::align_of::<ne_enclave_start_info>(),
        8usize,
        concat!("Alignment of ", stringify!(ne_enclave_start_info))
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize },
        0usize,
        concat!(
            "Offset of field: ",
            stringify!(ne_enclave_start_info),
            "::",
            stringify!(flags)
        )
    );
    assert_eq!(
        unsafe { ::std::ptr::addr_of!((*ptr).enclave_cid) as usize - ptr as usize },
        8usize,
        concat!(
            "Offset of field: ",
            stringify!(ne_enclave_start_info),
            "::",
            stringify!(enclave_cid)
        )
    );
}
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
aws/aws-nitro-enclaves-cli | https://github.com/aws/aws-nitro-enclaves-cli/blob/9fa1c9dd7071a310e3dc2414705ac65a4905ee7e/driver-bindings/src/lib.rs | driver-bindings/src/lib.rs | // Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Rust FFI bindings to Linux Nitro Enclaves driver, generated using
//! [bindgen](https://crates.io/crates/bindgen).
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
// Raw, unmodified bindgen output lives in `bindings`.
pub mod bindings;
// Re-export everything at the crate root so callers can use the driver types
// without naming the inner module.
pub use self::bindings::*;
| rust | Apache-2.0 | 9fa1c9dd7071a310e3dc2414705ac65a4905ee7e | 2026-01-04T20:09:48.102940Z | false |
kadenzipfel/bytepeep | https://github.com/kadenzipfel/bytepeep/blob/f5ff2a69129155a7818e735dbe62f58eeb277731/src/assembler.rs | src/assembler.rs | use crate::{evm::*, types::*};
/// Assembles disassembled bytecode back into a hex string prefixed with "0x".
///
/// Each instruction contributes its two-character opcode encoding, followed by
/// its pushdata (if any) exactly as stored in the `ByteData`.
pub fn assemble(bytecode: &Bytecode) -> String {
    let mut byte_string = String::from("0x");
    for byte in bytecode {
        byte_string.push_str(&String::from(byte.opcode));
        // `if let` replaces the old `!is_none()` check and avoids the
        // needless `clone().unwrap()` allocation on the pushdata.
        if let Some(pushdata) = &byte.pushdata {
            byte_string.push_str(pushdata);
        }
    }
    byte_string
}
#[cfg(test)]
mod tests {
    use super::*;

    // Two PUSH1 instructions should round-trip into "0x60806054".
    #[test]
    fn test_assemble() {
        let input: Bytecode = vec![
            ByteData {
                code_index: 0,
                opcode: Opcode::Push1,
                pushdata: Some("80".to_string()),
            },
            ByteData {
                code_index: 2,
                opcode: Opcode::Push1,
                pushdata: Some("54".to_string()),
            },
        ];

        assert_eq!(String::from("0x60806054"), assemble(&input));
    }

    // PUSH0 carries no pushdata and must emit only its opcode byte.
    #[test]
    fn test_assemble_push0() {
        let input: Bytecode = vec![
            ByteData {
                code_index: 0,
                opcode: Opcode::Push0,
                pushdata: None,
            },
            ByteData {
                code_index: 1,
                opcode: Opcode::Push1,
                pushdata: Some("01".to_string()),
            },
        ];

        assert_eq!(String::from("0x5f6001"), assemble(&input));
    }
}
| rust | Apache-2.0 | f5ff2a69129155a7818e735dbe62f58eeb277731 | 2026-01-04T20:25:00.748536Z | false |
kadenzipfel/bytepeep | https://github.com/kadenzipfel/bytepeep/blob/f5ff2a69129155a7818e735dbe62f58eeb277731/src/evm.rs | src/evm.rs | use std::fmt;
use strum_macros::EnumString;
// All PUSH opcodes, ordered so that the index in this array equals the number
// of immediate data bytes the instruction carries (PUSH_OPS[n] == PushN).
pub const PUSH_OPS: [Opcode; 33] = [
    Opcode::Push0,
    Opcode::Push1,
    Opcode::Push2,
    Opcode::Push3,
    Opcode::Push4,
    Opcode::Push5,
    Opcode::Push6,
    Opcode::Push7,
    Opcode::Push8,
    Opcode::Push9,
    Opcode::Push10,
    Opcode::Push11,
    Opcode::Push12,
    Opcode::Push13,
    Opcode::Push14,
    Opcode::Push15,
    Opcode::Push16,
    Opcode::Push17,
    Opcode::Push18,
    Opcode::Push19,
    Opcode::Push20,
    Opcode::Push21,
    Opcode::Push22,
    Opcode::Push23,
    Opcode::Push24,
    Opcode::Push25,
    Opcode::Push26,
    Opcode::Push27,
    Opcode::Push28,
    Opcode::Push29,
    Opcode::Push30,
    Opcode::Push31,
    Opcode::Push32,
];
// Source: https://github.com/huff-language/huff-rs/blob/main/huff_utils/src/evm.rs
/// EVM Opcodes
/// References <https://evm.codes>
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, EnumString)]
#[strum(serialize_all = "lowercase")]
pub enum Opcode {
    /// Halts execution.
    Stop,
    /// Addition operation
    Add,
    /// Multiplication Operation
    Mul,
    /// Subtraction Operation
    Sub,
    /// Integer Division Operation
    Div,
    /// Signed Integer Division Operation
    Sdiv,
    /// Modulo Remainder Operation
    Mod,
    /// Signed Modulo Remainder Operation
    Smod,
    /// Modulo Addition Operation
    Addmod,
    /// Modulo Multiplication Operation
    Mulmod,
    /// Exponential Operation
    Exp,
    /// Extend Length of Two's Complement Signed Integer
    Signextend,
    /// Less-than Comparison
    Lt,
    /// Greater-than Comparison
    Gt,
    /// Signed Less-than Comparison
    Slt,
    /// Signed Greater-than Comparison
    Sgt,
    /// Equality Comparison
    Eq,
    /// Not Operation
    Iszero,
    /// Bitwise AND Operation
    And,
    /// Bitwise OR Operation
    Or,
    /// Bitwise XOR Operation
    Xor,
    /// Bitwise NOT Operation
    Not,
    /// Retrieve Single Byte from Word
    Byte,
    /// Left Shift Operation
    Shl,
    /// Right Shift Operation
    Shr,
    /// Arithmetic Shift Right Operation
    Sar,
    /// Compute the Keccak-256 hash of a 32-byte word
    Sha3,
    /// Address of currently executing account
    Address,
    /// Balance of a given account
    Balance,
    /// Address of execution origination
    Origin,
    /// Address of the caller
    Caller,
    /// Value of the call
    Callvalue,
    /// Loads Calldata
    Calldataload,
    /// Size of the Calldata
    Calldatasize,
    /// Copies the Calldata to Memory
    Calldatacopy,
    /// Size of the Executing Code
    Codesize,
    /// Copies Executing Code to Memory
    Codecopy,
    /// Current Price of Gas
    Gasprice,
    /// Size of an Account's Code
    Extcodesize,
    /// Copies an Account's Code to Memory
    Extcodecopy,
    /// Size of Output Data from Previous Call
    Returndatasize,
    /// Copies Output Data from Previous Call to Memory
    Returndatacopy,
    /// Hash of a Block from the most recent 256 blocks
    Blockhash,
    /// The Current Blocks Beneficiary Address
    Coinbase,
    /// The Current Blocks Timestamp
    Timestamp,
    /// The Current Blocks Number
    Number,
    /// The Current Blocks Difficulty
    /// (pre-Merge; encodes to the same byte, 0x44, as Prevrandao — see
    /// Opcode::string)
    Difficulty,
    /// Pseudorandomness from the Beacon Chain
    Prevrandao,
    /// The Current Blocks Gas Limit
    Gaslimit,
    /// The Chain ID
    Chainid,
    /// Balance of the Currently Executing Account
    Selfbalance,
    /// Base Fee
    Basefee,
    /// Removes an Item from the Stack
    Pop,
    /// Loads a word from Memory
    Mload,
    /// Stores a word in Memory
    Mstore,
    /// Stores a byte in Memory
    Mstore8,
    /// Load a word from Storage
    Sload,
    /// Store a word in Storage
    Sstore,
    /// Alter the Program Counter
    Jump,
    /// Conditionally Alter the Program Counter
    Jumpi,
    /// Value of the Program Counter Before the Current Instruction
    Pc,
    /// Size of Active Memory in Bytes
    Msize,
    /// Amount of available gas including the cost of the current instruction
    Gas,
    /// Marks a valid destination for jumps
    Jumpdest,
    /// Places 0 on top of the stack
    Push0,
    /// Places 1 byte item on top of the stack
    Push1,
    /// Places 2 byte item on top of the stack
    Push2,
    /// Places 3 byte item on top of the stack
    Push3,
    /// Places 4 byte item on top of the stack
    Push4,
    /// Places 5 byte item on top of the stack
    Push5,
    /// Places 6 byte item on top of the stack
    Push6,
    /// Places 7 byte item on top of the stack
    Push7,
    /// Places 8 byte item on top of the stack
    Push8,
    /// Places 9 byte item on top of the stack
    Push9,
    /// Places 10 byte item on top of the stack
    Push10,
    /// Places 11 byte item on top of the stack
    Push11,
    /// Places 12 byte item on top of the stack
    Push12,
    /// Places 13 byte item on top of the stack
    Push13,
    /// Places 14 byte item on top of the stack
    Push14,
    /// Places 15 byte item on top of the stack
    Push15,
    /// Places 16 byte item on top of the stack
    Push16,
    /// Places 17 byte item on top of the stack
    Push17,
    /// Places 18 byte item on top of the stack
    Push18,
    /// Places 19 byte item on top of the stack
    Push19,
    /// Places 20 byte item on top of the stack
    Push20,
    /// Places 21 byte item on top of the stack
    Push21,
    /// Places 22 byte item on top of the stack
    Push22,
    /// Places 23 byte item on top of the stack
    Push23,
    /// Places 24 byte item on top of the stack
    Push24,
    /// Places 25 byte item on top of the stack
    Push25,
    /// Places 26 byte item on top of the stack
    Push26,
    /// Places 27 byte item on top of the stack
    Push27,
    /// Places 28 byte item on top of the stack
    Push28,
    /// Places 29 byte item on top of the stack
    Push29,
    /// Places 30 byte item on top of the stack
    Push30,
    /// Places 31 byte item on top of the stack
    Push31,
    /// Places 32 byte item on top of the stack
    Push32,
    /// Duplicates the first stack item
    Dup1,
    /// Duplicates the 2nd stack item
    Dup2,
    /// Duplicates the 3rd stack item
    Dup3,
    /// Duplicates the 4th stack item
    Dup4,
    /// Duplicates the 5th stack item
    Dup5,
    /// Duplicates the 6th stack item
    Dup6,
    /// Duplicates the 7th stack item
    Dup7,
    /// Duplicates the 8th stack item
    Dup8,
    /// Duplicates the 9th stack item
    Dup9,
    /// Duplicates the 10th stack item
    Dup10,
    /// Duplicates the 11th stack item
    Dup11,
    /// Duplicates the 12th stack item
    Dup12,
    /// Duplicates the 13th stack item
    Dup13,
    /// Duplicates the 14th stack item
    Dup14,
    /// Duplicates the 15th stack item
    Dup15,
    /// Duplicates the 16th stack item
    Dup16,
    /// Exchange the top two stack items
    Swap1,
    /// Exchange the first and third stack items
    Swap2,
    /// Exchange the first and fourth stack items
    Swap3,
    /// Exchange the first and fifth stack items
    Swap4,
    /// Exchange the first and sixth stack items
    Swap5,
    /// Exchange the first and seventh stack items
    Swap6,
    /// Exchange the first and eighth stack items
    Swap7,
    /// Exchange the first and ninth stack items
    Swap8,
    /// Exchange the first and tenth stack items
    Swap9,
    /// Exchange the first and eleventh stack items
    Swap10,
    /// Exchange the first and twelfth stack items
    Swap11,
    /// Exchange the first and thirteenth stack items
    Swap12,
    /// Exchange the first and fourteenth stack items
    Swap13,
    /// Exchange the first and fifteenth stack items
    Swap14,
    /// Exchange the first and sixteenth stack items
    Swap15,
    /// Exchange the first and seventeenth stack items
    Swap16,
    /// Append Log Record with no Topics
    Log0,
    /// Append Log Record with 1 Topic
    Log1,
    /// Append Log Record with 2 Topics
    Log2,
    /// Append Log Record with 3 Topics
    Log3,
    /// Append Log Record with 4 Topics
    Log4,
    /// Create a new account with associated code
    Create,
    /// Message-call into an account
    Call,
    /// Message-call into this account with an alternative accounts code
    Callcode,
    /// Halt execution, returning output data
    Return,
    /// Message-call into this account with an alternative accounts code, persisting the sender and
    /// value
    Delegatecall,
    /// Create a new account with associated code
    Create2,
    /// Static Message-call into an account
    Staticcall,
    /// Halt execution, reverting state changes, but returning data and remaining gas
    Revert,
    /// Invalid Instruction
    Invalid,
    /// Halt Execution and Register Account for later deletion
    Selfdestruct,
    /// Get hash of an account’s code
    Extcodehash,
    /// Get the blob base-fee of the current block
    Blobbasefee,
    /// Get the versioned hash of transaction blobs
    Blobhash,
    /// Efficiently copy memory areas with less overhead
    Mcopy,
    /// Reads from transient storage
    Tload,
    /// Writes to transient storage
    Tstore,
    /// Non-existent opcode
    InvalidOpcode,
}
// Source: https://github.com/huff-language/huff-rs/blob/main/huff_utils/src/evm.rs
impl Opcode {
    /// Translates a hex string into an Opcode
    ///
    /// Expects a two-character lowercase hex byte (e.g. "60" -> `Push1`).
    /// Any unrecognized string maps to `Opcode::InvalidOpcode`. Note that
    /// "44" decodes to `Prevrandao`, so `Difficulty` is never produced here.
    pub fn new(string: &str) -> Self {
        let opcode = match string {
            "00" => Opcode::Stop,
            "01" => Opcode::Add,
            "02" => Opcode::Mul,
            "03" => Opcode::Sub,
            "04" => Opcode::Div,
            "05" => Opcode::Sdiv,
            "06" => Opcode::Mod,
            "07" => Opcode::Smod,
            "08" => Opcode::Addmod,
            "09" => Opcode::Mulmod,
            "0a" => Opcode::Exp,
            "0b" => Opcode::Signextend,
            "10" => Opcode::Lt,
            "11" => Opcode::Gt,
            "12" => Opcode::Slt,
            "13" => Opcode::Sgt,
            "14" => Opcode::Eq,
            "15" => Opcode::Iszero,
            "16" => Opcode::And,
            "17" => Opcode::Or,
            "18" => Opcode::Xor,
            "19" => Opcode::Not,
            "1a" => Opcode::Byte,
            "1b" => Opcode::Shl,
            "1c" => Opcode::Shr,
            "1d" => Opcode::Sar,
            "20" => Opcode::Sha3,
            "30" => Opcode::Address,
            "31" => Opcode::Balance,
            "32" => Opcode::Origin,
            "33" => Opcode::Caller,
            "34" => Opcode::Callvalue,
            "35" => Opcode::Calldataload,
            "36" => Opcode::Calldatasize,
            "37" => Opcode::Calldatacopy,
            "38" => Opcode::Codesize,
            "39" => Opcode::Codecopy,
            "3a" => Opcode::Gasprice,
            "3b" => Opcode::Extcodesize,
            "3c" => Opcode::Extcodecopy,
            "3d" => Opcode::Returndatasize,
            "3e" => Opcode::Returndatacopy,
            "3f" => Opcode::Extcodehash,
            "40" => Opcode::Blockhash,
            "41" => Opcode::Coinbase,
            "42" => Opcode::Timestamp,
            "43" => Opcode::Number,
            "44" => Opcode::Prevrandao,
            "45" => Opcode::Gaslimit,
            "46" => Opcode::Chainid,
            "47" => Opcode::Selfbalance,
            "48" => Opcode::Basefee,
            "49" => Opcode::Blobhash,
            "4a" => Opcode::Blobbasefee,
            "50" => Opcode::Pop,
            "51" => Opcode::Mload,
            "52" => Opcode::Mstore,
            "53" => Opcode::Mstore8,
            "54" => Opcode::Sload,
            "55" => Opcode::Sstore,
            "56" => Opcode::Jump,
            "57" => Opcode::Jumpi,
            "58" => Opcode::Pc,
            "59" => Opcode::Msize,
            "5a" => Opcode::Gas,
            "5b" => Opcode::Jumpdest,
            "5c" => Opcode::Tload,
            "5d" => Opcode::Tstore,
            "5e" => Opcode::Mcopy,
            "5f" => Opcode::Push0,
            "60" => Opcode::Push1,
            "61" => Opcode::Push2,
            "62" => Opcode::Push3,
            "63" => Opcode::Push4,
            "64" => Opcode::Push5,
            "65" => Opcode::Push6,
            "66" => Opcode::Push7,
            "67" => Opcode::Push8,
            "68" => Opcode::Push9,
            "69" => Opcode::Push10,
            "6a" => Opcode::Push11,
            "6b" => Opcode::Push12,
            "6c" => Opcode::Push13,
            "6d" => Opcode::Push14,
            "6e" => Opcode::Push15,
            "6f" => Opcode::Push16,
            "70" => Opcode::Push17,
            "71" => Opcode::Push18,
            "72" => Opcode::Push19,
            "73" => Opcode::Push20,
            "74" => Opcode::Push21,
            "75" => Opcode::Push22,
            "76" => Opcode::Push23,
            "77" => Opcode::Push24,
            "78" => Opcode::Push25,
            "79" => Opcode::Push26,
            "7a" => Opcode::Push27,
            "7b" => Opcode::Push28,
            "7c" => Opcode::Push29,
            "7d" => Opcode::Push30,
            "7e" => Opcode::Push31,
            "7f" => Opcode::Push32,
            "80" => Opcode::Dup1,
            "81" => Opcode::Dup2,
            "82" => Opcode::Dup3,
            "83" => Opcode::Dup4,
            "84" => Opcode::Dup5,
            "85" => Opcode::Dup6,
            "86" => Opcode::Dup7,
            "87" => Opcode::Dup8,
            "88" => Opcode::Dup9,
            "89" => Opcode::Dup10,
            "8a" => Opcode::Dup11,
            "8b" => Opcode::Dup12,
            "8c" => Opcode::Dup13,
            "8d" => Opcode::Dup14,
            "8e" => Opcode::Dup15,
            "8f" => Opcode::Dup16,
            "90" => Opcode::Swap1,
            "91" => Opcode::Swap2,
            "92" => Opcode::Swap3,
            "93" => Opcode::Swap4,
            "94" => Opcode::Swap5,
            "95" => Opcode::Swap6,
            "96" => Opcode::Swap7,
            "97" => Opcode::Swap8,
            "98" => Opcode::Swap9,
            "99" => Opcode::Swap10,
            "9a" => Opcode::Swap11,
            "9b" => Opcode::Swap12,
            "9c" => Opcode::Swap13,
            "9d" => Opcode::Swap14,
            "9e" => Opcode::Swap15,
            "9f" => Opcode::Swap16,
            "a0" => Opcode::Log0,
            "a1" => Opcode::Log1,
            "a2" => Opcode::Log2,
            "a3" => Opcode::Log3,
            "a4" => Opcode::Log4,
            "f0" => Opcode::Create,
            "f1" => Opcode::Call,
            "f2" => Opcode::Callcode,
            "f3" => Opcode::Return,
            "f4" => Opcode::Delegatecall,
            "f5" => Opcode::Create2,
            "fa" => Opcode::Staticcall,
            "fd" => Opcode::Revert,
            "fe" => Opcode::Invalid,
            "ff" => Opcode::Selfdestruct,
            _ => Opcode::InvalidOpcode,
        };
        opcode
    }
    /// Translates an Opcode into a string
    ///
    /// Returns the two-character lowercase hex encoding. `Difficulty` and
    /// `Prevrandao` both encode to "44"; `InvalidOpcode` encodes to the
    /// sentinel "xx", which is not valid hex.
    pub fn string(&self) -> String {
        let opcode_str = match self {
            Opcode::Stop => "00",
            Opcode::Add => "01",
            Opcode::Mul => "02",
            Opcode::Sub => "03",
            Opcode::Div => "04",
            Opcode::Sdiv => "05",
            Opcode::Mod => "06",
            Opcode::Smod => "07",
            Opcode::Addmod => "08",
            Opcode::Mulmod => "09",
            Opcode::Exp => "0a",
            Opcode::Signextend => "0b",
            Opcode::Lt => "10",
            Opcode::Gt => "11",
            Opcode::Slt => "12",
            Opcode::Sgt => "13",
            Opcode::Eq => "14",
            Opcode::Iszero => "15",
            Opcode::And => "16",
            Opcode::Or => "17",
            Opcode::Xor => "18",
            Opcode::Not => "19",
            Opcode::Byte => "1a",
            Opcode::Shl => "1b",
            Opcode::Shr => "1c",
            Opcode::Sar => "1d",
            Opcode::Sha3 => "20",
            Opcode::Address => "30",
            Opcode::Balance => "31",
            Opcode::Origin => "32",
            Opcode::Caller => "33",
            Opcode::Callvalue => "34",
            Opcode::Calldataload => "35",
            Opcode::Calldatasize => "36",
            Opcode::Calldatacopy => "37",
            Opcode::Codesize => "38",
            Opcode::Codecopy => "39",
            Opcode::Gasprice => "3a",
            Opcode::Extcodesize => "3b",
            Opcode::Extcodecopy => "3c",
            Opcode::Returndatasize => "3d",
            Opcode::Returndatacopy => "3e",
            Opcode::Extcodehash => "3f",
            Opcode::Blockhash => "40",
            Opcode::Coinbase => "41",
            Opcode::Timestamp => "42",
            Opcode::Number => "43",
            Opcode::Difficulty => "44",
            Opcode::Prevrandao => "44",
            Opcode::Gaslimit => "45",
            Opcode::Chainid => "46",
            Opcode::Selfbalance => "47",
            Opcode::Basefee => "48",
            Opcode::Blobhash => "49",
            Opcode::Blobbasefee => "4a",
            Opcode::Pop => "50",
            Opcode::Mload => "51",
            Opcode::Mstore => "52",
            Opcode::Mstore8 => "53",
            Opcode::Sload => "54",
            Opcode::Sstore => "55",
            Opcode::Jump => "56",
            Opcode::Jumpi => "57",
            Opcode::Pc => "58",
            Opcode::Msize => "59",
            Opcode::Gas => "5a",
            Opcode::Jumpdest => "5b",
            Opcode::Tload => "5c",
            Opcode::Tstore => "5d",
            Opcode::Mcopy => "5e",
            Opcode::Push0 => "5f",
            Opcode::Push1 => "60",
            Opcode::Push2 => "61",
            Opcode::Push3 => "62",
            Opcode::Push4 => "63",
            Opcode::Push5 => "64",
            Opcode::Push6 => "65",
            Opcode::Push7 => "66",
            Opcode::Push8 => "67",
            Opcode::Push9 => "68",
            Opcode::Push10 => "69",
            Opcode::Push11 => "6a",
            Opcode::Push12 => "6b",
            Opcode::Push13 => "6c",
            Opcode::Push14 => "6d",
            Opcode::Push15 => "6e",
            Opcode::Push16 => "6f",
            Opcode::Push17 => "70",
            Opcode::Push18 => "71",
            Opcode::Push19 => "72",
            Opcode::Push20 => "73",
            Opcode::Push21 => "74",
            Opcode::Push22 => "75",
            Opcode::Push23 => "76",
            Opcode::Push24 => "77",
            Opcode::Push25 => "78",
            Opcode::Push26 => "79",
            Opcode::Push27 => "7a",
            Opcode::Push28 => "7b",
            Opcode::Push29 => "7c",
            Opcode::Push30 => "7d",
            Opcode::Push31 => "7e",
            Opcode::Push32 => "7f",
            Opcode::Dup1 => "80",
            Opcode::Dup2 => "81",
            Opcode::Dup3 => "82",
            Opcode::Dup4 => "83",
            Opcode::Dup5 => "84",
            Opcode::Dup6 => "85",
            Opcode::Dup7 => "86",
            Opcode::Dup8 => "87",
            Opcode::Dup9 => "88",
            Opcode::Dup10 => "89",
            Opcode::Dup11 => "8a",
            Opcode::Dup12 => "8b",
            Opcode::Dup13 => "8c",
            Opcode::Dup14 => "8d",
            Opcode::Dup15 => "8e",
            Opcode::Dup16 => "8f",
            Opcode::Swap1 => "90",
            Opcode::Swap2 => "91",
            Opcode::Swap3 => "92",
            Opcode::Swap4 => "93",
            Opcode::Swap5 => "94",
            Opcode::Swap6 => "95",
            Opcode::Swap7 => "96",
            Opcode::Swap8 => "97",
            Opcode::Swap9 => "98",
            Opcode::Swap10 => "99",
            Opcode::Swap11 => "9a",
            Opcode::Swap12 => "9b",
            Opcode::Swap13 => "9c",
            Opcode::Swap14 => "9d",
            Opcode::Swap15 => "9e",
            Opcode::Swap16 => "9f",
            Opcode::Log0 => "a0",
            Opcode::Log1 => "a1",
            Opcode::Log2 => "a2",
            Opcode::Log3 => "a3",
            Opcode::Log4 => "a4",
            Opcode::Create => "f0",
            Opcode::Call => "f1",
            Opcode::Callcode => "f2",
            Opcode::Return => "f3",
            Opcode::Delegatecall => "f4",
            Opcode::Create2 => "f5",
            Opcode::Staticcall => "fa",
            Opcode::Revert => "fd",
            Opcode::Invalid => "fe",
            Opcode::Selfdestruct => "ff",
            Opcode::InvalidOpcode => "xx",
        };
        opcode_str.to_string()
    }
    /// Translates an Opcode into opcode string
    ///
    /// Returns the variant's human-readable name (e.g. "Push1").
    pub fn op_string(&self) -> String {
        let opcode_str = match self {
            Opcode::Stop => "Stop",
            Opcode::Add => "Add",
            Opcode::Mul => "Mul",
            Opcode::Sub => "Sub",
            Opcode::Div => "Div",
            Opcode::Sdiv => "Sdiv",
            Opcode::Mod => "Mod",
            Opcode::Smod => "Smod",
            Opcode::Addmod => "Addmod",
            Opcode::Mulmod => "Mulmod",
            Opcode::Exp => "Exp",
            Opcode::Signextend => "Signextend",
            Opcode::Lt => "Lt",
            Opcode::Gt => "Gt",
            Opcode::Slt => "Slt",
            Opcode::Sgt => "Sgt",
            Opcode::Eq => "Eq",
            Opcode::Iszero => "Iszero",
            Opcode::And => "And",
            Opcode::Or => "Or",
            Opcode::Xor => "Xor",
            Opcode::Not => "Not",
            Opcode::Byte => "Byte",
            Opcode::Shl => "Shl",
            Opcode::Shr => "Shr",
            Opcode::Sar => "Sar",
            Opcode::Sha3 => "Sha3",
            Opcode::Address => "Address",
            Opcode::Balance => "Balance",
            Opcode::Origin => "Origin",
            Opcode::Caller => "Caller",
            Opcode::Callvalue => "Callvalue",
            Opcode::Calldataload => "Calldataload",
            Opcode::Calldatasize => "Calldatasize",
            Opcode::Calldatacopy => "Calldatacopy",
            Opcode::Codesize => "Codesize",
            Opcode::Codecopy => "Codecopy",
            Opcode::Gasprice => "Gasprice",
            Opcode::Extcodesize => "Extcodesize",
            Opcode::Extcodecopy => "Extcodecopy",
            Opcode::Returndatasize => "Returndatasize",
            Opcode::Returndatacopy => "Returndatacopy",
            Opcode::Extcodehash => "Extcodehash",
            Opcode::Blockhash => "Blockhash",
            Opcode::Coinbase => "Coinbase",
            Opcode::Timestamp => "Timestamp",
            Opcode::Number => "Number",
            Opcode::Difficulty => "Difficulty",
            Opcode::Prevrandao => "Prevrandao",
            Opcode::Gaslimit => "Gaslimit",
            Opcode::Chainid => "Chainid",
            Opcode::Selfbalance => "Selfbalance",
            Opcode::Basefee => "Basefee",
            Opcode::Pop => "Pop",
            Opcode::Mload => "Mload",
            Opcode::Mstore => "Mstore",
            Opcode::Mstore8 => "Mstore8",
            Opcode::Sload => "Sload",
            Opcode::Sstore => "Sstore",
            Opcode::Jump => "Jump",
            Opcode::Jumpi => "Jumpi",
            Opcode::Pc => "Pc",
            Opcode::Msize => "Msize",
            Opcode::Gas => "Gas",
            Opcode::Jumpdest => "Jumpdest",
            Opcode::Push0 => "Push0",
            Opcode::Push1 => "Push1",
            Opcode::Push2 => "Push2",
            Opcode::Push3 => "Push3",
            Opcode::Push4 => "Push4",
            Opcode::Push5 => "Push5",
            Opcode::Push6 => "Push6",
            Opcode::Push7 => "Push7",
            Opcode::Push8 => "Push8",
            Opcode::Push9 => "Push9",
            Opcode::Push10 => "Push10",
            Opcode::Push11 => "Push11",
            Opcode::Push12 => "Push12",
            Opcode::Push13 => "Push13",
            Opcode::Push14 => "Push14",
            Opcode::Push15 => "Push15",
            Opcode::Push16 => "Push16",
            Opcode::Push17 => "Push17",
            Opcode::Push18 => "Push18",
            Opcode::Push19 => "Push19",
            Opcode::Push20 => "Push20",
            Opcode::Push21 => "Push21",
            Opcode::Push22 => "Push22",
            Opcode::Push23 => "Push23",
            Opcode::Push24 => "Push24",
            Opcode::Push25 => "Push25",
            Opcode::Push26 => "Push26",
            Opcode::Push27 => "Push27",
            Opcode::Push28 => "Push28",
            Opcode::Push29 => "Push29",
            Opcode::Push30 => "Push30",
            Opcode::Push31 => "Push31",
            Opcode::Push32 => "Push32",
            Opcode::Dup1 => "Dup1",
            Opcode::Dup2 => "Dup2",
            Opcode::Dup3 => "Dup3",
            Opcode::Dup4 => "Dup4",
            Opcode::Dup5 => "Dup5",
            Opcode::Dup6 => "Dup6",
            Opcode::Dup7 => "Dup7",
            Opcode::Dup8 => "Dup8",
            Opcode::Dup9 => "Dup9",
            Opcode::Dup10 => "Dup10",
            Opcode::Dup11 => "Dup11",
            Opcode::Dup12 => "Dup12",
            Opcode::Dup13 => "Dup13",
            Opcode::Dup14 => "Dup14",
            Opcode::Dup15 => "Dup15",
            Opcode::Dup16 => "Dup16",
            Opcode::Swap1 => "Swap1",
            Opcode::Swap2 => "Swap2",
            Opcode::Swap3 => "Swap3",
            Opcode::Swap4 => "Swap4",
            Opcode::Swap5 => "Swap5",
            Opcode::Swap6 => "Swap6",
            Opcode::Swap7 => "Swap7",
            Opcode::Swap8 => "Swap8",
            Opcode::Swap9 => "Swap9",
            Opcode::Swap10 => "Swap10",
            Opcode::Swap11 => "Swap11",
            Opcode::Swap12 => "Swap12",
            Opcode::Swap13 => "Swap13",
            Opcode::Swap14 => "Swap14",
            Opcode::Swap15 => "Swap15",
            Opcode::Swap16 => "Swap16",
            Opcode::Log0 => "Log0",
            Opcode::Log1 => "Log1",
            Opcode::Log2 => "Log2",
            Opcode::Log3 => "Log3",
            Opcode::Log4 => "Log4",
            Opcode::Create => "Create",
            Opcode::Call => "Call",
            Opcode::Callcode => "Callcode",
            Opcode::Return => "Return",
            Opcode::Delegatecall => "Delegatecall",
            Opcode::Create2 => "Create2",
            Opcode::Staticcall => "Staticcall",
            Opcode::Revert => "Revert",
            Opcode::Invalid => "Invalid",
            Opcode::Selfdestruct => "Selfdestruct",
            Opcode::Blobbasefee => "Blobbasefee",
            Opcode::Blobhash => "Blobhash",
            Opcode::Mcopy => "Mcopy",
            Opcode::Tload => "Tload",
            Opcode::Tstore => "Tstore",
            Opcode::InvalidOpcode => "InvalidOpcode",
        };
        opcode_str.to_string()
    }
}
impl fmt::Display for Opcode {
    /// Writes the opcode's two-character hex encoding.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(&self.string())
    }
}
impl From<Opcode> for String {
    /// Converts an opcode into its two-character hex representation.
    fn from(opcode: Opcode) -> Self {
        opcode.string()
    }
}
| rust | Apache-2.0 | f5ff2a69129155a7818e735dbe62f58eeb277731 | 2026-01-04T20:25:00.748536Z | false |
kadenzipfel/bytepeep | https://github.com/kadenzipfel/bytepeep/blob/f5ff2a69129155a7818e735dbe62f58eeb277731/src/checks.rs | src/checks.rs | use crate::{evm::*, types::*};
/// Returns true when the bytecode includes any Jump or Jumpi instruction.
pub fn contains_jumps(bytecode: &Bytecode) -> bool {
    bytecode
        .iter()
        .any(|byte| matches!(byte.opcode, Opcode::Jump | Opcode::Jumpi))
}
#[cfg(test)]
mod tests {
    use super::*;

    // A sequence containing Jump (and Jumpdest) must be detected.
    #[test]
    fn test_contains_jumps() {
        let code: Bytecode = vec![
            ByteData {
                code_index: 0,
                opcode: Opcode::Push1,
                pushdata: Some("4".to_string()),
            },
            ByteData {
                code_index: 2,
                opcode: Opcode::Jump,
                pushdata: None,
            },
            ByteData {
                code_index: 3,
                opcode: Opcode::Swap1,
                pushdata: None,
            },
            ByteData {
                code_index: 4,
                opcode: Opcode::Jumpdest,
                pushdata: None,
            },
        ];

        assert_eq!(true, contains_jumps(&code));
    }

    // Straight-line arithmetic code contains no jumps.
    #[test]
    fn test_not_contains_jumps() {
        let code: Bytecode = vec![
            ByteData {
                code_index: 0,
                opcode: Opcode::Push1,
                pushdata: Some("80".to_string()),
            },
            ByteData {
                code_index: 2,
                opcode: Opcode::Push1,
                pushdata: Some("54".to_string()),
            },
            ByteData {
                code_index: 4,
                opcode: Opcode::Swap1,
                pushdata: None,
            },
            ByteData {
                code_index: 5,
                opcode: Opcode::Add,
                pushdata: None,
            },
        ];

        assert_eq!(false, contains_jumps(&code));
    }
}
| rust | Apache-2.0 | f5ff2a69129155a7818e735dbe62f58eeb277731 | 2026-01-04T20:25:00.748536Z | false |
kadenzipfel/bytepeep | https://github.com/kadenzipfel/bytepeep/blob/f5ff2a69129155a7818e735dbe62f58eeb277731/src/types.rs | src/types.rs | use crate::evm::Opcode;
// Hex-encoded immediate bytes that follow a PUSH opcode (e.g. "80").
pub type PushData = String;
// One disassembled instruction.
#[derive(Debug, PartialEq, Clone)]
pub struct ByteData {
    // Byte offset of this instruction within the raw bytecode.
    pub code_index: usize,
    // The instruction itself.
    pub opcode: Opcode,
    // Immediate data for PUSH instructions; None for other opcodes
    // (see the tests in checks.rs / assembler.rs for usage).
    pub pushdata: Option<PushData>
}
// A disassembled program: instructions in code order.
pub type Bytecode = Vec<ByteData>;
kadenzipfel/bytepeep | https://github.com/kadenzipfel/bytepeep/blob/f5ff2a69129155a7818e735dbe62f58eeb277731/src/utils.rs | src/utils.rs | use crate::evm::*;
use std::{path::Path, process::Command};
// Kind of input handed to the tool: raw bytecode or Huff source to compile.
// Parsed from strings case-insensitively via strum (e.g. "raw", "Huff").
#[derive(Debug, Clone, strum::EnumString)]
#[strum(ascii_case_insensitive)]
pub enum Source {
    Raw,
    Huff,
}
/// Compiles a Huff source file by shelling out to `huffc -b` and returns the
/// resulting bytecode string (the last line of huffc's stdout, trimmed and
/// stripped of a trailing '%'). All failures are reported as `Err(String)`.
pub fn compile_huff(path: &str) -> Result<String, String> {
    if !Path::new(path).exists() {
        return Err(format!("File not found: {}", path));
    }

    // Run the external compiler; a spawn failure is distinct from a
    // compile failure (non-zero exit status) below.
    let result = Command::new("huffc").args(["-b", path]).output();
    let output = match result {
        Ok(out) => out,
        Err(e) => return Err(format!("Failed to compile the huff code: {}", e)),
    };

    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        return Err(format!("huffc failed: {}", stderr));
    }

    // huffc prints the bytecode on the final line of stdout.
    let stdout = String::from_utf8_lossy(&output.stdout);
    match stdout.lines().last() {
        Some(line) => Ok(line.trim().trim_end_matches('%').to_string()),
        None => Err("No output".to_string()),
    }
}
// Find minimum viable length for pushdata
//
// Strips leading "00" bytes from a hex-encoded pushdata string and returns
// the remaining byte length together with the trimmed string. An all-zero
// (or empty) input yields `(0, "")`, which callers encode as PUSH0.
//
// Takes `&str` instead of `&String` (deref coercion keeps existing callers
// working) and compares against the `"00"` literal instead of allocating a
// fresh `String` on every iteration.
pub fn min_pushdata_len(string: &str) -> (usize, String) {
    let mut len = string.len() / 2;
    let mut start = 0;
    for i in 0..len {
        // Each hex pair is one byte; stop at the first non-zero byte.
        if &string[i * 2..i * 2 + 2] == "00" {
            len -= 1;
            start += 2;
        } else {
            break;
        }
    }
    (len, string[start..].to_string())
}
// Get push size from PushN opcode
//
// Returns the number of immediate bytes that follow the opcode: N for
// Push1..=Push32, and 0 for Push0 or any non-push opcode (the `_` arm).
pub fn match_push_n(opcode: Opcode) -> usize {
    match opcode {
        Opcode::Push0 => 0,
        Opcode::Push1 => 1,
        Opcode::Push2 => 2,
        Opcode::Push3 => 3,
        Opcode::Push4 => 4,
        Opcode::Push5 => 5,
        Opcode::Push6 => 6,
        Opcode::Push7 => 7,
        Opcode::Push8 => 8,
        Opcode::Push9 => 9,
        Opcode::Push10 => 10,
        Opcode::Push11 => 11,
        Opcode::Push12 => 12,
        Opcode::Push13 => 13,
        Opcode::Push14 => 14,
        Opcode::Push15 => 15,
        Opcode::Push16 => 16,
        Opcode::Push17 => 17,
        Opcode::Push18 => 18,
        Opcode::Push19 => 19,
        Opcode::Push20 => 20,
        Opcode::Push21 => 21,
        Opcode::Push22 => 22,
        Opcode::Push23 => 23,
        Opcode::Push24 => 24,
        Opcode::Push25 => 25,
        Opcode::Push26 => 26,
        Opcode::Push27 => 27,
        Opcode::Push28 => 28,
        Opcode::Push29 => 29,
        Opcode::Push30 => 30,
        Opcode::Push31 => 31,
        Opcode::Push32 => 32,
        // Every non-push opcode carries no immediate bytes.
        _ => 0,
    }
}
// Gate the test module behind `cfg(test)` so it is not compiled into normal
// builds (the other modules in this crate, e.g. the disassembler, already
// do this).
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_min_pushdata_len() {
        // No leading zero bytes: length and data are unchanged.
        let push_string = String::from("10101010");
        assert_eq!((4, String::from("10101010")), min_pushdata_len(&push_string));

        // One leading zero byte is stripped.
        let push_string = String::from("00100010");
        assert_eq!((3, String::from("100010")), min_pushdata_len(&push_string));
    }
}
| rust | Apache-2.0 | f5ff2a69129155a7818e735dbe62f58eeb277731 | 2026-01-04T20:25:00.748536Z | false |
kadenzipfel/bytepeep | https://github.com/kadenzipfel/bytepeep/blob/f5ff2a69129155a7818e735dbe62f58eeb277731/src/disassembler.rs | src/disassembler.rs | use crate::{evm::*, types::*, utils::*};
// Output disassembled bytecode string
//
// Renders each instruction on its own line as "<code_index> <MNEMONIC>",
// with the pushdata appended when present. Every line starts with '\n',
// so the result begins with a newline (matching the pretty-printer in main).
pub fn output(bytecode: &Bytecode) -> String {
    let mut rendered = String::new();
    for byte in bytecode {
        match &byte.pushdata {
            Some(data) => rendered.push_str(
                format!("\n{} {} {}", byte.code_index, byte.opcode.op_string(), data).as_str(),
            ),
            None => rendered.push_str(
                format!("\n{} {}", byte.code_index, byte.opcode.op_string()).as_str(),
            ),
        }
    }
    rendered
}
// Disassemble bytecode
//
// Decodes a hex string (with or without a leading "0x") into a `Bytecode`
// vector, pairing each PushN opcode with its immediate data and tracking the
// byte offset (`code_index`) of every instruction.
//
// Panics if the string is not valid hex-pair-aligned bytecode (the slicing
// goes out of range), matching the original behavior.
pub fn disassemble(byte_string: &str) -> Bytecode {
    // Remove leading 0x if present (no temporary String allocation).
    let trimmed = byte_string.strip_prefix("0x").unwrap_or(byte_string);

    let mut bytecode: Bytecode = Vec::new();
    let mut i = 0;
    let mut code_index: usize = 0;

    // Grab each opcode and corresponding pushdata if present.
    while i < trimmed.len() {
        let opcode = Opcode::new(&trimmed[i..i + 2]);
        let bytes_to_push = match_push_n(opcode);

        // Push0 and every non-push opcode carry no immediate bytes.
        // (Previously non-push opcodes were given `Some("")`; `None` is the
        // canonical form used by every other `ByteData` in the codebase.)
        let pushdata = if bytes_to_push == 0 {
            None
        } else {
            Some(trimmed[i + 2..i + 2 + bytes_to_push * 2].to_string())
        };

        bytecode.push(ByteData {
            code_index,
            opcode,
            pushdata,
        });

        // Advance by the opcode byte (2 hex chars) plus its immediate data.
        i += 2 + bytes_to_push * 2;
        code_index += 1 + bytes_to_push;
    }
    bytecode
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Shorthand constructor for a `ByteData` literal.
    fn byte(code_index: usize, opcode: Opcode, pushdata: Option<&str>) -> ByteData {
        ByteData {
            code_index,
            opcode,
            pushdata: pushdata.map(String::from),
        }
    }

    #[test]
    fn test_disassemble() {
        // 0x-prefixed input: PUSH2 8080, PUSH1 54.
        let byte_string = String::from("0x6180806054");
        let expected = vec![
            byte(0, Opcode::Push2, Some("8080")),
            byte(3, Opcode::Push1, Some("54")),
        ];
        assert_eq!(expected, disassemble(&byte_string));
    }

    #[test]
    fn test_disassemble_no_0x() {
        // Same bytecode without the 0x prefix decodes identically.
        let byte_string = String::from("6180806054");
        let expected = vec![
            byte(0, Opcode::Push2, Some("8080")),
            byte(3, Opcode::Push1, Some("54")),
        ];
        assert_eq!(expected, disassemble(&byte_string));
    }

    #[test]
    fn test_disassemble_push0() {
        // PUSH0 followed by PUSH1 01: Push0 carries no pushdata, and the
        // regular push after it still decodes correctly.
        let byte_string = String::from("5f6001");
        let expected = vec![
            byte(0, Opcode::Push0, None),
            byte(1, Opcode::Push1, Some("01")),
        ];
        assert_eq!(expected, disassemble(&byte_string));
    }
}
| rust | Apache-2.0 | f5ff2a69129155a7818e735dbe62f58eeb277731 | 2026-01-04T20:25:00.748536Z | false |
kadenzipfel/bytepeep | https://github.com/kadenzipfel/bytepeep/blob/f5ff2a69129155a7818e735dbe62f58eeb277731/src/main.rs | src/main.rs | use colored::Colorize;
use std::iter;
use clap::Parser;
use crate::{
assembler::*,
checks::contains_jumps,
disassembler::*,
peephole::*,
types::*,
utils::{Source, compile_huff},
};
mod assembler;
mod checks;
mod disassembler;
mod evm;
mod peephole;
mod rules;
mod types;
mod utils;
// NOTE: field comments are deliberately `//` rather than `///` — clap turns
// doc comments into help text, which would change the CLI's --help output.
#[derive(Parser)]
pub struct Cli {
    // Raw hex bytecode (0x-prefixed or not), or a path to a Huff source file
    // when `--source huff` is given.
    bytecode: String,
    #[clap(long, help = "Source type (raw/huff)", default_value = "raw")]
    source: Source,
}
fn main() {
let args: Cli = Cli::parse();
let bytecode = match args.source {
Source::Raw => args.bytecode.clone(),
Source::Huff => match compile_huff(&args.bytecode) {
Ok(code) => code,
Err(e) => {
eprintln!("Error: {}", e.red());
std::process::exit(1);
}
}
};
println!("Bytecode: {}", bytecode);
let bytes: Bytecode = disassemble(&bytecode);
let output_bytes = output(&bytes);
let jump_warning: bool = contains_jumps(&bytes);
let optimized_bytes: Bytecode = optimize(&bytes);
let output_optimized_bytes = output(&optimized_bytes);
let optimized_bytecode = assemble(&optimized_bytes);
// Pretty print unoptimized and optimized bytecode
let left_pad = output_bytes.lines().map(|l| l.len()).max().unwrap_or(0) + 2;
println!("\n{:width$} {}", "Unoptimized", "Optimized", width = left_pad);
for (output_bytes, output_optimized_bytes) in output_bytes.lines().zip(output_optimized_bytes.lines().chain(iter::repeat(""))) {
println!("{:width$} {}", output_bytes, output_optimized_bytes, width = left_pad);
}
println!("\nOptimized bytecode: {}", optimized_bytecode);
disassemble(&optimized_bytecode);
// Warn if jumps present
if jump_warning {
println!(
"{}",
format!("WARNING: Jumps are not yet supported. Output jumps are likely invalid.").yellow()
);
}
}
| rust | Apache-2.0 | f5ff2a69129155a7818e735dbe62f58eeb277731 | 2026-01-04T20:25:00.748536Z | false |
kadenzipfel/bytepeep | https://github.com/kadenzipfel/bytepeep/blob/f5ff2a69129155a7818e735dbe62f58eeb277731/src/rules.rs | src/rules.rs | use crate::{evm::*, types::*, utils::*};
// Check rules against provided peephole
pub fn check_rules(peephole: &mut Bytecode) -> Bytecode {
// Individual op checks
for i in 0..2 {
let mut byte: ByteData = peephole[i].clone();
// Reducable push size
for j in 1..32 {
if byte.opcode == PUSH_OPS[j] {
let (min_len, min_string) = min_pushdata_len(&peephole[i].clone().pushdata.as_ref().unwrap());
if min_len == 0 {
byte = ByteData {
code_index: byte.code_index,
opcode: Opcode::Push0,
pushdata: None,
};
} else if min_len - 1 < j {
byte = ByteData {
code_index: byte.code_index,
opcode: PUSH_OPS[min_len],
pushdata: Some(min_string),
};
}
}
}
peephole[i] = byte;
}
// Peephole (2 op) checks
let new_bytecode: Bytecode = match peephole[..] {
// Redundant swaps on commutative operations
[ByteData {
opcode: Opcode::Swap1,
..
}, ByteData {
opcode: Opcode::Add,
..
}] => peephole[1..].to_vec(),
[ByteData {
opcode: Opcode::Swap1,
..
}, ByteData {
opcode: Opcode::Mul,
..
}] => peephole[1..].to_vec(),
[ByteData {
opcode: Opcode::Swap1,
..
}, ByteData {
opcode: Opcode::Eq,
..
}] => peephole[1..].to_vec(),
[ByteData {
opcode: Opcode::Swap1,
..
}, ByteData {
opcode: Opcode::And,
..
}] => peephole[1..].to_vec(),
[ByteData {
opcode: Opcode::Swap1,
..
}, ByteData {
opcode: Opcode::Or,
..
}] => peephole[1..].to_vec(),
[ByteData {
opcode: Opcode::Swap1,
..
}, ByteData {
opcode: Opcode::Xor,
..
}] => peephole[1..].to_vec(),
// Operations involving an expression and itself
[ByteData {
opcode: Opcode::Dup1,
..
}, ByteData {
opcode: Opcode::And,
..
}] => [].to_vec(),
[ByteData {
opcode: Opcode::Dup1,
..
}, ByteData {
opcode: Opcode::Or,
..
}] => [].to_vec(),
[ByteData {
opcode: Opcode::Dup1,
..
}, ByteData {
opcode: Opcode::Xor,
..
}] => [
ByteData {
code_index: peephole[0].code_index,
opcode: Opcode::Push1,
pushdata: Some(String::from("00")),
}
].to_vec(),
[ByteData {
opcode: Opcode::Dup1,
..
}, ByteData {
opcode: Opcode::Sub,
..
}] => [
ByteData {
code_index: peephole[0].code_index,
opcode: Opcode::Push1,
pushdata: Some(String::from("00")),
}
].to_vec(),
[ByteData {
opcode: Opcode::Dup1,
..
}, ByteData {
opcode: Opcode::Eq,
..
}] => [
ByteData {
code_index: peephole[0].code_index,
opcode: Opcode::Push1,
pushdata: Some(String::from("01")),
},
].to_vec(),
[ByteData {
opcode: Opcode::Dup1,
..
}, ByteData {
opcode: Opcode::Lt,
..
}] => [
ByteData {
code_index: peephole[0].code_index,
opcode: Opcode::Push1,
pushdata: Some(String::from("00")),
},
].to_vec(),
[ByteData {
opcode: Opcode::Dup1,
..
}, ByteData {
opcode: Opcode::Slt,
..
}] => [
ByteData {
code_index: peephole[0].code_index,
opcode: Opcode::Push1,
pushdata: Some(String::from("00")),
},
].to_vec(),
[ByteData {
opcode: Opcode::Dup1,
..
}, ByteData {
opcode: Opcode::Gt,
..
}] => [
ByteData {
code_index: peephole[0].code_index,
opcode: Opcode::Push1,
pushdata: Some(String::from("00")),
},
].to_vec(),
[ByteData {
opcode: Opcode::Dup1,
..
}, ByteData {
opcode: Opcode::Sgt,
..
}] => [
ByteData {
code_index: peephole[0].code_index,
opcode: Opcode::Push1,
pushdata: Some(String::from("00")),
},
].to_vec(),
[ByteData {
opcode: Opcode::Dup1,
..
}, ByteData {
opcode: Opcode::Mod,
..
}] => [
ByteData {
code_index: peephole[0].code_index,
opcode: Opcode::Push1,
pushdata: Some(String::from("00")),
},
].to_vec(),
// Duplicate pushes
[ByteData {
opcode: Opcode::Push1,
..
}, ByteData {
opcode: Opcode::Push1,
..
}] | [ByteData {
opcode: Opcode::Push2,
..
}, ByteData {
opcode: Opcode::Push2,
..
}] | [ByteData {
opcode: Opcode::Push3,
..
}, ByteData {
opcode: Opcode::Push3,
..
}] | [ByteData {
opcode: Opcode::Push4,
..
}, ByteData {
opcode: Opcode::Push4,
..
}] | [ByteData {
opcode: Opcode::Push5,
..
}, ByteData {
opcode: Opcode::Push5,
..
}] | [ByteData {
opcode: Opcode::Push6,
..
}, ByteData {
opcode: Opcode::Push6,
..
}] | [ByteData {
opcode: Opcode::Push7,
..
}, ByteData {
opcode: Opcode::Push7,
..
}] | [ByteData {
opcode: Opcode::Push8,
..
}, ByteData {
opcode: Opcode::Push8,
..
}] | [ByteData {
opcode: Opcode::Push9,
..
}, ByteData {
opcode: Opcode::Push9,
..
}] | [ByteData {
opcode: Opcode::Push10,
..
}, ByteData {
opcode: Opcode::Push10,
..
}] | [ByteData {
opcode: Opcode::Push11,
..
}, ByteData {
opcode: Opcode::Push11,
..
}] | [ByteData {
opcode: Opcode::Push12,
..
}, ByteData {
opcode: Opcode::Push12,
..
}] | [ByteData {
opcode: Opcode::Push13,
..
}, ByteData {
opcode: Opcode::Push13,
..
}] | [ByteData {
opcode: Opcode::Push14,
..
}, ByteData {
opcode: Opcode::Push14,
..
}] | [ByteData {
opcode: Opcode::Push15,
..
}, ByteData {
opcode: Opcode::Push15,
..
}] | [ByteData {
opcode: Opcode::Push16,
..
}, ByteData {
opcode: Opcode::Push16,
..
}] | [ByteData {
opcode: Opcode::Push17,
..
}, ByteData {
opcode: Opcode::Push17,
..
}] | [ByteData {
opcode: Opcode::Push18,
..
}, ByteData {
opcode: Opcode::Push18,
..
}] | [ByteData {
opcode: Opcode::Push19,
..
}, ByteData {
opcode: Opcode::Push19,
..
}] | [ByteData {
opcode: Opcode::Push20,
..
}, ByteData {
opcode: Opcode::Push20,
..
}] | [ByteData {
opcode: Opcode::Push21,
..
}, ByteData {
opcode: Opcode::Push21,
..
}] | [ByteData {
opcode: Opcode::Push22,
..
}, ByteData {
opcode: Opcode::Push22,
..
}] | [ByteData {
opcode: Opcode::Push23,
..
}, ByteData {
opcode: Opcode::Push23,
..
}] | [ByteData {
opcode: Opcode::Push24,
..
}, ByteData {
opcode: Opcode::Push24,
..
}] | [ByteData {
opcode: Opcode::Push25,
..
}, ByteData {
opcode: Opcode::Push25,
..
}] | [ByteData {
opcode: Opcode::Push26,
..
}, ByteData {
opcode: Opcode::Push26,
..
}] | [ByteData {
opcode: Opcode::Push27,
..
}, ByteData {
opcode: Opcode::Push27,
..
}] | [ByteData {
opcode: Opcode::Push28,
..
}, ByteData {
opcode: Opcode::Push28,
..
}] | [ByteData {
opcode: Opcode::Push29,
..
}, ByteData {
opcode: Opcode::Push29,
..
}] | [ByteData {
opcode: Opcode::Push30,
..
}, ByteData {
opcode: Opcode::Push30,
..
}] | [ByteData {
opcode: Opcode::Push31,
..
}, ByteData {
opcode: Opcode::Push31,
..
}] | [ByteData {
opcode: Opcode::Push32,
..
}, ByteData {
opcode: Opcode::Push32,
..
}] if peephole[0].pushdata.as_ref().unwrap() == peephole[1].pushdata.as_ref().unwrap() => [
ByteData {
code_index: peephole[0].code_index,
opcode: peephole[0].opcode,
pushdata: Some(peephole[0].pushdata.as_ref().unwrap().to_string()),
},
ByteData {
code_index: peephole[1].code_index,
opcode: Opcode::Dup1,
pushdata: None,
},
].to_vec(),
// Logical instruction combinations
[ByteData {
opcode: Opcode::Not,
..
}, ByteData {
opcode: Opcode::Not,
..
}] => [].to_vec(),
// Double negation resulting in boolean
[ByteData {
opcode: Opcode::Xor,
..
}, ByteData {
opcode: Opcode::Iszero,
..
}] => [
ByteData {
code_index: peephole[0].code_index,
opcode: Opcode::Eq,
pushdata: None,
}
].to_vec(),
[ByteData {
opcode: Opcode::Sub,
..
}, ByteData {
opcode: Opcode::Iszero,
..
}] => [
ByteData {
code_index: peephole[0].code_index,
opcode: Opcode::Eq,
pushdata: None,
}
].to_vec(),
_ => peephole[..].to_vec(),
};
new_bytecode
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Shorthand constructor for a `ByteData` literal.
    fn byte(code_index: usize, opcode: Opcode, pushdata: Option<&str>) -> ByteData {
        ByteData {
            code_index,
            opcode,
            pushdata: pushdata.map(String::from),
        }
    }

    #[test]
    fn test_commutative_swaps() {
        // SWAP1 before any commutative op is redundant: only the op survives.
        for op in [
            Opcode::Add,
            Opcode::Mul,
            Opcode::Eq,
            Opcode::And,
            Opcode::Or,
            Opcode::Xor,
        ] {
            let mut peephole = vec![byte(4, Opcode::Swap1, None), byte(5, op, None)];
            let expected = vec![byte(5, op, None)];
            assert_eq!(expected, check_rules(&mut peephole));
        }
    }

    #[test]
    fn test_dup_expression_operations() {
        // DUP1 + AND/OR leaves the operand unchanged: both bytes vanish.
        for op in [Opcode::And, Opcode::Or] {
            let mut peephole = vec![byte(4, Opcode::Dup1, None), byte(5, op, None)];
            assert_eq!(Vec::<ByteData>::new(), check_rules(&mut peephole));
        }

        // DUP1 + op collapses to a constant push ("00" for zero-producing
        // ops, "01" for x == x).
        for (op, constant) in [
            (Opcode::Xor, "00"),
            (Opcode::Sub, "00"),
            (Opcode::Eq, "01"),
            (Opcode::Lt, "00"),
            (Opcode::Slt, "00"),
            (Opcode::Gt, "00"),
            (Opcode::Sgt, "00"),
            (Opcode::Mod, "00"),
        ] {
            let mut peephole = vec![byte(4, Opcode::Dup1, None), byte(5, op, None)];
            let expected = vec![byte(4, Opcode::Push1, Some(constant))];
            assert_eq!(expected, check_rules(&mut peephole));
        }
    }

    #[test]
    fn test_duplicate_pushes() {
        // Test Push1 through Push31 (skip Push0 since it has no pushdata).
        for i in 1..32 {
            let data = "10".repeat(i);
            let mut peephole = vec![
                byte(4, PUSH_OPS[i], Some(&data)),
                byte(5, PUSH_OPS[i], Some(&data)),
            ];
            let expected = vec![
                byte(4, PUSH_OPS[i], Some(&data)),
                byte(5, Opcode::Dup1, None),
            ];
            assert_eq!(expected, check_rules(&mut peephole));
        }

        // Push0 is more efficient than DUP1, so the pair must be left alone.
        let mut peephole = vec![byte(4, Opcode::Push0, None), byte(5, Opcode::Push0, None)];
        let expected = vec![byte(4, Opcode::Push0, None), byte(5, Opcode::Push0, None)];
        assert_eq!(expected, check_rules(&mut peephole));
    }

    #[test]
    fn test_reduced_push_size() {
        // Leading zero bytes are stripped and the push opcode is shrunk.
        let mut peephole = vec![
            byte(4, Opcode::Push2, Some("0080")),
            byte(5, Opcode::Push18, Some("000000000000000000002030000000004040")),
        ];
        let expected = vec![
            byte(4, Opcode::Push1, Some("80")),
            byte(5, Opcode::Push8, Some("2030000000004040")),
        ];
        assert_eq!(expected, check_rules(&mut peephole));
    }

    #[test]
    fn test_logical_instruction_combinations() {
        // NOT NOT cancels out entirely.
        let mut peephole = vec![byte(4, Opcode::Not, None), byte(5, Opcode::Not, None)];
        assert_eq!(Vec::<ByteData>::new(), check_rules(&mut peephole));
    }

    #[test]
    fn test_double_negation() {
        // XOR/SUB followed by ISZERO is an equality test.
        for op in [Opcode::Xor, Opcode::Sub] {
            let mut peephole = vec![byte(4, op, None), byte(5, Opcode::Iszero, None)];
            let expected = vec![byte(4, Opcode::Eq, None)];
            assert_eq!(expected, check_rules(&mut peephole));
        }
    }
}
| rust | Apache-2.0 | f5ff2a69129155a7818e735dbe62f58eeb277731 | 2026-01-04T20:25:00.748536Z | false |
kadenzipfel/bytepeep | https://github.com/kadenzipfel/bytepeep/blob/f5ff2a69129155a7818e735dbe62f58eeb277731/src/peephole.rs | src/peephole.rs | use crate::{evm::*, rules::*, types::*};
// Optimize bytecode by creating peepholes and running rule checks
//
// Slides a two-instruction window over `bytecode`, replaces each window with
// whatever `check_rules` returns, and rebuilds `code_index` byte offsets as
// it emits instructions. When the second instruction of a window survives
// unchanged it becomes the head of the next window, so chained rules (e.g.
// NOT NOT NOT NOT) still collapse.
pub fn optimize(bytecode: &Bytecode) -> Bytecode {
    let mut i: usize = 0;
    let mut code_index: usize = 0;
    let mut optimized_bytecode: Bytecode = vec![];

    while i < bytecode.len() {
        // If the current opcode is the last one, emit it and finish.
        if i + 1 >= bytecode.len() {
            let byte = bytecode[i].clone();
            optimized_bytecode.push(ByteData {
                code_index,
                opcode: byte.opcode,
                pushdata: byte.pushdata,
            });
            break;
        }

        // Grab a two-instruction peephole and run the rewrite rules on it.
        let mut bytes: Bytecode = vec![bytecode[i].clone(), bytecode[i + 1].clone()];
        let peeped_bytes = check_rules(&mut bytes);

        // Both instructions eliminated: skip past the pair.
        if peeped_bytes.is_empty() {
            i += 2;
            continue;
        }

        // Emit the first surviving instruction at the rebuilt code index.
        // Instruction width = 1 opcode byte + half the pushdata hex length.
        let first = peeped_bytes[0].clone();
        let first_size = 1 + first.pushdata.as_ref().map_or(0, |p| p.len() / 2);
        optimized_bytecode.push(ByteData {
            code_index,
            opcode: first.opcode,
            pushdata: first.pushdata,
        });
        code_index += first_size;

        if peeped_bytes.len() == 2 {
            let second = peeped_bytes[1].clone();
            if second.opcode != bytecode[i + 1].opcode {
                // The rules rewrote the second instruction too: emit it and
                // consume both inputs.
                let second_size = 1 + second.pushdata.as_ref().map_or(0, |p| p.len() / 2);
                optimized_bytecode.push(ByteData {
                    code_index,
                    opcode: second.opcode,
                    pushdata: second.pushdata,
                });
                code_index += second_size;
                i += 2;
            } else {
                // Second instruction unchanged: re-examine it as the head of
                // the next window.
                i += 1;
            }
        } else {
            // The pair collapsed to a single instruction: both inputs consumed.
            i += 2;
        }
    }
    optimized_bytecode
}
// Gate the test module behind `cfg(test)` so it is not compiled into normal
// builds (matching the disassembler module's test setup).
#[cfg(test)]
mod tests {
    use super::*;

    /// Shorthand constructor for a `ByteData` literal.
    fn byte(code_index: usize, opcode: Opcode, pushdata: Option<&str>) -> ByteData {
        ByteData {
            code_index,
            opcode,
            pushdata: pushdata.map(String::from),
        }
    }

    #[test]
    fn test_optimize() {
        // Exercises push minimization, DUP1+XOR collapse, NOT NOT removal,
        // duplicate-push -> DUP1, SWAP1+ADD, and code_index rebuilding.
        let bytecode: Bytecode = vec![
            byte(0, Opcode::Push2, Some("0080")),
            byte(3, Opcode::Dup1, None),
            byte(4, Opcode::Xor, None),
            byte(5, Opcode::Not, None),
            byte(6, Opcode::Not, None),
            byte(7, Opcode::Push2, Some("8080")),
            byte(10, Opcode::Push2, Some("8080")),
            byte(13, Opcode::Push1, Some("54")),
            byte(14, Opcode::Swap1, None),
            byte(15, Opcode::Add, None),
        ];
        let optimized_bytecode: Bytecode = vec![
            byte(0, Opcode::Push1, Some("80")),
            byte(2, Opcode::Push1, Some("00")),
            byte(4, Opcode::Push2, Some("8080")),
            byte(7, Opcode::Dup1, None),
            byte(8, Opcode::Push1, Some("54")),
            byte(10, Opcode::Add, None),
        ];
        assert_eq!(optimized_bytecode, optimize(&bytecode));
    }
}
| rust | Apache-2.0 | f5ff2a69129155a7818e735dbe62f58eeb277731 | 2026-01-04T20:25:00.748536Z | false |
rbspy/read-process-memory | https://github.com/rbspy/read-process-memory/blob/a116fd5679a7aeee4b2b4d0843ef68c8b82e2fac/src/lib.rs | src/lib.rs | //! Read memory from another process' address space.
//!
//! This crate provides a trait—[`CopyAddress`](trait.CopyAddress.html),
//! and a helper function—[`copy_address`](fn.copy_address.html) that
//! allow reading memory from another process.
//!
//! Note: you may not always have permission to read memory from another
//! process! This may require `sudo` on some systems, and may fail even with
//! `sudo` on macOS. You are most likely to succeed if you are attempting to
//! read a process that you have spawned yourself.
//!
//! # Examples
//!
//! ```rust,no_run
//! # use std::convert::TryInto;
//! # use std::io;
//! use read_process_memory::*;
//!
//! # fn foo(pid: Pid, address: usize, size: usize) -> io::Result<()> {
//! let handle: ProcessHandle = pid.try_into()?;
//! let bytes = copy_address(address, size, &handle)?;
//! # Ok(())
//! # }
//! ```
#[doc(hidden)]
#[doc = include_str!("../README.md")]
mod readme {}
use std::io;
/// A trait that provides a method for reading memory from another process.
pub trait CopyAddress {
    /// Try to copy `buf.len()` bytes from `addr` in the process `self`, placing
    /// them in `buf`.
    ///
    /// Returns an `io::Error` when the platform-specific read fails.
    fn copy_address(&self, addr: usize, buf: &mut [u8]) -> io::Result<()>;
}
/// A process ID.
pub use crate::platform::Pid;
/// A handle to a running process. This is not a process ID on all platforms.
///
/// For convenience, this crate implements `TryFrom`-backed conversions from
/// `Pid` to `ProcessHandle`.
///
/// # Examples
///
/// ```rust,no_run
/// use read_process_memory::*;
/// use std::convert::TryInto;
/// use std::io;
///
/// fn pid_to_handle(pid: Pid) -> io::Result<ProcessHandle> {
/// Ok(pid.try_into()?)
/// }
/// ```
///
/// This operation is not guaranteed to succeed. Specifically, on Windows
/// `OpenProcess` may fail. On macOS `task_for_pid` will generally fail
/// unless run as root, and even then it may fail when called on certain
/// programs; it may however run without root on the current process.
pub use crate::platform::ProcessHandle;
#[cfg(target_os = "linux")]
mod platform {
    use libc::{c_void, iovec, pid_t, process_vm_readv};
    use std::convert::TryFrom;
    use std::fs;
    use std::io;
    use std::io::Read;
    use std::io::Seek;
    use std::process::Child;

    use super::CopyAddress;

    /// On Linux a `Pid` is just a `libc::pid_t`.
    pub type Pid = pid_t;
    /// On Linux a `ProcessHandle` is just a `libc::pid_t`.
    #[derive(Clone)]
    pub struct ProcessHandle(Pid);

    /// On Linux, process handle is a pid.
    impl TryFrom<Pid> for ProcessHandle {
        type Error = io::Error;
        fn try_from(pid: Pid) -> io::Result<Self> {
            Ok(Self(pid))
        }
    }

    /// A `process::Child` always has a pid, which is all we need on Linux.
    impl TryFrom<&Child> for ProcessHandle {
        type Error = io::Error;
        fn try_from(child: &Child) -> io::Result<Self> {
            Self::try_from(child.id() as Pid)
        }
    }

    impl CopyAddress for ProcessHandle {
        fn copy_address(&self, addr: usize, buf: &mut [u8]) -> io::Result<()> {
            let local_iov = iovec {
                iov_base: buf.as_mut_ptr() as *mut c_void,
                iov_len: buf.len(),
            };
            let remote_iov = iovec {
                iov_base: addr as *mut c_void,
                iov_len: buf.len(),
            };
            let result = unsafe { process_vm_readv(self.0, &local_iov, 1, &remote_iov, 1, 0) };
            if result == -1 {
                // Capture errno immediately so nothing can clobber it before
                // we inspect and return it.
                let err = io::Error::last_os_error();
                match err.raw_os_error() {
                    Some(libc::ENOSYS) | Some(libc::EPERM) => {
                        // fallback to reading /proc/$pid/mem if kernel does not
                        // implement process_vm_readv()
                        let mut procmem = fs::File::open(format!("/proc/{}/mem", self.0))?;
                        procmem.seek(io::SeekFrom::Start(addr as u64))?;
                        procmem.read_exact(buf)
                    }
                    _ => Err(err),
                }
            } else if result as usize != buf.len() {
                // process_vm_readv may return a partial read (e.g. the range
                // crosses an unmapped page). Previously this was treated as
                // success, leaving the tail of `buf` stale; report it instead,
                // mirroring the macOS implementation's size check.
                Err(io::Error::new(
                    io::ErrorKind::UnexpectedEof,
                    format!(
                        "Short read from `process_vm_readv` (expected {}, got {})",
                        buf.len(),
                        result
                    ),
                ))
            } else {
                Ok(())
            }
        }
    }
}
#[cfg(target_os = "macos")]
mod platform {
use libc::{c_int, pid_t};
use mach::kern_return::{kern_return_t, KERN_SUCCESS};
use mach::port::{mach_port_name_t, mach_port_t, MACH_PORT_NULL};
use mach::vm_types::{mach_vm_address_t, mach_vm_size_t};
use std::convert::TryFrom;
use std::io;
use std::process::Child;
use super::CopyAddress;
#[allow(non_camel_case_types)]
type vm_map_t = mach_port_t;
#[allow(non_camel_case_types)]
type vm_address_t = mach_vm_address_t;
#[allow(non_camel_case_types)]
type vm_size_t = mach_vm_size_t;
/// On macOS a `Pid` is just a `libc::pid_t`.
pub type Pid = pid_t;
/// On macOS a `ProcessHandle` is a mach port.
#[derive(Clone)]
pub struct ProcessHandle(mach_port_name_t);
extern "C" {
fn vm_read_overwrite(
target_task: vm_map_t,
address: vm_address_t,
size: vm_size_t,
data: vm_address_t,
out_size: *mut vm_size_t,
) -> kern_return_t;
}
/// A small wrapper around `task_for_pid`, which takes a pid and returns the
/// mach port representing its task.
fn task_for_pid(pid: Pid) -> io::Result<mach_port_name_t> {
if pid == unsafe { libc::getpid() } as Pid {
return Ok(unsafe { mach::traps::mach_task_self() });
}
let mut task: mach_port_name_t = MACH_PORT_NULL;
unsafe {
let result =
mach::traps::task_for_pid(mach::traps::mach_task_self(), pid as c_int, &mut task);
if result != KERN_SUCCESS {
return Err(io::Error::last_os_error());
}
}
Ok(task)
}
/// A `Pid` can be turned into a `ProcessHandle` with `task_for_pid`.
impl TryFrom<Pid> for ProcessHandle {
    type Error = io::Error;
    fn try_from(pid: Pid) -> io::Result<Self> {
        Ok(Self(task_for_pid(pid)?))
    }
}
/// On Darwin, process handle is a mach port name.
impl TryFrom<mach_port_name_t> for ProcessHandle {
    type Error = io::Error;
    fn try_from(mach_port_name: mach_port_name_t) -> io::Result<Self> {
        // Infallible today; kept as `TryFrom` for symmetry with the other
        // platform conversions.
        Ok(Self(mach_port_name))
    }
}
/// This `TryFrom` impl simply calls the `TryFrom` impl for `Pid`.
///
/// Unfortunately spawning a process on macOS does not hand back a mach
/// port by default (you have to jump through several hoops to get at it),
/// so there's no simple implementation of `TryFrom<Child> for
/// ProcessHandle` that uses the spawned task's port directly. This
/// implementation is just provided for symmetry with other platforms to
/// make writing cross-platform code easier.
///
/// Ideally we would provide an implementation of
/// `std::process::Command::spawn` that jumped through those hoops and
/// provided the task port.
impl TryFrom<&Child> for ProcessHandle {
    type Error = io::Error;
    fn try_from(child: &Child) -> io::Result<Self> {
        Self::try_from(child.id() as Pid)
    }
}
/// Use `vm_read` to read memory from another process on macOS.
impl CopyAddress for ProcessHandle {
    /// Fills `buf` with the bytes at `addr` in the target task.
    ///
    /// Returns an error if the kernel call fails or if fewer bytes than
    /// requested were copied.
    fn copy_address(&self, addr: usize, buf: &mut [u8]) -> io::Result<()> {
        let mut read_len = buf.len() as vm_size_t;
        // SAFETY: `buf` is valid for writes of `buf.len()` bytes and the
        // kernel copies at most `buf.len()` bytes into it.
        let result = unsafe {
            vm_read_overwrite(
                self.0,
                addr as vm_address_t,
                buf.len() as vm_size_t,
                buf.as_mut_ptr() as vm_address_t,
                &mut read_len,
            )
        };
        // Check the kern_return_t first: on failure `read_len` is not
        // meaningful, and checking the size first would mask the real
        // error behind a misleading "mismatched read sizes" message.
        if result != KERN_SUCCESS {
            return Err(io::Error::last_os_error());
        }
        if read_len != buf.len() as vm_size_t {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                format!(
                    "Mismatched read sizes for `vm_read` (expected {}, got {})",
                    buf.len(),
                    read_len
                ),
            ));
        }
        Ok(())
    }
}
}
#[cfg(target_os = "freebsd")]
mod platform {
    use libc::{c_int, c_void, pid_t};
    use libc::{waitpid, EBUSY, PIOD_READ_D, PT_ATTACH, PT_DETACH, PT_IO, WIFSTOPPED};
    use std::convert::TryFrom;
    use std::process::Child;
    use std::{io, ptr};
    use super::CopyAddress;
    /// On FreeBSD a `Pid` is just a `libc::pid_t`.
    pub type Pid = pid_t;
    /// On FreeBSD a `ProcessHandle` is just a `libc::pid_t`.
    #[derive(Clone)]
    pub struct ProcessHandle(Pid);
    // Mirrors the kernel's `struct ptrace_io_desc`, the argument to the
    // PT_IO ptrace request used below.
    #[repr(C)]
    struct PtraceIoDesc {
        piod_op: c_int,
        piod_offs: *mut c_void,
        piod_addr: *mut c_void,
        piod_len: usize,
    }
    /// If process is already traced, PT_ATTACH call returns
    /// EBUSY. This structure is needed to avoid double locking the process.
    /// - `Release` variant means we can safely detach from the process.
    /// - `NoRelease` variant means that process was already attached, so we
    ///   shall not attempt to detach from it.
    #[derive(PartialEq)]
    enum PtraceLockState {
        Release,
        NoRelease,
    }
    extern "C" {
        /// libc version of ptrace takes *mut i8 as third argument,
        /// which is not very ergonomic if we have a struct.
        fn ptrace(request: c_int, pid: pid_t, io_desc: *const PtraceIoDesc, data: c_int) -> c_int;
    }
    /// On FreeBSD, process handle is a pid.
    impl TryFrom<Pid> for ProcessHandle {
        type Error = io::Error;
        fn try_from(pid: Pid) -> io::Result<Self> {
            Ok(Self(pid))
        }
    }
    /// A `process::Child` always has a pid, which is all we need on FreeBSD.
    impl TryFrom<&Child> for ProcessHandle {
        type Error = io::Error;
        fn try_from(child: &Child) -> io::Result<Self> {
            Self::try_from(child.id() as Pid)
        }
    }
    /// Attach to a process `pid` and wait for the process to be stopped.
    fn ptrace_attach(pid: Pid) -> io::Result<PtraceLockState> {
        let attach_status = unsafe { ptrace(PT_ATTACH, pid, ptr::null_mut(), 0) };
        // Capture errno immediately, before any other call can clobber it.
        let last_error = io::Error::last_os_error();
        if let Some(error) = last_error.raw_os_error() {
            if attach_status == -1 {
                return match error {
                    // Already traced (e.g. by ourselves): proceed, but do
                    // not detach afterwards.
                    EBUSY => Ok(PtraceLockState::NoRelease),
                    _ => Err(last_error),
                };
            }
        }
        // Attach succeeded; block until the target actually stops.
        let mut wait_status = 0;
        let stopped = unsafe {
            waitpid(pid, &mut wait_status as *mut _, 0);
            WIFSTOPPED(wait_status)
        };
        if !stopped {
            Err(io::Error::last_os_error())
        } else {
            Ok(PtraceLockState::Release)
        }
    }
    /// Read process `pid` memory at `addr` to `buf` via PT_IO ptrace call.
    fn ptrace_io(pid: Pid, addr: usize, buf: &mut [u8]) -> io::Result<()> {
        let ptrace_io_desc = PtraceIoDesc {
            piod_op: PIOD_READ_D,
            piod_offs: addr as *mut c_void,
            piod_addr: buf.as_mut_ptr() as *mut c_void,
            piod_len: buf.len(),
        };
        let result = unsafe { ptrace(PT_IO, pid, &ptrace_io_desc as *const _, 0) };
        if result == -1 {
            Err(io::Error::last_os_error())
        } else {
            Ok(())
        }
    }
    /// Detach from the process `pid`.
    fn ptrace_detach(pid: Pid) -> io::Result<()> {
        let detach_status = unsafe { ptrace(PT_DETACH, pid, ptr::null_mut(), 0) };
        if detach_status == -1 {
            Err(io::Error::last_os_error())
        } else {
            Ok(())
        }
    }
    impl CopyAddress for ProcessHandle {
        fn copy_address(&self, addr: usize, buf: &mut [u8]) -> io::Result<()> {
            // Attach, read, then detach only if we were the ones who
            // attached; the read result is reported either way.
            let should_detach = ptrace_attach(self.0)? == PtraceLockState::Release;
            let result = ptrace_io(self.0, addr, buf);
            if should_detach {
                ptrace_detach(self.0)?
            }
            result
        }
    }
}
#[cfg(windows)]
mod platform {
    use std::convert::TryFrom;
    use std::io;
    use std::mem;
    use std::ops::Deref;
    use std::os::raw::c_void;
    use std::os::windows::io::{AsRawHandle, RawHandle};
    use std::process::Child;
    use std::ptr;
    use std::sync::Arc;
    use windows_sys::Win32::Foundation::{CloseHandle, HANDLE};
    use windows_sys::Win32::System::Diagnostics::Debug::ReadProcessMemory;
    use windows_sys::Win32::System::Threading::{OpenProcess, PROCESS_VM_READ};
    use super::CopyAddress;
    /// On Windows a `Pid` is a `DWORD`.
    pub type Pid = u32;
    // Wrapped in `Arc` by `ProcessHandle` so the underlying HANDLE is
    // closed exactly once, when the last clone is dropped.
    #[derive(Eq, PartialEq, Hash)]
    struct ProcessHandleInner(HANDLE);
    /// On Windows a `ProcessHandle` is a `HANDLE`.
    #[derive(Clone, Eq, PartialEq, Hash)]
    pub struct ProcessHandle(Arc<ProcessHandleInner>);
    impl Deref for ProcessHandle {
        type Target = HANDLE;
        fn deref(&self) -> &Self::Target {
            &self.0 .0
        }
    }
    impl Drop for ProcessHandleInner {
        fn drop(&mut self) {
            unsafe { CloseHandle(self.0) };
        }
    }
    /// A `Pid` can be turned into a `ProcessHandle` with `OpenProcess`.
    impl TryFrom<Pid> for ProcessHandle {
        type Error = io::Error;
        fn try_from(pid: Pid) -> io::Result<Self> {
            // Request only PROCESS_VM_READ; a zero handle signals failure.
            let handle = unsafe { OpenProcess(PROCESS_VM_READ, 0, pid) };
            if handle == 0 {
                Err(io::Error::last_os_error())
            } else {
                Ok(Self(Arc::new(ProcessHandleInner(handle))))
            }
        }
    }
    /// A `std::process::Child` has a `HANDLE` from calling `CreateProcess`.
    impl TryFrom<&Child> for ProcessHandle {
        type Error = io::Error;
        fn try_from(child: &Child) -> io::Result<Self> {
            // NOTE(review): this takes ownership of the Child's raw handle
            // without duplicating it; `ProcessHandleInner::drop` will
            // CloseHandle a handle the `Child` also closes on drop —
            // possible double close. Confirm against DuplicateHandle usage.
            Ok(Self(Arc::new(ProcessHandleInner(
                child.as_raw_handle() as HANDLE
            ))))
        }
    }
    impl From<RawHandle> for ProcessHandle {
        fn from(handle: RawHandle) -> Self {
            // Takes ownership: the handle is closed when the last clone of
            // this `ProcessHandle` is dropped.
            Self(Arc::new(ProcessHandleInner(handle as HANDLE)))
        }
    }
    /// Use `ReadProcessMemory` to read memory from another process on Windows.
    impl CopyAddress for ProcessHandle {
        fn copy_address(&self, addr: usize, buf: &mut [u8]) -> io::Result<()> {
            if buf.is_empty() {
                return Ok(());
            }
            // ReadProcessMemory returns 0 (FALSE) on failure.
            if unsafe {
                ReadProcessMemory(
                    self.0 .0,
                    addr as *const c_void,
                    buf.as_mut_ptr().cast(),
                    mem::size_of_val(buf),
                    ptr::null_mut(),
                )
            } == 0
            {
                Err(io::Error::last_os_error())
            } else {
                Ok(())
            }
        }
    }
}
/// Copy `length` bytes of memory at `addr` from `source`.
///
/// This is just a convenient way to call `CopyAddress::copy_address` without
/// having to provide your own buffer: it allocates one, performs the read,
/// and returns the filled buffer on success.
pub fn copy_address<T>(addr: usize, length: usize, source: &T) -> io::Result<Vec<u8>>
where
    T: CopyAddress,
{
    log::debug!("copy_address: addr: {:x}", addr);
    let mut buffer = vec![0; length];
    match source.copy_address(addr, &mut buffer) {
        Ok(()) => Ok(buffer),
        Err(e) => {
            log::warn!("copy_address failed for {:x}: {:?}", addr, e);
            Err(e)
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use std::convert::TryFrom;
    use std::env;
    use std::io::{self, BufRead, BufReader};
    use std::path::PathBuf;
    use std::process::{Child, Command, Stdio};
    #[allow(unused)]
    const fn assert_send_sync<T: Send + Sync>() {}
    // Compile-time proof that the public platform types are Send + Sync.
    const _: () = assert_send_sync::<ProcessHandle>();
    const _: () = assert_send_sync::<Pid>();
    // Locate the companion `test` binary (see src/bin/test.rs) next to the
    // currently running test executable.
    fn test_process_path() -> Option<PathBuf> {
        env::current_exe().ok().and_then(|p| {
            p.parent().map(|p| {
                p.with_file_name("test")
                    .with_extension(env::consts::EXE_EXTENSION)
            })
        })
    }
    // Spawn the command and build a ProcessHandle from the child's pid.
    fn spawn_with_handle(cmd: &mut Command) -> io::Result<(Child, ProcessHandle)> {
        let child = cmd.spawn()?;
        let handle = ProcessHandle::try_from(child.id() as Pid)?;
        Ok((child, handle))
    }
    // Spawn the helper binary, parse the "<addr> <size>" line it prints,
    // read that region from its memory, and return the bytes.
    fn read_test_process(args: Option<&[&str]>) -> io::Result<Vec<u8>> {
        // Spawn a child process and attempt to read its memory.
        let path = test_process_path().unwrap();
        let mut cmd = Command::new(&path);
        {
            // Piped stdin keeps the child alive until we drop it; piped
            // stdout lets us read the address/size line.
            cmd.stdin(Stdio::piped()).stdout(Stdio::piped());
        }
        if let Some(a) = args {
            cmd.args(a);
        }
        let (mut child, handle) = spawn_with_handle(&mut cmd)?;
        // The test program prints the address and size.
        // See `src/bin/test.rs` for its source.
        let reader = BufReader::new(child.stdout.take().unwrap());
        let line = reader.lines().next().unwrap().unwrap();
        let bits = line.split(' ').collect::<Vec<_>>();
        // The address is printed as "0x..."; strip the prefix before parsing.
        let addr = usize::from_str_radix(&bits[0][2..], 16).unwrap();
        let size = bits[1].parse::<usize>().unwrap();
        let mem = copy_address(addr, size, &handle)?;
        child.wait()?;
        Ok(mem)
    }
    #[test]
    fn test_read_small() {
        let mem = read_test_process(None).unwrap();
        assert_eq!(mem, (0..32u8).collect::<Vec<u8>>());
    }
    #[test]
    fn test_read_large() {
        // 20,000 should be greater than a single page on most systems.
        // macOS on ARM is 16384.
        const SIZE: usize = 20_000;
        let arg = format!("{}", SIZE);
        let mem = read_test_process(Some(&[&arg])).unwrap();
        let expected = (0..SIZE)
            .map(|v| (v % (u8::max_value() as usize + 1)) as u8)
            .collect::<Vec<u8>>();
        assert_eq!(mem, expected);
    }
}
| rust | MIT | a116fd5679a7aeee4b2b4d0843ef68c8b82e2fac | 2026-01-04T20:25:01.440670Z | false |
rbspy/read-process-memory | https://github.com/rbspy/read-process-memory/blob/a116fd5679a7aeee4b2b4d0843ef68c8b82e2fac/src/bin/test.rs | src/bin/test.rs | // This test program is used in the tests in src/lib.rs.
use std::env;
use std::io::{self, Read};
fn main() {
    // Size of the buffer to expose; defaults to 32 bytes.
    let size = env::args()
        .nth(1)
        .and_then(|a| a.parse::<usize>().ok())
        .unwrap_or(32);
    // Fill the buffer with the deterministic pattern 0, 1, 2, ..., wrapping
    // at 256 so any length yields predictable contents for the reader to
    // verify. `u8::MAX` replaces the deprecated `u8::max_value()`.
    let data = if size <= u8::MAX as usize {
        (0..size as u8).collect::<Vec<u8>>()
    } else {
        (0..size)
            .map(|v| (v % (u8::MAX as usize + 1)) as u8)
            .collect::<Vec<u8>>()
    };
    // The parent test parses this "<addr> <size>" line to know where and
    // how much to read from our address space.
    println!("{:p} {}", data.as_ptr(), data.len());
    // Wait to exit until stdin is closed.
    let mut buf = vec![];
    io::stdin().read_to_end(&mut buf).unwrap();
}
| rust | MIT | a116fd5679a7aeee4b2b4d0843ef68c8b82e2fac | 2026-01-04T20:25:01.440670Z | false |
rbspy/read-process-memory | https://github.com/rbspy/read-process-memory/blob/a116fd5679a7aeee4b2b4d0843ef68c8b82e2fac/examples/read-process-bytes.rs | examples/read-process-bytes.rs | extern crate libc;
extern crate read_process_memory;
use read_process_memory::*;
use std::convert::TryInto;
use std::env;
/// Render a byte slice as a lowercase hex string, two digits per byte.
fn bytes_to_hex(bytes: &[u8]) -> String {
    let mut hex = String::with_capacity(bytes.len() * 2);
    for byte in bytes {
        hex.push_str(&format!("{:02x}", byte));
    }
    hex
}
// Reads `size` bytes at hex address `addr` from process `pid`, given as
// CLI args: <pid> <addr-hex> <size>. Example code, so unwraps are used for
// argument errors.
fn main() {
    let pid = env::args().nth(1).unwrap().parse::<usize>().unwrap() as Pid;
    // The address argument is hex without a 0x prefix.
    let addr = usize::from_str_radix(&env::args().nth(2).unwrap(), 16).unwrap();
    let size = env::args().nth(3).unwrap().parse::<usize>().unwrap();
    let handle: ProcessHandle = pid.try_into().unwrap();
    copy_address(addr, size, &handle)
        .map_err(|e| {
            println!("Error: {:?}", e);
            e
        })
        .map(|bytes| {
            println!(
                "{} bytes at address {:x}:
{}
",
                size,
                addr,
                bytes_to_hex(&bytes)
            )
        })
        .unwrap();
}
| rust | MIT | a116fd5679a7aeee4b2b4d0843ef68c8b82e2fac | 2026-01-04T20:25:01.440670Z | false |
rbspy/read-process-memory | https://github.com/rbspy/read-process-memory/blob/a116fd5679a7aeee4b2b4d0843ef68c8b82e2fac/examples/read-self.rs | examples/read-self.rs | /// Read bytes from the current process.
use read_process_memory::*;
use std::convert::TryInto;
// Demonstrates reading memory from our own process: allocate a known
// buffer, read it back through the `CopyAddress` machinery, and verify
// the bytes match.
fn main() {
    let data = vec![17u8, 23u8, 45u8, 0u8];
    let pid = unsafe { libc::getpid() } as Pid;
    let addr = data.as_ptr() as usize;
    let handle: ProcessHandle = pid.try_into().unwrap();
    copy_address(addr, 4, &handle)
        .map_err(|e| {
            println!("Error: {:?}", e);
            e
        })
        .map(|bytes| {
            // The bytes read from our own address space must equal the
            // buffer we allocated above.
            assert_eq!(bytes, vec![17u8, 23u8, 45u8, 0u8]);
            println!("Success!")
        })
        .unwrap();
}
| rust | MIT | a116fd5679a7aeee4b2b4d0843ef68c8b82e2fac | 2026-01-04T20:25:01.440670Z | false |
Zeenobit/moonshine_save | https://github.com/Zeenobit/moonshine_save/blob/ca1fdf24a52164d56a38d74598ce975e0bf1ff42/src/lib.rs | src/lib.rs | #![doc = include_str!("../README.md")]
#![warn(missing_docs)]
use std::marker::PhantomData;
use bevy_ecs::prelude::*;
use moonshine_util::Static;
/// Types, traits, and functions related to loading.
pub mod load;
/// Types, traits, and functions related to saving.
pub mod save;
/// Common elements for saving/loading world state.
pub mod prelude {
    pub use crate::load::{
        load_on, load_on_default_event, LoadError, LoadEvent, LoadInput, LoadWorld, Loaded,
        TriggerLoad, Unload,
    };
    pub use crate::save::{
        save_on, save_on_default_event, Save, SaveError, SaveEvent, SaveOutput, SaveWorld, Saved,
        TriggerSave,
    };
    // Bevy's entity-mapping types, re-exported because they are commonly
    // needed alongside save/load code.
    pub use bevy_ecs::{
        entity::{EntityMapper, MapEntities},
        reflect::ReflectMapEntities,
    };
}
/// A trait used for mapping components during a save operation.
///
/// # Usage
///
/// Component mapping is useful when you wish to serialize an unserializable component.
///
/// All component mappers are executed **BEFORE** the serialization step of the Save Pipeline.
/// When invoked, the given component `T` will be replaced with the output of the mapper for all saved entities.
/// When the save operation is complete, the original component will be restored.
///
/// Keep in mind that this will trigger [change detection](DetectChanges) for the mapped component.
pub trait MapComponent<T: Component>: 'static + Clone + Send + Sync {
    /// The mapped output type, inserted in place of `T` on saved entities.
    type Output: Component;
    /// Called during the Save/Load process to map components.
    fn map_component(&self, component: &T) -> Self::Output;
}
// Blanket impl: any `Fn(&T) -> U` that is `'static + Clone + Send + Sync`
// is usable as a component mapper, so plain closures work.
impl<F: Fn(&T) -> U, T: Component, U: Component> MapComponent<T> for F
where
    F: 'static + Clone + Send + Sync,
{
    type Output = U;
    fn map_component(&self, component: &T) -> Self::Output {
        self(component)
    }
}
/// A collection of component mappers. See [`MapComponent`] for more information.
#[derive(Default)]
pub struct SceneMapper(Vec<ComponentMapperDyn>);

impl SceneMapper {
    /// Adds a component mapper to the scene mapper.
    pub fn map<T: Component>(mut self, m: impl MapComponent<T>) -> Self {
        let mapper = ComponentMapperImpl::new(m);
        self.0.push(Box::new(mapper));
        self
    }

    /// Runs the `apply` step of every registered mapper on `entity`.
    pub(crate) fn apply(&mut self, mut entity: EntityWorldMut) {
        self.0.iter_mut().for_each(|mapper| mapper.apply(&mut entity));
    }

    /// Runs the `replace` step of every registered mapper on `entity`.
    pub(crate) fn replace(&mut self, mut entity: EntityWorldMut) {
        self.0
            .iter_mut()
            .for_each(|mapper| mapper.replace(&mut entity));
    }

    /// Runs the `undo` step of every registered mapper on `entity`.
    pub(crate) fn undo(&mut self, mut entity: EntityWorldMut) {
        self.0.iter_mut().for_each(|mapper| mapper.undo(&mut entity));
    }
}
// Object-safe interface over `ComponentMapperImpl`, so mappers for
// different component types can be stored together in a `SceneMapper`.
trait ComponentMapper: Static {
    fn apply(&mut self, entity: &mut EntityWorldMut);
    fn replace(&mut self, entity: &mut EntityWorldMut);
    fn undo(&mut self, entity: &mut EntityWorldMut);
}
// Pairs a `MapComponent` with the component type it maps; `PhantomData`
// records `T` without storing one.
struct ComponentMapperImpl<T: Component, M: MapComponent<T>>(M, PhantomData<T>);
impl<T: Component, M: MapComponent<T>> ComponentMapperImpl<T, M> {
    fn new(m: M) -> Self {
        Self(m, PhantomData)
    }
}
impl<T: Component, M: MapComponent<T>> ComponentMapper for ComponentMapperImpl<T, M> {
    fn apply(&mut self, entity: &mut EntityWorldMut) {
        // Keeps the original `T` on the entity and inserts the mapped
        // output alongside it.
        if let Some(component) = entity.get::<T>() {
            entity.insert(self.0.map_component(component));
        }
    }
    fn replace(&mut self, entity: &mut EntityWorldMut) {
        // Removes the original `T` (via `take`) and inserts the mapped
        // output in its place.
        if let Some(component) = entity.take::<T>() {
            entity.insert(self.0.map_component(&component));
        }
    }
    fn undo(&mut self, entity: &mut EntityWorldMut) {
        // Removes only the mapped output; it does not restore a `T`
        // removed by `replace`.
        entity.remove::<M::Output>();
    }
}
type ComponentMapperDyn = Box<dyn ComponentMapper>;
| rust | MIT | ca1fdf24a52164d56a38d74598ce975e0bf1ff42 | 2026-01-04T20:25:00.832058Z | false |
Zeenobit/moonshine_save | https://github.com/Zeenobit/moonshine_save/blob/ca1fdf24a52164d56a38d74598ce975e0bf1ff42/src/save.rs | src/save.rs | use std::any::TypeId;
use std::io::{self, Write};
use std::marker::PhantomData;
use std::path::PathBuf;
use bevy_ecs::entity::EntityHashSet;
use bevy_ecs::prelude::*;
use bevy_ecs::query::QueryFilter;
use bevy_log::prelude::*;
use bevy_scene::{ron, DynamicScene, DynamicSceneBuilder, SceneFilter};
use moonshine_util::event::{OnSingle, SingleEvent, TriggerSingle};
use moonshine_util::Static;
use thiserror::Error;
use crate::{MapComponent, SceneMapper};
/// A [`Component`] which marks its [`Entity`] to be saved.
#[derive(Component, Default, Debug, Clone)]
pub struct Save;
/// A trait used to trigger a [`SaveEvent`] via [`Commands`] or [`World`].
pub trait TriggerSave {
    /// Triggers the given [`SaveEvent`].
    #[doc(alias = "trigger_single")]
    fn trigger_save(self, event: impl SaveEvent);
}
// Triggering through `Commands` defers to `trigger_single`.
impl TriggerSave for &mut Commands<'_, '_> {
    fn trigger_save(self, event: impl SaveEvent) {
        self.trigger_single(event);
    }
}
// Triggering through `&mut World` also defers to `trigger_single`, with
// exclusive world access.
impl TriggerSave for &mut World {
    fn trigger_save(self, event: impl SaveEvent) {
        self.trigger_single(event);
    }
}
/// A [`SingleEvent`] which starts the save process with the given parameters.
///
/// See also:
/// - [`trigger_save`](TriggerSave::trigger_save)
/// - [`trigger_single`](TriggerSingle::trigger_single)
/// - [`SaveWorld`]
pub trait SaveEvent: SingleEvent {
    /// A [`QueryFilter`] used as the initial filter for selecting saved entities.
    type SaveFilter: QueryFilter;
    /// Return `true` if the given [`Entity`] should be saved.
    ///
    /// Defaults to saving every entity that passed [`SaveEvent::SaveFilter`].
    fn filter_entity(&self, _entity: EntityRef) -> bool {
        true
    }
    /// Called once before the save process starts.
    ///
    /// This is useful if you want to modify the world just before saving.
    fn before_save(&mut self, _world: &mut World) {}
    /// Called once before serialization.
    ///
    /// This is useful to undo any modifications done before saving.
    fn before_serialize(&mut self, _world: &mut World, _entities: &[Entity]) {}
    /// Returns a [`SceneFilter`] for selecting which components should be saved.
    ///
    /// Defaults to saving all serializable components.
    fn component_filter(&mut self) -> SceneFilter {
        SceneFilter::allow_all()
    }
    /// Returns a [`SceneFilter`] for selecting which resources should be saved.
    ///
    /// Defaults to saving no resources.
    fn resource_filter(&mut self) -> SceneFilter {
        SceneFilter::deny_all()
    }
    /// Called once after serialization.
    ///
    /// This is useful if you would like to do any post-processing of the [`Saved`] data *before* [`OnSave`] is triggered.
    fn after_save(&mut self, _world: &mut World, _result: &SaveResult) {}
    /// Returns the [`SaveOutput`] of the save process.
    fn output(&mut self) -> SaveOutput;
}
/// A generic [`SaveEvent`] which can be used to save the [`World`].
pub struct SaveWorld<F: QueryFilter = DefaultSaveFilter> {
    /// A filter for selecting which entities should be saved.
    ///
    /// By default, all entities are selected.
    pub entities: EntityFilter,
    /// A filter for selecting which resources should be saved.
    ///
    /// By default, no resources are selected. Most Bevy resources are not safely serializable.
    pub resources: SceneFilter,
    /// A filter for selecting which components should be saved.
    ///
    /// By default, all serializable components are selected.
    pub components: SceneFilter,
    /// A mapper for transforming components during the save process.
    ///
    /// See [`MapComponent`] for more information.
    pub mapper: SceneMapper,
    /// Output of the saved world.
    pub output: SaveOutput,
    // Marker tying this event to its `SaveFilter` query filter type.
    #[doc(hidden)]
    pub filter: PhantomData<F>,
}
impl<F: QueryFilter> SaveWorld<F> {
    /// Creates a new [`SaveWorld`] event with the given [`SaveOutput`].
    ///
    /// Defaults: all entities, no resources, all serializable components,
    /// and no component mappers.
    pub fn new(output: SaveOutput) -> Self {
        Self {
            entities: EntityFilter::allow_all(),
            resources: SceneFilter::deny_all(),
            components: SceneFilter::allow_all(),
            mapper: SceneMapper::default(),
            output,
            filter: PhantomData,
        }
    }
    /// Creates a new [`SaveWorld`] event which saves entities matching the
    /// given [`QueryFilter`] into a file at the given path.
    pub fn into_file(path: impl Into<PathBuf>) -> Self {
        // Delegate to `new` so the defaults live in exactly one place.
        Self::new(SaveOutput::file(path))
    }
    /// Creates a new [`SaveWorld`] event which saves entities matching the
    /// given [`QueryFilter`] into a [`Write`] stream.
    pub fn into_stream(stream: impl SaveStream) -> Self {
        Self::new(SaveOutput::stream(stream))
    }
    /// Includes the given [`Resource`] in the save data.
    pub fn include_resource<R: Resource>(mut self) -> Self {
        self.resources = self.resources.allow::<R>();
        self
    }
    /// Includes the given [`Resource`] by its [`TypeId`] in the save data.
    pub fn include_resource_by_id(mut self, type_id: TypeId) -> Self {
        self.resources = self.resources.allow_by_id(type_id);
        self
    }
    /// Excludes the given [`Component`] from the save data.
    pub fn exclude_component<T: Component>(mut self) -> Self {
        self.components = self.components.deny::<T>();
        self
    }
    /// Excludes the given [`Component`] by its [`TypeId`] from the save data.
    pub fn exclude_component_by_id(mut self, type_id: TypeId) -> Self {
        self.components = self.components.deny_by_id(type_id);
        self
    }
    /// Maps the given [`Component`] into another using a [component mapper](MapComponent) before saving.
    pub fn map_component<T: Component>(mut self, m: impl MapComponent<T>) -> Self {
        self.mapper = self.mapper.map(m);
        self
    }
}
// Convenience constructors for the default filter (`With<Save>`).
impl SaveWorld {
    /// Creates a new [`SaveWorld`] event which saves default entities (with [`Save`])
    /// into a file at the given path.
    pub fn default_into_file(path: impl Into<PathBuf>) -> Self {
        Self::into_file(path)
    }
    /// Creates a new [`SaveWorld`] event which saves default entities (with [`Save`])
    /// into a [`Write`] stream.
    pub fn default_into_stream(stream: impl SaveStream) -> Self {
        Self::into_stream(stream)
    }
}
// Convenience constructors for the empty filter, i.e. every entity.
impl SaveWorld<()> {
    /// Creates a new [`SaveWorld`] event which saves all entities into a file at the given path.
    pub fn all_into_file(path: impl Into<PathBuf>) -> Self {
        Self::into_file(path)
    }
    /// Creates a new [`SaveWorld`] event which saves all entities into a [`Write`] stream.
    pub fn all_into_stream(stream: impl SaveStream) -> Self {
        Self::into_stream(stream)
    }
}
impl<F: QueryFilter> SingleEvent for SaveWorld<F> where F: Static {}
impl<F: QueryFilter> SaveEvent for SaveWorld<F>
where
    F: Static,
{
    type SaveFilter = F;
    fn filter_entity(&self, entity: EntityRef) -> bool {
        match &self.entities {
            EntityFilter::Allow(allow) => allow.contains(&entity.id()),
            EntityFilter::Block(block) => !block.contains(&entity.id()),
        }
    }
    fn before_serialize(&mut self, world: &mut World, entities: &[Entity]) {
        // Apply component mappers to every entity that will be serialized.
        for entity in entities {
            self.mapper.apply(world.entity_mut(*entity));
        }
    }
    fn after_save(&mut self, world: &mut World, result: &SaveResult) {
        let Ok(saved) = result else {
            // NOTE(review): on a failed save the mapped components applied
            // in `before_serialize` are not undone — confirm intended.
            return;
        };
        // Remove the mapped components added in `before_serialize`.
        for entity in saved.entities() {
            self.mapper.undo(world.entity_mut(entity));
        }
    }
    fn component_filter(&mut self) -> SceneFilter {
        // Consumed once per save; leaves `SceneFilter::Unset` behind.
        std::mem::replace(&mut self.components, SceneFilter::Unset)
    }
    fn resource_filter(&mut self) -> SceneFilter {
        std::mem::replace(&mut self.resources, SceneFilter::Unset)
    }
    fn output(&mut self) -> SaveOutput {
        // Panics if the output was already consumed; a `SaveEvent` is
        // processed at most once.
        self.output.consume().unwrap()
    }
}
/// Filter used for the default [`SaveWorld`] event.
/// This includes all entities with the [`Save`] component.
pub type DefaultSaveFilter = With<Save>;
/// Output of the save process.
pub enum SaveOutput {
    /// Save into a file at the given path.
    File(PathBuf),
    /// Save into a [`Write`] stream.
    Stream(Box<dyn SaveStream>),
    /// Drops the save data.
    ///
    /// This is useful if you would like to process the [`Saved`] data manually.
    /// You can observe the [`OnSave`] event for post-processing logic.
    Drop,
    // Placeholder left behind by `consume`; not meant to be constructed
    // directly.
    #[doc(hidden)]
    Invalid,
}
impl SaveOutput {
    /// Creates a new [`SaveOutput`] which saves into a file at the given path.
    pub fn file(path: impl Into<PathBuf>) -> Self {
        Self::File(path.into())
    }

    /// Creates a new [`SaveOutput`] which saves into a [`Write`] stream.
    pub fn stream<S: SaveStream + 'static>(stream: S) -> Self {
        Self::Stream(Box::new(stream))
    }

    /// Invalidates this [`SaveOutput`] and returns it if it was valid.
    pub fn consume(&mut self) -> Option<SaveOutput> {
        // Swap `Invalid` in; hand the previous value out unless it was
        // already consumed.
        match std::mem::replace(self, SaveOutput::Invalid) {
            SaveOutput::Invalid => None,
            output => Some(output),
        }
    }
}
/// A filter for selecting [`Entity`]s within a [`World`].
#[derive(Clone, Debug)]
pub enum EntityFilter {
    /// Select only the specified entities.
    Allow(EntityHashSet),
    /// Select all entities except the specified ones.
    Block(EntityHashSet),
}
impl EntityFilter {
    /// Creates a new [`EntityFilter`] which allows all entities.
    pub fn allow_all() -> Self {
        // An empty block list blocks nothing, i.e. allows everything.
        Self::Block(EntityHashSet::new())
    }
    /// Creates a new [`EntityFilter`] which allows only the specified entities.
    pub fn allow(entities: impl IntoIterator<Item = Entity>) -> Self {
        Self::Allow(entities.into_iter().collect())
    }
    /// Creates a new [`EntityFilter`] which blocks the specified entities.
    pub fn block(entities: impl IntoIterator<Item = Entity>) -> Self {
        Self::Block(entities.into_iter().collect())
    }
}
impl Default for EntityFilter {
    fn default() -> Self {
        Self::allow_all()
    }
}
/// Alias for a `'static` [`Write`] stream.
pub trait SaveStream: Write
where
    Self: Static,
{
}
// Blanket impl: any `'static` `Write` type is a `SaveStream`.
impl<S: Write> SaveStream for S where S: Static {}
/// An [`Event`] triggered at the end of the save process.
///
/// This event contains the saved [`World`] data as a [`DynamicScene`].
#[derive(Event)]
pub struct Saved {
    /// The saved [`DynamicScene`] to be serialized.
    pub scene: DynamicScene,
}
impl Saved {
    /// Iterates over all the saved entities.
    pub fn entities(&self) -> impl Iterator<Item = Entity> + '_ {
        self.scene.entities.iter().map(|de| de.entity)
    }
}
#[doc(hidden)]
#[deprecated(since = "0.5.2", note = "use `Saved` instead")]
pub type OnSave = Saved;
/// An error that may occur during the save process.
#[derive(Error, Debug)]
pub enum SaveError {
    /// An error occurred while serializing the scene.
    #[error("Failed to serialize world: {0}")]
    Ron(ron::Error),
    /// An error occurred while writing into [`SaveOutput`].
    #[error("Failed to write world: {0}")]
    Io(io::Error),
}
impl From<ron::Error> for SaveError {
    fn from(e: ron::Error) -> Self {
        Self::Ron(e)
    }
}
impl From<io::Error> for SaveError {
    fn from(e: io::Error) -> Self {
        Self::Io(e)
    }
}
/// [`Result`] of a [`SaveEvent`].
pub type SaveResult = Result<Saved, SaveError>;
/// An [`Observer`] which saves the world when a [`SaveWorld`] event is triggered.
pub fn save_on_default_event(event: OnSingle<SaveWorld>, commands: Commands) {
    save_on(event, commands);
}
/// An [`Observer`] which saves the world when the given [`SaveEvent`] is triggered.
pub fn save_on<E: SaveEvent>(event: OnSingle<E>, mut commands: Commands) {
    // Defer the actual save as an exclusive-world command; errors are
    // logged rather than propagated.
    commands.queue_handled(SaveCommand(event.consume().unwrap()), |err, ctx| {
        error!("save failed: {err:?} ({ctx})");
    });
}
// Runs the full save pipeline: notify, filter entities, serialize the
// scene, write it to the event's output, and notify again. The order of
// these steps matters (mappers are applied before the scene is built and
// undone in `after_save`).
fn save_world<E: SaveEvent>(mut event: E, world: &mut World) -> SaveResult {
    // Notify
    event.before_save(world);
    // Filter
    let entities: Vec<_> = world
        .query_filtered::<Entity, E::SaveFilter>()
        .iter(world)
        .filter(|entity| event.filter_entity(world.entity(*entity)))
        .collect();
    // Serialize
    event.before_serialize(world, &entities);
    let scene = DynamicSceneBuilder::from_world(world)
        .with_component_filter(event.component_filter())
        .with_resource_filter(event.resource_filter())
        .extract_resources()
        .extract_entities(entities.iter().copied())
        .build();
    // Write
    let saved = match event.output() {
        SaveOutput::File(path) => {
            // Create missing parent directories before writing the file.
            if let Some(parent) = path.parent() {
                std::fs::create_dir_all(parent)?;
            }
            let type_registry = world.resource::<AppTypeRegistry>().read();
            let data = scene.serialize(&type_registry)?;
            std::fs::write(&path, data.as_bytes())?;
            debug!("saved into file: {path:?}");
            Saved { scene }
        }
        SaveOutput::Stream(mut stream) => {
            let type_registry = world.resource::<AppTypeRegistry>().read();
            let data = scene.serialize(&type_registry)?;
            stream.write_all(data.as_bytes())?;
            debug!("saved into stream");
            Saved { scene }
        }
        SaveOutput::Drop => {
            // Nothing is written; the scene is still handed to observers.
            debug!("saved data dropped");
            Saved { scene }
        }
        SaveOutput::Invalid => {
            panic!("SaveOutput is invalid");
        }
    };
    let result = Ok(saved);
    // Let the event post-process (e.g. undo component mappers).
    event.after_save(world, &result);
    result
}
// Command wrapper that performs the save with exclusive world access and
// then triggers the `Saved` event for observers.
struct SaveCommand<E>(E);
impl<E: SaveEvent> Command<Result<(), SaveError>> for SaveCommand<E> {
    fn apply(self, world: &mut World) -> Result<(), SaveError> {
        let saved = save_world(self.0, world)?;
        world.trigger(saved);
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use std::fs::*;
    use bevy::prelude::*;
    use bevy_ecs::system::RunSystemOnce;
    use super::*;
    #[derive(Component, Default, Reflect)]
    #[reflect(Component)]
    #[require(Save)]
    struct Foo;
    // Minimal app with the `Foo` type registered for reflection.
    fn app() -> App {
        let mut app = App::new();
        app.add_plugins(MinimalPlugins).register_type::<Foo>();
        app
    }
    // Note: these tests write files relative to the working directory;
    // each uses a unique file name to avoid collisions.
    #[test]
    fn test_save_into_file() {
        #[derive(Resource)]
        struct EventTriggered;
        pub const PATH: &str = "test_save_into_file.ron";
        let mut app = app();
        app.add_observer(save_on_default_event);
        // Marker resource proves the `Saved` event reached observers.
        app.add_observer(|_: On<Saved>, mut commands: Commands| {
            commands.insert_resource(EventTriggered);
        });
        let _ = app.world_mut().run_system_once(|mut commands: Commands| {
            commands.spawn((Foo, Save));
            commands.trigger_save(SaveWorld::default_into_file(PATH));
        });
        let data = read_to_string(PATH).unwrap();
        let world = app.world();
        assert!(data.contains("Foo"));
        assert!(world.contains_resource::<EventTriggered>());
        remove_file(PATH).unwrap();
    }
    #[test]
    fn test_save_into_stream() {
        pub const PATH: &str = "test_save_to_stream.ron";
        let mut app = app();
        app.add_observer(save_on_default_event);
        let _ = app.world_mut().run_system_once(|mut commands: Commands| {
            commands.spawn((Foo, Save));
            commands.trigger_save(SaveWorld::default_into_stream(File::create(PATH).unwrap()));
        });
        let data = read_to_string(PATH).unwrap();
        assert!(data.contains("Foo"));
        remove_file(PATH).unwrap();
    }
    #[test]
    fn test_save_resource() {
        pub const PATH: &str = "test_save_resource.ron";
        #[derive(Resource, Default, Reflect)]
        #[reflect(Resource)]
        struct Bar;
        let mut app = app();
        app.register_type::<Bar>()
            .add_observer(save_on_default_event);
        let _ = app.world_mut().run_system_once(|mut commands: Commands| {
            commands.insert_resource(Bar);
            // Resources are excluded by default; opt `Bar` in explicitly.
            commands.trigger_save(
                SaveWorld::default_into_stream(File::create(PATH).unwrap())
                    .include_resource::<Bar>(),
            );
        });
        app.update();
        let data = read_to_string(PATH).unwrap();
        assert!(data.contains("Bar"));
        remove_file(PATH).unwrap();
    }
    #[test]
    fn test_save_without_component() {
        pub const PATH: &str = "test_save_without_component.ron";
        #[derive(Component, Default, Reflect)]
        #[reflect(Component)]
        #[require(Save)]
        struct Baz;
        let mut app = app();
        app.add_observer(save_on_default_event);
        let _ = app.world_mut().run_system_once(|mut commands: Commands| {
            commands.spawn((Foo, Baz, Save));
            commands.trigger_save(SaveWorld::default_into_file(PATH).exclude_component::<Baz>());
        });
        let data = read_to_string(PATH).unwrap();
        assert!(data.contains("Foo"));
        assert!(!data.contains("Baz"));
        remove_file(PATH).unwrap();
    }
    #[test]
    fn test_map_component() {
        pub const PATH: &str = "test_map_component.ron";
        #[derive(Component, Default)]
        struct Bar(#[allow(dead_code)] u32); // Not serializable
        #[derive(Component, Default, Reflect)]
        #[reflect(Component)]
        struct Baz(u32); // Serializable
        let mut app = app();
        app.register_type::<Baz>()
            .add_observer(save_on_default_event);
        let entity = app
            .world_mut()
            .run_system_once(|mut commands: Commands| {
                let entity = commands.spawn((Bar(12), Save)).id();
                // Map the unserializable `Bar` into `Baz` just for the save.
                commands.trigger_save(
                    SaveWorld::default_into_file(PATH).map_component::<Bar>(|Bar(i): &Bar| Baz(*i)),
                );
                entity
            })
            .unwrap();
        let data = read_to_string(PATH).unwrap();
        assert!(data.contains("Baz"));
        assert!(data.contains("(12)"));
        assert!(!data.contains("Bar"));
        // After the save, the original `Bar` remains and the temporary
        // `Baz` has been removed from the live entity.
        assert!(app.world().entity(entity).contains::<Bar>());
        assert!(!app.world().entity(entity).contains::<Baz>());
        remove_file(PATH).unwrap();
    }
}
| rust | MIT | ca1fdf24a52164d56a38d74598ce975e0bf1ff42 | 2026-01-04T20:25:00.832058Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.