repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-agent/src/agent/http_transport/reqwest_transport.rs
ic-agent/src/agent/http_transport/reqwest_transport.rs
//! This module has been deprecated in favor of builder methods on `AgentBuilder`. #![allow(deprecated)] pub use reqwest; use std::sync::Arc; use reqwest::Client; use crate::{ agent::{ route_provider::{RoundRobinRouteProvider, RouteProvider}, AgentBuilder, }, AgentError, }; /// A legacy configuration object. `AgentBuilder::with_transport` will apply these settings to the builder. #[derive(Debug, Clone)] pub struct ReqwestTransport { route_provider: Arc<dyn RouteProvider>, client: Client, max_response_body_size: Option<usize>, max_tcp_error_retries: usize, } impl ReqwestTransport { /// Equivalent to [`AgentBuilder::with_url`]. #[deprecated(since = "0.38.0", note = "Use AgentBuilder::with_url")] pub fn create<U: Into<String>>(url: U) -> Result<Self, AgentError> { #[cfg(not(target_family = "wasm"))] { Self::create_with_client( url, Client::builder() .use_rustls_tls() .timeout(std::time::Duration::from_secs(360)) .build() .expect("Could not create HTTP client."), ) } #[cfg(all(target_family = "wasm", feature = "wasm-bindgen"))] { Self::create_with_client(url, Client::new()) } } /// Equivalent to [`AgentBuilder::with_url`] and [`AgentBuilder::with_http_client`]. #[deprecated( since = "0.38.0", note = "Use AgentBuilder::with_url and AgentBuilder::with_http_client" )] pub fn create_with_client<U: Into<String>>(url: U, client: Client) -> Result<Self, AgentError> { let route_provider = Arc::new(RoundRobinRouteProvider::new(vec![url.into()])?); Self::create_with_client_route(route_provider, client) } /// Equivalent to [`AgentBuilder::with_http_client`] and [`AgentBuilder::with_route_provider`]. 
#[deprecated( since = "0.38.0", note = "Use AgentBuilder::with_http_client and AgentBuilder::with_arc_route_provider" )] pub fn create_with_client_route( route_provider: Arc<dyn RouteProvider>, client: Client, ) -> Result<Self, AgentError> { Ok(Self { route_provider, client, max_response_body_size: None, max_tcp_error_retries: 0, }) } /// Equivalent to [`AgentBuilder::with_max_response_body_size`]. #[deprecated( since = "0.38.0", note = "Use AgentBuilder::with_max_response_body_size" )] pub fn with_max_response_body_size(self, max_response_body_size: usize) -> Self { ReqwestTransport { max_response_body_size: Some(max_response_body_size), ..self } } /// Equivalent to [`AgentBuilder::with_max_tcp_error_retries`]. #[deprecated( since = "0.38.0", note = "Use AgentBuilder::with_max_tcp_error_retries" )] pub fn with_max_tcp_errors_retries(self, retries: usize) -> Self { ReqwestTransport { max_tcp_error_retries: retries, ..self } } } impl AgentBuilder { #[doc(hidden)] #[deprecated(since = "0.38.0", note = "Use the dedicated methods on AgentBuilder")] pub fn with_transport(self, transport: ReqwestTransport) -> Self { let mut builder = self .with_arc_route_provider(transport.route_provider) .with_http_client(transport.client) .with_max_tcp_error_retries(transport.max_tcp_error_retries); if let Some(max_size) = transport.max_response_body_size { builder = builder.with_max_response_body_size(max_size); } builder } #[doc(hidden)] #[deprecated(since = "0.38.0", note = "Use the dedicated methods on AgentBuilder")] pub fn with_arc_transport(self, transport: Arc<ReqwestTransport>) -> Self { self.with_transport((*transport).clone()) } }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-agent/src/agent/http_transport/mod.rs
ic-agent/src/agent/http_transport/mod.rs
//! This module has been deprecated in favor of builder methods on `AgentBuilder`. #[deprecated(since = "0.38.0", note = "use the AgentBuilder methods")] #[doc(hidden)] pub mod reqwest_transport; #[doc(hidden)] #[allow(deprecated)] pub use reqwest_transport::ReqwestTransport;
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-agent/src/agent/route_provider/dynamic_routing/messages.rs
ic-agent/src/agent/route_provider/dynamic_routing/messages.rs
use crate::agent::route_provider::dynamic_routing::{health_check::HealthCheckStatus, node::Node}; /// Represents a message with fetched nodes. #[derive(Debug, Clone)] pub struct FetchedNodes { /// The fetched nodes. pub nodes: Vec<Node>, } /// Represents a message with the health state of a node. pub struct NodeHealthState { /// The node. pub node: Node, /// The health state of the node. pub health: HealthCheckStatus, }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-agent/src/agent/route_provider/dynamic_routing/node.rs
ic-agent/src/agent/route_provider/dynamic_routing/node.rs
use url::Url; use crate::agent::ApiBoundaryNode; /// Represents a node in the dynamic routing. #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct Node { domain: String, } impl Node { /// Creates a new `Node` instance from the domain name. pub fn new(domain: impl Into<String>) -> Result<Self, url::ParseError> { let domain = domain.into(); check_valid_domain(&domain)?; Ok(Self { domain }) } /// Returns the domain name of the node. pub fn domain(&self) -> &str { &self.domain } } impl Node { /// Converts the node to a routing URL. pub fn to_routing_url(&self) -> Url { Url::parse(&format!("https://{}", self.domain)).expect("failed to parse URL") } } impl From<&Node> for Url { fn from(node: &Node) -> Self { // Parsing can't fail, as the domain was checked at node instantiation. Url::parse(&format!("https://{}", node.domain)).expect("failed to parse URL") } } impl TryFrom<ApiBoundaryNode> for Node { type Error = url::ParseError; fn try_from(value: ApiBoundaryNode) -> Result<Self, Self::Error> { Node::new(value.domain) } } /// Checks if the given domain is a valid URL. fn check_valid_domain<S: AsRef<str>>(domain: S) -> Result<(), url::ParseError> { // Prepend scheme to make it a valid URL let url_string = format!("http://{}", domain.as_ref()); Url::parse(&url_string)?; Ok(()) }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-agent/src/agent/route_provider/dynamic_routing/type_aliases.rs
ic-agent/src/agent/route_provider/dynamic_routing/type_aliases.rs
use arc_swap::ArcSwap; use std::sync::Arc; /// A type alias for the sender end of a watch channel. pub(super) type SenderWatch<T> = async_watch::Sender<Option<T>>; /// A type alias for the receiver end of a watch channel. pub(super) type ReceiverWatch<T> = async_watch::Receiver<Option<T>>; /// A type alias for the sender end of a multi-producer, single-consumer channel. pub(super) type SenderMpsc<T> = async_channel::Sender<T>; /// A type alias for the receiver end of a multi-producer, single-consumer channel. pub(super) type ReceiverMpsc<T> = async_channel::Receiver<T>; /// A type alias for an atomic swap operation on a shared value. pub(super) type AtomicSwap<T> = Arc<ArcSwap<T>>;
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-agent/src/agent/route_provider/dynamic_routing/test_utils.rs
ic-agent/src/agent/route_provider/dynamic_routing/test_utils.rs
use std::collections::{HashMap, HashSet}; use std::time::Duration; use std::{fmt::Debug, hash::Hash, sync::Arc}; use arc_swap::ArcSwap; use async_trait::async_trait; use url::Url; use crate::agent::route_provider::{ dynamic_routing::{ dynamic_route_provider::DynamicRouteProviderError, health_check::{HealthCheck, HealthCheckStatus}, node::Node, nodes_fetch::Fetch, type_aliases::AtomicSwap, }, RouteProvider, }; pub(super) fn route_n_times(n: usize, f: Arc<impl RouteProvider + ?Sized>) -> Vec<String> { (0..n) .map(|_| f.route().unwrap().domain().unwrap().to_string()) .collect() } pub(super) fn assert_routed_domains<T>( actual: Vec<T>, expected: Vec<&str>, expected_repetitions: usize, ) where T: AsRef<str> + Eq + Hash + Debug + Ord, { fn build_count_map<T>(items: &[T]) -> HashMap<&str, usize> where T: AsRef<str>, { items.iter().fold(HashMap::new(), |mut map, item| { *map.entry(item.as_ref()).or_insert(0) += 1; map }) } let count_actual = build_count_map(&actual); let count_expected = build_count_map(&expected); let mut keys_actual = count_actual.keys().collect::<Vec<_>>(); keys_actual.sort(); let mut keys_expected = count_expected.keys().collect::<Vec<_>>(); keys_expected.sort(); // Assert all routed domains are present. assert_eq!(keys_actual, keys_expected); // Assert the expected repetition count of each routed domain. let actual_repetitions = count_actual.values().collect::<Vec<_>>(); assert!(actual_repetitions .iter() .all(|&x| x == &expected_repetitions)); } #[derive(Debug)] pub(super) struct NodesFetcherMock { // A set of nodes, existing in the topology. 
pub nodes: AtomicSwap<Vec<Node>>, } #[cfg_attr(target_family = "wasm", async_trait(?Send))] #[cfg_attr(not(target_family = "wasm"), async_trait)] impl Fetch for NodesFetcherMock { async fn fetch(&self, _url: Url) -> Result<Vec<Node>, DynamicRouteProviderError> { let nodes = (*self.nodes.load_full()).clone(); Ok(nodes) } } impl Default for NodesFetcherMock { fn default() -> Self { Self::new() } } impl NodesFetcherMock { pub fn new() -> Self { Self { nodes: Arc::new(ArcSwap::from_pointee(vec![])), } } pub fn overwrite_nodes(&self, nodes: Vec<Node>) { self.nodes.store(Arc::new(nodes)); } } #[derive(Debug)] pub(super) struct NodeHealthCheckerMock { healthy_nodes: Arc<ArcSwap<HashSet<Node>>>, } impl Default for NodeHealthCheckerMock { fn default() -> Self { Self::new() } } #[cfg_attr(target_family = "wasm", async_trait(?Send))] #[cfg_attr(not(target_family = "wasm"), async_trait)] impl HealthCheck for NodeHealthCheckerMock { async fn check(&self, node: &Node) -> Result<HealthCheckStatus, DynamicRouteProviderError> { let nodes = self.healthy_nodes.load_full(); let latency = match nodes.contains(node) { true => Some(Duration::from_secs(1)), false => None, }; Ok(HealthCheckStatus::new(latency)) } } impl NodeHealthCheckerMock { pub fn new() -> Self { Self { healthy_nodes: Arc::new(ArcSwap::from_pointee(HashSet::new())), } } pub fn overwrite_healthy_nodes(&self, healthy_nodes: Vec<Node>) { self.healthy_nodes .store(Arc::new(HashSet::from_iter(healthy_nodes))); } }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-agent/src/agent/route_provider/dynamic_routing/dynamic_route_provider.rs
ic-agent/src/agent/route_provider/dynamic_routing/dynamic_route_provider.rs
//! An implementation of [`RouteProvider`] for dynamic generation of routing urls. use std::{ sync::Arc, time::{Duration, Instant}, }; use arc_swap::ArcSwap; use candid::Principal; use futures_util::FutureExt; use stop_token::StopSource; use thiserror::Error; use url::Url; use crate::{ agent::{ route_provider::{ dynamic_routing::{ health_check::{HealthCheck, HealthChecker, HealthManagerActor}, messages::FetchedNodes, node::Node, nodes_fetch::{Fetch, NodesFetchActor, NodesFetcher}, snapshot::routing_snapshot::RoutingSnapshot, type_aliases::AtomicSwap, }, RouteProvider, RoutesStats, }, HttpService, }, AgentError, }; /// The default seed domain for boundary node discovery. #[allow(unused)] pub const IC0_SEED_DOMAIN: &str = "ic0.app"; const MAINNET_ROOT_SUBNET_ID: &str = "tdb26-jop6k-aogll-7ltgs-eruif-6kk7m-qpktf-gdiqx-mxtrf-vb5e6-eqe"; const FETCH_PERIOD: Duration = Duration::from_secs(5); const FETCH_RETRY_INTERVAL: Duration = Duration::from_millis(250); const TIMEOUT_AWAIT_HEALTHY_SEED: Duration = Duration::from_millis(1000); #[allow(unused)] const HEALTH_CHECK_TIMEOUT: Duration = Duration::from_secs(1); const HEALTH_CHECK_PERIOD: Duration = Duration::from_secs(1); #[allow(unused)] const DYNAMIC_ROUTE_PROVIDER: &str = "DynamicRouteProvider"; /// A dynamic route provider. /// It spawns the discovery service (`NodesFetchActor`) for fetching the latest nodes topology. /// It also spawns the `HealthManagerActor`, which orchestrates the health check tasks for each node and updates routing snapshot. #[derive(Debug)] pub struct DynamicRouteProvider<S> { /// Fetcher for fetching the latest nodes topology. fetcher: Arc<dyn Fetch>, /// Periodicity of fetching the latest nodes topology. fetch_period: Duration, /// Interval for retrying fetching the nodes in case of error. fetch_retry_interval: Duration, /// Health checker for checking the health of the nodes. checker: Arc<dyn HealthCheck>, /// Periodicity of checking the health of the nodes. 
check_period: Duration, /// Snapshot of the routing nodes. routing_snapshot: AtomicSwap<S>, /// Initial seed nodes, which are used for the initial fetching of the nodes. seeds: Vec<Node>, /// Cancellation token for stopping the spawned tasks. token: StopSource, } /// An error that occurred when the `DynamicRouteProvider` service was running. #[derive(Error, Debug)] pub enum DynamicRouteProviderError { /// An error when fetching topology of the API nodes. #[error("An error when fetching API nodes: {0}")] NodesFetchError(String), /// An error when checking API node's health. #[error("An error when checking API node's health: {0}")] HealthCheckError(String), } /// A builder for the `DynamicRouteProvider`. pub struct DynamicRouteProviderBuilder<S> { fetcher: Arc<dyn Fetch>, fetch_period: Duration, fetch_retry_interval: Duration, checker: Arc<dyn HealthCheck>, check_period: Duration, routing_snapshot: AtomicSwap<S>, seeds: Vec<Node>, } impl<S> DynamicRouteProviderBuilder<S> { /// Creates a new instance of the builder. pub fn new(snapshot: S, seeds: Vec<Node>, http_client: Arc<dyn HttpService>) -> Self { let fetcher = Arc::new(NodesFetcher::new( http_client.clone(), Principal::from_text(MAINNET_ROOT_SUBNET_ID).unwrap(), None, )); let checker = Arc::new(HealthChecker::new( http_client, #[cfg(not(target_family = "wasm"))] HEALTH_CHECK_TIMEOUT, )); Self { fetcher, fetch_period: FETCH_PERIOD, fetch_retry_interval: FETCH_RETRY_INTERVAL, checker, check_period: HEALTH_CHECK_PERIOD, seeds, routing_snapshot: Arc::new(ArcSwap::from_pointee(snapshot)), } } /// Sets the fetcher of the nodes in the topology. #[allow(unused)] pub fn with_fetcher(mut self, fetcher: Arc<dyn Fetch>) -> Self { self.fetcher = fetcher; self } /// Sets the fetching periodicity. pub fn with_fetch_period(mut self, period: Duration) -> Self { self.fetch_period = period; self } /// Sets the node health checker. 
#[allow(unused)] pub fn with_checker(mut self, checker: Arc<dyn HealthCheck>) -> Self { self.checker = checker; self } /// Sets the periodicity of node health checking. pub fn with_check_period(mut self, period: Duration) -> Self { self.check_period = period; self } /// Builds an instance of the `DynamicRouteProvider`. pub async fn build(self) -> DynamicRouteProvider<S> where S: RoutingSnapshot + 'static, { let route_provider = DynamicRouteProvider { fetcher: self.fetcher, fetch_period: self.fetch_period, fetch_retry_interval: self.fetch_retry_interval, checker: self.checker, check_period: self.check_period, routing_snapshot: self.routing_snapshot, seeds: self.seeds, token: StopSource::new(), }; route_provider.run().await; route_provider } } impl<S> RouteProvider for DynamicRouteProvider<S> where S: RoutingSnapshot + 'static, { fn route(&self) -> Result<Url, AgentError> { let snapshot = self.routing_snapshot.load(); let node = snapshot.next_node().ok_or_else(|| { AgentError::RouteProviderError("No healthy API nodes found.".to_string()) })?; Ok(node.to_routing_url()) } fn n_ordered_routes(&self, n: usize) -> Result<Vec<Url>, AgentError> { let snapshot = self.routing_snapshot.load(); let nodes = snapshot.next_n_nodes(n); if nodes.is_empty() { return Err(AgentError::RouteProviderError( "No healthy API nodes found.".to_string(), )); }; let urls = nodes.iter().map(|n| n.to_routing_url()).collect(); Ok(urls) } fn routes_stats(&self) -> RoutesStats { let snapshot = self.routing_snapshot.load(); snapshot.routes_stats() } } impl<S> DynamicRouteProvider<S> where S: RoutingSnapshot + 'static, { /// Starts two background tasks: /// - Task1: `NodesFetchActor` /// - Periodically fetches existing API nodes (gets latest nodes topology) and sends discovered nodes to `HealthManagerActor`. /// - Task2: `HealthManagerActor`: /// - Listens to the fetched nodes messages from the `NodesFetchActor`. 
/// - Starts/stops health check tasks (`HealthCheckActors`) based on the newly added/removed nodes. /// - These spawned health check tasks periodically update the snapshot with the latest node health info. pub async fn run(&self) { log!(info, "{DYNAMIC_ROUTE_PROVIDER}: started ..."); // Communication channel between NodesFetchActor and HealthManagerActor. let (fetch_sender, fetch_receiver) = async_watch::channel(None); // Communication channel with HealthManagerActor to receive info about healthy seed nodes (used only once). let (init_sender, init_receiver) = async_channel::bounded(1); // Start the receiving part first. let health_manager_actor = HealthManagerActor::new( Arc::clone(&self.checker), self.check_period, Arc::clone(&self.routing_snapshot), fetch_receiver, init_sender, self.token.token(), ); crate::util::spawn(async move { health_manager_actor.run().await }); // Dispatch all seed nodes for initial health checks if let Err(_err) = fetch_sender.send(Some(FetchedNodes { nodes: self.seeds.clone(), })) { log!( error, "{DYNAMIC_ROUTE_PROVIDER}: failed to send results to HealthManager: {_err:?}" ); } // Try await for healthy seeds. let _start = Instant::now(); futures_util::select! { _ = crate::util::sleep(TIMEOUT_AWAIT_HEALTHY_SEED).fuse() => { log!( warn, "{DYNAMIC_ROUTE_PROVIDER}: no healthy seeds found within {:?}", _start.elapsed() ); } _ = init_receiver.recv().fuse() => { log!( info, "{DYNAMIC_ROUTE_PROVIDER}: found healthy seeds within {:?}", _start.elapsed() ); } } // We can close the channel now. 
init_receiver.close(); let fetch_actor = NodesFetchActor::new( Arc::clone(&self.fetcher), self.fetch_period, self.fetch_retry_interval, fetch_sender, Arc::clone(&self.routing_snapshot), self.token.token(), ); crate::util::spawn(async move { fetch_actor.run().await }); log!( info, "{DYNAMIC_ROUTE_PROVIDER}: NodesFetchActor and HealthManagerActor started successfully" ); } } #[cfg(all(test, not(target_family = "wasm")))] mod tests { use candid::Principal; use reqwest::Client; use std::{ sync::{Arc, Once}, time::{Duration, Instant}, }; use tracing::Level; use tracing_subscriber::FmtSubscriber; use crate::{ agent::route_provider::{ dynamic_routing::{ dynamic_route_provider::{ DynamicRouteProviderBuilder, IC0_SEED_DOMAIN, MAINNET_ROOT_SUBNET_ID, }, node::Node, snapshot::{ latency_based_routing::LatencyRoutingSnapshot, round_robin_routing::RoundRobinRoutingSnapshot, }, test_utils::{ assert_routed_domains, route_n_times, NodeHealthCheckerMock, NodesFetcherMock, }, }, RouteProvider, RoutesStats, }, Agent, AgentError, }; static TRACING_INIT: Once = Once::new(); pub fn setup_tracing() { TRACING_INIT.call_once(|| { FmtSubscriber::builder() .with_max_level(Level::TRACE) .with_test_writer() .init(); }); } async fn assert_no_routing_via_domains( route_provider: Arc<dyn RouteProvider>, excluded_domains: Vec<&str>, timeout: Duration, route_call_interval: Duration, ) { if excluded_domains.is_empty() { panic!("List of excluded domains can't be empty"); } let route_calls = 30; let start = Instant::now(); while start.elapsed() < timeout { let routed_domains = (0..route_calls) .map(|_| { route_provider.route().map(|url| { let domain = url.domain().expect("no domain name in url"); domain.to_string() }) }) .collect::<Result<Vec<String>, _>>() .unwrap_or_default(); // Exit when excluded domains are not used for routing any more. 
if !routed_domains.is_empty() && !routed_domains .iter() .any(|d| excluded_domains.contains(&d.as_str())) { return; } tokio::time::sleep(route_call_interval).await; } panic!("Expected excluded domains {excluded_domains:?} are still observed in routing over the last {route_calls} calls"); } #[tokio::test] async fn test_mainnet() { // Setup. setup_tracing(); let seed = Node::new(IC0_SEED_DOMAIN).unwrap(); let client = Client::builder().build().unwrap(); let route_provider = DynamicRouteProviderBuilder::new( LatencyRoutingSnapshot::new(), vec![seed], Arc::new(client.clone()), ) .build() .await; let route_provider = Arc::new(route_provider) as Arc<dyn RouteProvider>; let agent = Agent::builder() .with_arc_route_provider(Arc::clone(&route_provider)) .with_http_client(client) .build() .expect("failed to create an agent"); let subnet_id = Principal::from_text(MAINNET_ROOT_SUBNET_ID).unwrap(); // Assert that seed (ic0.app) is not used for routing. Henceforth, only discovered API nodes are used. assert_no_routing_via_domains( Arc::clone(&route_provider), vec![IC0_SEED_DOMAIN], Duration::from_secs(40), Duration::from_secs(2), ) .await; // Act: perform /read_state call via dynamically discovered API BNs. let api_bns = agent .fetch_api_boundary_nodes_by_subnet_id(subnet_id) .await .expect("failed to fetch api boundary nodes"); assert!(!api_bns.is_empty()); } #[tokio::test] async fn test_routing_with_topology_and_node_health_updates() { // Setup. setup_tracing(); let node_1 = Node::new(IC0_SEED_DOMAIN).unwrap(); // Set nodes fetching params: topology, fetching periodicity. let fetcher = Arc::new(NodesFetcherMock::new()); fetcher.overwrite_nodes(vec![node_1.clone()]); let fetch_interval = Duration::from_secs(2); // Set health checking params: healthy nodes, checking periodicity. let checker = Arc::new(NodeHealthCheckerMock::new()); let check_interval = Duration::from_secs(1); // A single healthy node exists in the topology. This node happens to be the seed node. 
fetcher.overwrite_nodes(vec![node_1.clone()]); checker.overwrite_healthy_nodes(vec![node_1.clone()]); // Configure RouteProvider let snapshot = RoundRobinRoutingSnapshot::new(); let client = Client::builder().build().unwrap(); let route_provider = DynamicRouteProviderBuilder::new(snapshot, vec![node_1.clone()], Arc::new(client)) .with_fetcher(fetcher.clone()) .with_checker(checker.clone()) .with_fetch_period(fetch_interval) .with_check_period(check_interval) .build() .await; let route_provider = Arc::new(route_provider); // This time span is required for the snapshot to be fully updated with the new nodes and their health info. let snapshot_update_duration = fetch_interval + 2 * check_interval; // Test 1: multiple route() calls return a single domain=ic0.app. // Only a single node exists, which is initially healthy. tokio::time::sleep(snapshot_update_duration).await; let routed_domains = route_n_times(6, Arc::clone(&route_provider)); assert_routed_domains(routed_domains, vec![node_1.domain()], 6); assert_eq!(route_provider.routes_stats(), RoutesStats::new(1, Some(1))); // Test 2: multiple route() calls return 3 different domains with equal fairness (repetition). // Two healthy nodes are added to the topology. let node_2 = Node::new("api1.com").unwrap(); let node_3 = Node::new("api2.com").unwrap(); checker.overwrite_healthy_nodes(vec![node_1.clone(), node_2.clone(), node_3.clone()]); fetcher.overwrite_nodes(vec![node_1.clone(), node_2.clone(), node_3.clone()]); tokio::time::sleep(snapshot_update_duration).await; let routed_domains = route_n_times(6, Arc::clone(&route_provider)); assert_routed_domains( routed_domains, vec![node_1.domain(), node_2.domain(), node_3.domain()], 2, ); assert_eq!(route_provider.routes_stats(), RoutesStats::new(3, Some(3))); // Test 3: multiple route() calls return 2 different domains with equal fairness (repetition). // One node is set to unhealthy. 
checker.overwrite_healthy_nodes(vec![node_1.clone(), node_3.clone()]); tokio::time::sleep(snapshot_update_duration).await; let routed_domains = route_n_times(6, Arc::clone(&route_provider)); assert_routed_domains(routed_domains, vec![node_1.domain(), node_3.domain()], 3); assert_eq!(route_provider.routes_stats(), RoutesStats::new(3, Some(2))); // Test 4: multiple route() calls return 3 different domains with equal fairness (repetition). // Unhealthy node is set back to healthy. checker.overwrite_healthy_nodes(vec![node_1.clone(), node_2.clone(), node_3.clone()]); tokio::time::sleep(snapshot_update_duration).await; let routed_domains = route_n_times(6, Arc::clone(&route_provider)); assert_routed_domains( routed_domains, vec![node_1.domain(), node_2.domain(), node_3.domain()], 2, ); assert_eq!(route_provider.routes_stats(), RoutesStats::new(3, Some(3))); // Test 5: multiple route() calls return 3 different domains with equal fairness (repetition). // One healthy node is added, but another one goes unhealthy. let node_4 = Node::new("api3.com").unwrap(); checker.overwrite_healthy_nodes(vec![node_2.clone(), node_3.clone(), node_4.clone()]); fetcher.overwrite_nodes(vec![ node_1.clone(), node_2.clone(), node_3.clone(), node_4.clone(), ]); tokio::time::sleep(snapshot_update_duration).await; let routed_domains = route_n_times(6, Arc::clone(&route_provider)); assert_routed_domains( routed_domains, vec![node_2.domain(), node_3.domain(), node_4.domain()], 2, ); assert_eq!(route_provider.routes_stats(), RoutesStats::new(4, Some(3))); // Test 6: multiple route() calls return a single domain=api1.com. // One node is set to unhealthy and one is removed from the topology. 
checker.overwrite_healthy_nodes(vec![node_2.clone(), node_3.clone()]); fetcher.overwrite_nodes(vec![node_1.clone(), node_2.clone(), node_4.clone()]); tokio::time::sleep(snapshot_update_duration).await; let routed_domains = route_n_times(3, Arc::clone(&route_provider)); assert_routed_domains(routed_domains, vec![node_2.domain()], 3); assert_eq!(route_provider.routes_stats(), RoutesStats::new(3, Some(1))); } #[tokio::test] async fn test_route_with_initially_unhealthy_seeds_becoming_healthy() { // Setup. setup_tracing(); let node_1 = Node::new(IC0_SEED_DOMAIN).unwrap(); let node_2 = Node::new("api1.com").unwrap(); // Set nodes fetching params: topology, fetching periodicity. let fetcher = Arc::new(NodesFetcherMock::new()); let fetch_interval = Duration::from_secs(2); // Set health checking params: healthy nodes, checking periodicity. let checker = Arc::new(NodeHealthCheckerMock::new()); let check_interval = Duration::from_secs(1); // Two nodes exist, which are initially unhealthy. fetcher.overwrite_nodes(vec![node_1.clone(), node_2.clone()]); checker.overwrite_healthy_nodes(vec![]); // Configure RouteProvider let snapshot = RoundRobinRoutingSnapshot::new(); let client = Client::builder().build().unwrap(); let route_provider = DynamicRouteProviderBuilder::new( snapshot, vec![node_1.clone(), node_2.clone()], Arc::new(client), ) .with_fetcher(fetcher) .with_checker(checker.clone()) .with_fetch_period(fetch_interval) .with_check_period(check_interval) .build() .await; let route_provider = Arc::new(route_provider); // Test 1: calls to route() return an error, as no healthy seeds exist. for _ in 0..4 { tokio::time::sleep(check_interval).await; let result = route_provider.route(); assert_eq!( result.unwrap_err(), AgentError::RouteProviderError("No healthy API nodes found.".to_string()) ); } // Test 2: calls to route() return both seeds, as they become healthy. 
checker.overwrite_healthy_nodes(vec![node_1.clone(), node_2.clone()]); tokio::time::sleep(3 * check_interval).await; let routed_domains = route_n_times(6, Arc::clone(&route_provider)); assert_routed_domains(routed_domains, vec![node_1.domain(), node_2.domain()], 3); } #[tokio::test] async fn test_routing_with_no_healthy_nodes_returns_an_error() { // Setup. setup_tracing(); let node_1 = Node::new(IC0_SEED_DOMAIN).unwrap(); // Set nodes fetching params: topology, fetching periodicity. let fetcher = Arc::new(NodesFetcherMock::new()); let fetch_interval = Duration::from_secs(2); // Set health checking params: healthy nodes, checking periodicity. let checker = Arc::new(NodeHealthCheckerMock::new()); let check_interval = Duration::from_secs(1); // A single seed node which is initially healthy. fetcher.overwrite_nodes(vec![node_1.clone()]); checker.overwrite_healthy_nodes(vec![node_1.clone()]); // Configure RouteProvider let snapshot = RoundRobinRoutingSnapshot::new(); let client = Client::builder().build().unwrap(); let route_provider = DynamicRouteProviderBuilder::new(snapshot, vec![node_1.clone()], Arc::new(client)) .with_fetcher(fetcher) .with_checker(checker.clone()) .with_fetch_period(fetch_interval) .with_check_period(check_interval) .build() .await; let route_provider = Arc::new(route_provider); // Test 1: multiple route() calls return a single domain=ic0.app, as the seed is healthy. tokio::time::sleep(2 * check_interval).await; let routed_domains = route_n_times(3, Arc::clone(&route_provider)); assert_routed_domains(routed_domains, vec![node_1.domain()], 3); // Test 2: calls to route() return an error, as no healthy nodes exist. checker.overwrite_healthy_nodes(vec![]); tokio::time::sleep(2 * check_interval).await; for _ in 0..4 { let result = route_provider.route(); assert_eq!( result.unwrap_err(), AgentError::RouteProviderError("No healthy API nodes found.".to_string()) ); } } #[tokio::test] async fn test_route_with_no_healthy_seeds_errors() { // Setup. 
setup_tracing(); let node_1 = Node::new(IC0_SEED_DOMAIN).unwrap(); // Set nodes fetching params: topology, fetching periodicity. let fetcher = Arc::new(NodesFetcherMock::new()); let fetch_interval = Duration::from_secs(2); // Set health checking params: healthy nodes, checking periodicity. let checker = Arc::new(NodeHealthCheckerMock::new()); let check_interval = Duration::from_secs(1); // No healthy seed nodes present, this should lead to errors. fetcher.overwrite_nodes(vec![]); checker.overwrite_healthy_nodes(vec![]); // Configure RouteProvider let snapshot = RoundRobinRoutingSnapshot::new(); let client = Client::builder().build().unwrap(); let route_provider = DynamicRouteProviderBuilder::new(snapshot, vec![node_1.clone()], Arc::new(client)) .with_fetcher(fetcher) .with_checker(checker) .with_fetch_period(fetch_interval) .with_check_period(check_interval) .build() .await; // Test: calls to route() return an error, as no healthy seeds exist. for _ in 0..4 { tokio::time::sleep(check_interval).await; let result = route_provider.route(); assert_eq!( result.unwrap_err(), AgentError::RouteProviderError("No healthy API nodes found.".to_string()) ); } } #[tokio::test] async fn test_route_with_one_healthy_and_one_unhealthy_seed() { // Setup. setup_tracing(); let node_1 = Node::new(IC0_SEED_DOMAIN).unwrap(); let node_2 = Node::new("api1.com").unwrap(); // Set nodes fetching params: topology, fetching periodicity. let fetcher = Arc::new(NodesFetcherMock::new()); let fetch_interval = Duration::from_secs(2); // Set health checking params: healthy nodes, checking periodicity. let checker = Arc::new(NodeHealthCheckerMock::new()); let check_interval = Duration::from_secs(1); // One healthy seed is present, it should be discovered during the initialization time. 
fetcher.overwrite_nodes(vec![node_1.clone(), node_2.clone()]); checker.overwrite_healthy_nodes(vec![node_1.clone()]); // Configure RouteProvider let snapshot = RoundRobinRoutingSnapshot::new(); let client = Client::builder().build().unwrap(); let route_provider = DynamicRouteProviderBuilder::new( snapshot, vec![node_1.clone(), node_2.clone()], Arc::new(client), ) .with_fetcher(fetcher) .with_checker(checker.clone()) .with_fetch_period(fetch_interval) .with_check_period(check_interval) .build() .await; let route_provider = Arc::new(route_provider); // Test 1: calls to route() return only a healthy seed ic0.app. let routed_domains = route_n_times(3, Arc::clone(&route_provider)); assert_routed_domains(routed_domains, vec![node_1.domain()], 3); // Test 2: calls to route() return two healthy seeds, as the unhealthy seed becomes healthy. checker.overwrite_healthy_nodes(vec![node_1.clone(), node_2.clone()]); tokio::time::sleep(2 * check_interval).await; let routed_domains = route_n_times(6, Arc::clone(&route_provider)); assert_routed_domains(routed_domains, vec![node_1.domain(), node_2.domain()], 3); } #[tokio::test] async fn test_routing_with_an_empty_fetched_list_of_api_nodes() { // Check resiliency to an empty list of fetched API nodes (this should never happen in normal IC operation). // Setup. setup_tracing(); let node_1 = Node::new(IC0_SEED_DOMAIN).unwrap(); // Set nodes fetching params: topology, fetching periodicity. let fetcher = Arc::new(NodesFetcherMock::new()); let fetch_interval = Duration::from_secs(2); // Set health checking params: healthy nodes, checking periodicity. let checker = Arc::new(NodeHealthCheckerMock::new()); let check_interval = Duration::from_secs(1); // One healthy seed is initially present, but the topology has no node. 
fetcher.overwrite_nodes(vec![]); checker.overwrite_healthy_nodes(vec![node_1.clone()]); // Configure RouteProvider let snapshot = RoundRobinRoutingSnapshot::new(); let client = Client::builder().build().unwrap(); let route_provider = DynamicRouteProviderBuilder::new(snapshot, vec![node_1.clone()], Arc::new(client)) .with_fetcher(fetcher.clone()) .with_checker(checker.clone()) .with_fetch_period(fetch_interval) .with_check_period(check_interval) .build() .await; let route_provider = Arc::new(route_provider); // This time span is required for the snapshot to be fully updated with the new nodes topology and health info. let snapshot_update_duration = fetch_interval + 2 * check_interval; // Test 1: multiple route() calls return a single domain=ic0.app. // HealthManagerActor shouldn't update the snapshot, if the list of fetched nodes is empty, thus we observe the healthy seed. tokio::time::sleep(snapshot_update_duration).await; let routed_domains = route_n_times(3, Arc::clone(&route_provider)); assert_routed_domains(routed_domains, vec![node_1.domain()], 3); // Test 2: multiple route() calls should now return 3 different domains with equal fairness (repetition). // Three nodes are added to the topology, i.e. now the fetched nodes list is non-empty. let node_2 = Node::new("api1.com").unwrap(); let node_3 = Node::new("api2.com").unwrap(); fetcher.overwrite_nodes(vec![node_1.clone(), node_2.clone(), node_3.clone()]); checker.overwrite_healthy_nodes(vec![node_1.clone(), node_2.clone(), node_3.clone()]); tokio::time::sleep(snapshot_update_duration).await; let routed_domains = route_n_times(6, Arc::clone(&route_provider)); assert_routed_domains( routed_domains, vec![node_1.domain(), node_2.domain(), node_3.domain()], 2, ); } } // - none of the seeds [] are healthy // - none of the API node [] is healthy // - return a vector of errors: HealthCheckErrors, FetchErrors, etc.
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-agent/src/agent/route_provider/dynamic_routing/health_check.rs
ic-agent/src/agent/route_provider/dynamic_routing/health_check.rs
use async_trait::async_trait; use bytes::Bytes; use futures_util::FutureExt; use http::{Method, Request, StatusCode, Uri}; use std::{ fmt::Debug, str::FromStr, sync::Arc, time::{Duration, Instant}, }; use stop_token::{StopSource, StopToken}; use crate::agent::{ route_provider::dynamic_routing::{ dynamic_route_provider::DynamicRouteProviderError, messages::{FetchedNodes, NodeHealthState}, node::Node, snapshot::routing_snapshot::RoutingSnapshot, type_aliases::{AtomicSwap, ReceiverMpsc, ReceiverWatch, SenderMpsc}, }, HttpService, }; const CHANNEL_BUFFER: usize = 128; /// A trait representing a health check of the node. #[cfg_attr(target_family = "wasm", async_trait(?Send))] #[cfg_attr(not(target_family = "wasm"), async_trait)] pub trait HealthCheck: Send + Sync + Debug { /// Checks the health of the node. async fn check(&self, node: &Node) -> Result<HealthCheckStatus, DynamicRouteProviderError>; } /// A struct representing the health check status of the node. #[derive(Clone, PartialEq, Debug, Default)] pub struct HealthCheckStatus { latency: Option<Duration>, } impl HealthCheckStatus { /// Creates a new `HealthCheckStatus` instance. pub fn new(latency: Option<Duration>) -> Self { Self { latency } } /// Checks if the node is healthy. pub fn is_healthy(&self) -> bool { self.latency.is_some() } /// Get the latency of the health check. pub fn latency(&self) -> Option<Duration> { self.latency } } /// A struct implementing the `HealthCheck` for the nodes. #[derive(Debug)] pub struct HealthChecker { http_client: Arc<dyn HttpService>, #[cfg(not(target_family = "wasm"))] timeout: Duration, } impl HealthChecker { /// Creates a new `HealthChecker` instance. 
pub fn new( http_client: Arc<dyn HttpService>, #[cfg(not(target_family = "wasm"))] timeout: Duration, ) -> Self { Self { http_client, #[cfg(not(target_family = "wasm"))] timeout, } } } const HEALTH_CHECKER: &str = "HealthChecker"; #[cfg_attr(target_family = "wasm", async_trait(?Send))] #[cfg_attr(not(target_family = "wasm"), async_trait)] impl HealthCheck for HealthChecker { #[allow(unused_mut)] async fn check(&self, node: &Node) -> Result<HealthCheckStatus, DynamicRouteProviderError> { // API boundary node exposes /health endpoint and should respond with 204 (No Content) if it's healthy. let uri = Uri::from_str(&format!("https://{}/health", node.domain())).unwrap(); let request = Request::builder() .method(Method::GET) .uri(uri.clone()) .body(Bytes::new()) .unwrap(); let start = Instant::now(); #[cfg(not(target_family = "wasm"))] let response = tokio::time::timeout( self.timeout, self.http_client.call(&|| Ok(request.clone()), 1, None), ) .await .map_err(|_| { DynamicRouteProviderError::HealthCheckError(format!("GET request to {uri} timed out")) })?; #[cfg(target_family = "wasm")] let response = self .http_client .call(&|| Ok(request.clone()), 1, None) .await; let response = response.map_err(|err| { DynamicRouteProviderError::HealthCheckError(format!( "Failed to execute GET request to {uri}: {err}" )) })?; let latency = start.elapsed(); if response.status() != StatusCode::NO_CONTENT { let err_msg = format!( "{HEALTH_CHECKER}: Unexpected http status code {} for url={uri} received", response.status() ); log!(error, err_msg); return Err(DynamicRouteProviderError::HealthCheckError(err_msg)); } Ok(HealthCheckStatus::new(Some(latency))) } } #[allow(unused)] const HEALTH_CHECK_ACTOR: &str = "HealthCheckActor"; /// A struct performing the health check of the node and sending the health status to the listener. struct HealthCheckActor { /// The health checker. checker: Arc<dyn HealthCheck>, /// The period of the health check. period: Duration, /// The node to check. 
node: Node, /// The sender channel (listener) to send the health status. sender_channel: SenderMpsc<NodeHealthState>, /// The cancellation token of the actor. token: StopToken, } impl HealthCheckActor { fn new( checker: Arc<dyn HealthCheck>, period: Duration, node: Node, sender_channel: SenderMpsc<NodeHealthState>, token: StopToken, ) -> Self { Self { checker, period, node, sender_channel, token, } } /// Runs the actor. async fn run(self) { loop { let health = self.checker.check(&self.node).await.unwrap_or_default(); let message = NodeHealthState { node: self.node.clone(), health, }; // Inform the listener about node's health. It can only fail if the listener was closed/dropped. self.sender_channel .send(message) .await .expect("Failed to send node's health state"); futures_util::select! { _ = crate::util::sleep(self.period).fuse() => { continue; } _ = self.token.clone().fuse() => { log!(info, "{HEALTH_CHECK_ACTOR}: was gracefully cancelled for node {:?}", self.node); break; } } } } } /// The name of the health manager actor. #[allow(unused)] pub(super) const HEALTH_MANAGER_ACTOR: &str = "HealthManagerActor"; /// A struct managing the health checks of the nodes. /// It receives the fetched nodes from the `NodesFetchActor` and starts the health checks for them. /// It also receives the health status of the nodes from the `HealthCheckActor/s` and updates the routing snapshot. pub(super) struct HealthManagerActor<S> { /// The health checker. checker: Arc<dyn HealthCheck>, /// The period of the health check. period: Duration, /// The routing snapshot, storing the nodes. routing_snapshot: AtomicSwap<S>, /// The receiver channel to listen to the fetched nodes messages. fetch_receiver: ReceiverWatch<FetchedNodes>, /// The sender channel to send the health status of the nodes back to `HealthManagerActor`. check_sender: SenderMpsc<NodeHealthState>, /// The receiver channel to receive the health status of the nodes from the `HealthCheckActor/s`. 
check_receiver: ReceiverMpsc<NodeHealthState>, /// The sender channel to send the initialization status to `DynamicRouteProvider` (used only once in the init phase). init_sender: SenderMpsc<bool>, /// The cancellation token of the actor. token: StopToken, /// The cancellation token for all the health checks. nodes_token: StopSource, /// The flag indicating if this actor is initialized with healthy nodes. is_initialized: bool, } impl<S> HealthManagerActor<S> where S: RoutingSnapshot, { /// Creates a new `HealthManagerActor` instance. pub fn new( checker: Arc<dyn HealthCheck>, period: Duration, routing_snapshot: AtomicSwap<S>, fetch_receiver: ReceiverWatch<FetchedNodes>, init_sender: SenderMpsc<bool>, token: StopToken, ) -> Self { let (check_sender, check_receiver) = async_channel::bounded(CHANNEL_BUFFER); Self { checker, period, routing_snapshot, fetch_receiver, check_sender, check_receiver, init_sender, token, nodes_token: StopSource::new(), is_initialized: false, } } /// Runs the actor. pub async fn run(mut self) { loop { futures_util::select! { // Process a new array of fetched nodes from NodesFetchActor, if it appeared in the channel. result = self.fetch_receiver.recv().fuse() => { let value = match result { Ok(value) => value, Err(_err) => { log!(error, "{HEALTH_MANAGER_ACTOR}: nodes fetch sender has been dropped: {_err:?}"); continue; } }; // Get the latest value from the channel and mark it as seen. let Some(FetchedNodes { nodes }) = value else { continue }; self.handle_fetch_update(nodes).await; } // Receive health check messages from all running HealthCheckActor/s. 
msg_opt = self.check_receiver.recv().fuse() => { if let Ok(msg) = msg_opt { self.handle_health_update(msg).await; } } _ = self.token.clone().fuse() => { self.stop_all_checks().await; self.check_receiver.close(); log!(warn, "{HEALTH_MANAGER_ACTOR}: was gracefully cancelled, all nodes health checks stopped"); break; } } } } async fn handle_health_update(&mut self, msg: NodeHealthState) { let current_snapshot = self.routing_snapshot.load_full(); let mut new_snapshot = (*current_snapshot).clone(); new_snapshot.update_node(&msg.node, msg.health.clone()); self.routing_snapshot.store(Arc::new(new_snapshot)); if !self.is_initialized && msg.health.is_healthy() { self.is_initialized = true; // If TIMEOUT_AWAIT_HEALTHY_SEED has been exceeded, the receiver was dropped and send would thus fail. We ignore the failure. let _ = self.init_sender.send(true).await; } } async fn handle_fetch_update(&mut self, nodes: Vec<Node>) { if nodes.is_empty() { // This is a bug in the IC registry. There should be at least one API Boundary Node in the registry. // Updating nodes snapshot with an empty array, would lead to an irrecoverable error, as new nodes couldn't be fetched. // We avoid such updates and just wait for a non-empty list. log!( error, "{HEALTH_MANAGER_ACTOR}: list of fetched nodes is empty" ); return; } log!( debug, "{HEALTH_MANAGER_ACTOR}: fetched nodes received {:?}", nodes ); let current_snapshot = self.routing_snapshot.load_full(); let mut new_snapshot = (*current_snapshot).clone(); // If the snapshot has changed, store it and restart all node's health checks. if new_snapshot.sync_nodes(&nodes) { self.routing_snapshot.store(Arc::new(new_snapshot)); self.stop_all_checks().await; self.start_checks(nodes.to_vec()); } } fn start_checks(&mut self, nodes: Vec<Node>) { // Create a single cancellation token for all started health checks. 
self.nodes_token = StopSource::new(); for node in nodes { log!( debug, "{HEALTH_MANAGER_ACTOR}: starting health check for node {node:?}" ); let actor = HealthCheckActor::new( Arc::clone(&self.checker), self.period, node, self.check_sender.clone(), self.nodes_token.token(), ); crate::util::spawn(async move { actor.run().await }); } } async fn stop_all_checks(&mut self) { log!( warn, "{HEALTH_MANAGER_ACTOR}: stopping all running health checks" ); self.nodes_token = StopSource::new(); } }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-agent/src/agent/route_provider/dynamic_routing/mod.rs
ic-agent/src/agent/route_provider/dynamic_routing/mod.rs
//! A dynamic routing provider for the Internet Computer (IC) Agent that enables resilient, adaptive request routing through API boundary nodes. //! //! The `DynamicRouteProvider` is an implementation of the [`RouteProvider`](super::RouteProvider) trait. It dynamically discovers and monitors API boundary nodes, filters out unhealthy nodes, and routes API calls across healthy nodes using configurable strategies such as round-robin or latency-based routing. //! This ensures robust and performant interactions with the IC network by adapting to changes in node availability and topology. //! //! # Overview //! The IC Agent is capable of dispatching API calls to destination endpoints exposing an [HTTPS interface](https://internetcomputer.org/docs/references/ic-interface-spec#http-interface). These endpoints can be: //! 1. **Replica nodes**: part of the ICP. //! 2. **API boundary nodes**: part of the ICP. //! 3. **HTTP Gateways**: Third-party services that proxy requests to API boundary nodes, e.g., gateways hosted on the `ic0.app` domain. //! //! The Agent uses the [`RouteProvider`](super::RouteProvider) trait, namely its [`route()`](super::RouteProvider::route()) method to determine the destination endpoint for each call. //! For example this trait is implemented for [`Url`](https://docs.rs/url/latest/url/) and [`RoundRobinRouteProvider`](super::RoundRobinRouteProvider). //! The `DynamicRouteProvider` is a more complex implementation, which is intended to be used only for option (2), it provides: //! - **Automatic API Node Discovery**: periodically fetches the latest API boundary node topology. //! - **Health Monitoring**: Continuously checks health of all nodes in the topology. //! - **Flexible Routing**: Directs requests to healthy nodes using built-in or custom strategies: //! - [`RoundRobinRoutingSnapshot`](snapshot::round_robin_routing::RoundRobinRoutingSnapshot): Evenly distributes requests across healthy nodes. //! 
- [`LatencyRoutingSnapshot`](snapshot::latency_based_routing::LatencyRoutingSnapshot): Prioritizes low-latency nodes via weighted round-robin, with optional penalties if nodes are unavailable within a sliding time window.
//! - **Customizability**: Supports custom node fetchers, health checkers, and routing logic.
//! # Usage
//! The `DynamicRouteProvider` can be used standalone or injected into the agent to enable dynamic routing. There are several ways to instantiate it:
//! 1. **Via High-Level Agent API**: Initializes the agent with built-in dynamic routing. This method is user-friendly but provides limited customization options.
//! 2. **Via [`DynamicRouteProviderBuilder`](dynamic_route_provider::DynamicRouteProviderBuilder)**: Creates a customized `DynamicRouteProvider` with a specific routing strategy and parameters.
//!
//! This instance can be used standalone or integrated into the agent via [`AgentBuilder::with_route_provider()`](super::super::AgentBuilder::with_route_provider).
//! ## Example: High-Level Agent API
//! ```rust
//! use anyhow::Result;
//! use ic_agent::Agent;
//! use url::Url;
//!
//! #[tokio::main]
//! async fn main() -> Result<()> {
//!     // Use the URL of an IC HTTP Gateway or even better - API boundary node as the initial seed
//!     let seed_url = Url::parse("https://ic0.app")?;
//!
//!     // The agent starts with the seed node and discovers healthy API nodes dynamically
//!     // Until then, requests go through the seed, but only if it's healthy.
//!     let agent = Agent::builder()
//!         .with_url(seed_url)
//!         .with_background_dynamic_routing()
//!         .build()?;
//!
//!     // ... use the agent for API calls
//!
//!     Ok(())
//! }
//! ```
//! **Note**: In the example above, `ic0.app` is used as a seed for initial topology discovery. However, it is not a true seed, as it is not an API boundary node in the ICP topology.
//! It will be discarded after the first successful discovery.
//!
//! ## Example: Customized instantiation
#![cfg_attr(feature = "_internal_dynamic-routing", doc = "```rust")]
#![cfg_attr(not(feature = "_internal_dynamic-routing"), doc = "```ignore")]
//! use std::{sync::Arc, time::Duration};
//!
//! use anyhow::Result;
//! use ic_agent::{
//!     agent::route_provider::{
//!         dynamic_routing::{
//!             dynamic_route_provider::{DynamicRouteProvider, DynamicRouteProviderBuilder},
//!             node::Node,
//!             snapshot::latency_based_routing::LatencyRoutingSnapshot,
//!         },
//!         RouteProvider,
//!     },
//!     Agent,
//! };
//! use reqwest::Client;
//!
//! #[tokio::main]
//! async fn main() -> Result<()> {
//!     // Choose a routing strategy: top 3 lowest-latency API boundary nodes selected via weighted round-robin
//!     let routing_strategy = LatencyRoutingSnapshot::new().set_k_top_nodes(3);
//!
//!     // Alternatively, use a basic round-robin routing across all healthy API boundary nodes
//!     // let routing_strategy = RoundRobinRoutingSnapshot::new();
//!
//!     // Or implement and provide your own custom routing strategy
//!
//!     // Seed nodes for initial topology discovery
//!     let seed_nodes = vec![
//!         Node::new("ic0.app")?,
//!         // Optional: add known API boundary nodes to improve resilience
//!         // Node::new("<api-boundary-node-domain>")?,
//!     ];
//!
//!     // HTTP client for health checks and topology discovery
//!     let client = Client::builder().build()?;
//!
//!     // Build dynamic route provider
//!     let route_provider: DynamicRouteProvider<LatencyRoutingSnapshot> =
//!         DynamicRouteProviderBuilder::new(routing_strategy, seed_nodes, Arc::new(client))
//!             // Set how often to fetch the latest API boundary node topology
//!             .with_fetch_period(Duration::from_secs(10))
//!             // Set how often to perform health checks on the API boundary nodes
//!             .with_check_period(Duration::from_secs(2))
//!             // Or optionally provide a custom node health checker implementation
//!             // .with_checker(custom_checker)
//!             // Or optionally provide a custom topology fetcher implementation
//!             // .with_fetcher(custom_fetcher)
//!             .build()
//!             .await;
//!
//!     // Example: generate routing URLs
//!     let url_1 = route_provider.route().expect("failed to get routing URL");
//!     eprintln!("Generated URL: {url_1}");
//!
//!     let url_2 = route_provider.route().expect("failed to get routing URL");
//!     eprintln!("Generated URL: {url_2}");
//!
//!     // Or inject route_provider into the agent for dynamic routing
//!     let agent = Agent::builder()
//!         .with_route_provider(route_provider)
//!         .build()?;
//!
//!     // ... use the agent for API calls
//!
//!     Ok(())
//! }
//! ```
//! # Implementation Details
//! The `DynamicRouteProvider` spawns two background services:
//! 1. `NodesFetchActor`: Periodically fetches the latest API boundary node topology and sends updates to the `HealthManagerActor`.
//! 2. `HealthManagerActor`: Manages health checks for nodes, starts and stops `HealthCheckActor`s and updates the routing table (routing snapshot) with health information.
//!
//! These background services ensure the routing table remains up-to-date.
//! # Configuration
//! The [`DynamicRouteProviderBuilder`](dynamic_route_provider::DynamicRouteProviderBuilder) allows customized instantiation of `DynamicRouteProvider`:
//! - **Fetch Period**: How often to fetch node topology (default: 5 seconds).
//! - **Health Check Period**: How often to check node health (default: 1 second).
//! - **Nodes Fetcher**: Custom implementation of the [`Fetch`](nodes_fetch::Fetch) trait for node discovery.
//! - **Health Checker**: Custom implementation of the [`HealthCheck`](health_check::HealthCheck) trait for health monitoring.
//! - **Routing Strategy**: Custom implementation of the [`RoutingSnapshot`](snapshot::routing_snapshot::RoutingSnapshot) trait for routing logic.
//!
//! Two built-in strategies are available: [`LatencyRoutingSnapshot`](snapshot::latency_based_routing::LatencyRoutingSnapshot) and [`RoundRobinRoutingSnapshot`](snapshot::round_robin_routing::RoundRobinRoutingSnapshot).
//!
//! # Error Handling
//! Errors during node fetching or health checking are encapsulated in the [`DynamicRouteProviderError`](dynamic_route_provider::DynamicRouteProviderError) enum:
//! - `NodesFetchError`: Occurs when fetching the topology fails.
//! - `HealthCheckError`: Occurs when node health checks fail.
//!
//! These errors are not propagated to the caller. Instead, they are logged internally using the `tracing` crate. To capture these errors, configure a `tracing` subscriber in your application.
//! If no healthy nodes are available, the [`route()`](super::RouteProvider::route()) method returns an [`AgentError::RouteProviderError`](super::super::agent_error::AgentError::RouteProviderError).
//! # Testing
//! The module includes comprehensive tests covering:
//! - Mainnet integration with dynamic node discovery.
//! - Routing behavior with topology and health updates.
//! - Edge cases like initially unhealthy seeds, no healthy nodes, and empty topology fetches.
//!
//! These tests ensure the `DynamicRouteProvider` behaves correctly in various scenarios.

/// Dynamic route provider implementation and its builder.
pub mod dynamic_route_provider;
/// Health check implementation.
pub mod health_check;
/// Messages used in dynamic routing.
pub(super) mod messages;
/// Node implementation.
pub mod node;
/// Nodes fetch implementation.
pub mod nodes_fetch;
/// Routing snapshot implementation.
pub mod snapshot;
#[cfg(test)]
#[cfg_attr(target_family = "wasm", allow(unused))]
pub(super) mod test_utils;
/// Type aliases used in dynamic routing.
pub(super) mod type_aliases;
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-agent/src/agent/route_provider/dynamic_routing/nodes_fetch.rs
ic-agent/src/agent/route_provider/dynamic_routing/nodes_fetch.rs
use async_trait::async_trait;
use candid::Principal;
use futures_util::FutureExt;
use std::{fmt::Debug, sync::Arc, time::Duration};
use stop_token::StopToken;
use url::Url;

#[allow(unused)]
use crate::agent::route_provider::dynamic_routing::health_check::HEALTH_MANAGER_ACTOR;
use crate::agent::{
    route_provider::dynamic_routing::{
        dynamic_route_provider::DynamicRouteProviderError,
        messages::FetchedNodes,
        node::Node,
        snapshot::routing_snapshot::RoutingSnapshot,
        type_aliases::{AtomicSwap, SenderWatch},
    },
    Agent, HttpService,
};

// Actor name used in log messages.
#[allow(unused)]
const NODES_FETCH_ACTOR: &str = "NodesFetchActor";

/// Fetcher of nodes in the topology.
#[cfg_attr(target_family = "wasm", async_trait(?Send))]
#[cfg_attr(not(target_family = "wasm"), async_trait)]
pub trait Fetch: Sync + Send + Debug {
    /// Fetches the nodes from the topology reachable via the given URL.
    async fn fetch(&self, url: Url) -> Result<Vec<Node>, DynamicRouteProviderError>;
}

/// A struct representing the fetcher of the nodes from the topology.
#[derive(Debug)]
pub struct NodesFetcher {
    // HTTP client used by the internally-built agent for topology requests.
    http_client: Arc<dyn HttpService>,
    // Subnet whose API boundary nodes are fetched.
    subnet_id: Principal,
    // By default, the nodes fetcher is configured to talk to the mainnet of Internet Computer, and verifies responses using a hard-coded public key.
    // However, for testnets one can set up a custom public key.
    root_key: Option<Vec<u8>>,
}

impl NodesFetcher {
    /// Creates a new `NodesFetcher` instance.
    pub fn new(
        http_client: Arc<dyn HttpService>,
        subnet_id: Principal,
        root_key: Option<Vec<u8>>,
    ) -> Self {
        Self {
            http_client,
            subnet_id,
            root_key,
        }
    }
}

#[cfg_attr(target_family = "wasm", async_trait(?Send))]
#[cfg_attr(not(target_family = "wasm"), async_trait)]
impl Fetch for NodesFetcher {
    async fn fetch(&self, url: Url) -> Result<Vec<Node>, DynamicRouteProviderError> {
        // Build a short-lived agent pointed at the given node, reusing the shared HTTP client.
        let agent = Agent::builder()
            .with_url(url)
            .with_arc_http_middleware(self.http_client.clone())
            .build()
            .map_err(|err| {
                DynamicRouteProviderError::NodesFetchError(format!(
                    "Failed to build the agent: {err}"
                ))
            })?;
        // For non-mainnet setups, override the hard-coded root key.
        if let Some(key) = self.root_key.clone() {
            agent.set_root_key(key);
        }
        let api_bns = agent
            .fetch_api_boundary_nodes_by_subnet_id(self.subnet_id)
            .await
            .map_err(|err| {
                DynamicRouteProviderError::NodesFetchError(format!(
                    "Failed to fetch API nodes: {err}"
                ))
            })?;
        // If some API BNs have invalid domain names, they are discarded.
        let nodes = api_bns
            .into_iter()
            .filter_map(|api_node| api_node.try_into().ok())
            .collect();
        return Ok(nodes);
    }
}

/// A struct representing the actor responsible for fetching existing nodes and communicating it with the listener.
pub(super) struct NodesFetchActor<S> {
    /// The fetcher object responsible for fetching the nodes.
    fetcher: Arc<dyn Fetch>,
    /// Time period between fetches.
    period: Duration,
    /// The interval to wait before retrying to fetch the nodes in case of failures.
    fetch_retry_interval: Duration,
    /// Communication channel with the listener.
    fetch_sender: SenderWatch<FetchedNodes>,
    /// The snapshot of the routing table.
    routing_snapshot: AtomicSwap<S>,
    /// The token to cancel/stop the actor.
    token: StopToken,
}

impl<S> NodesFetchActor<S>
where
    S: RoutingSnapshot,
{
    /// Creates a new `NodesFetchActor` instance.
    pub fn new(
        fetcher: Arc<dyn Fetch>,
        period: Duration,
        retry_interval: Duration,
        fetch_sender: SenderWatch<FetchedNodes>,
        snapshot: AtomicSwap<S>,
        token: StopToken,
    ) -> Self {
        Self {
            fetcher,
            period,
            fetch_retry_interval: retry_interval,
            fetch_sender,
            routing_snapshot: snapshot,
            token,
        }
    }

    /// Runs the actor: periodically fetches the topology until the stop token is triggered.
    pub async fn run(self) {
        loop {
            // Retry until success:
            // - try to get a healthy node from the routing snapshot
            // - if snapshot is empty, break the cycle and wait for the next fetch cycle
            // - using the healthy node, try to fetch nodes from topology
            // - if failure, sleep and retry
            // - try send fetched nodes to the listener
            // - failure should never happen, but we trace it if it does
            loop {
                let snapshot = self.routing_snapshot.load();
                if let Some(node) = snapshot.next_node() {
                    match self.fetcher.fetch((&node).into()).await {
                        Ok(nodes) => {
                            let msg = Some(FetchedNodes { nodes });
                            match self.fetch_sender.send(msg) {
                                Ok(()) => break, // message sent successfully, exit the loop
                                Err(_err) => {
                                    log!(error, "{NODES_FETCH_ACTOR}: failed to send results to {HEALTH_MANAGER_ACTOR}: {_err:?}");
                                }
                            }
                        }
                        Err(_err) => {
                            log!(
                                error,
                                "{NODES_FETCH_ACTOR}: failed to fetch nodes: {_err:?}"
                            );
                        }
                    };
                } else {
                    // No healthy nodes in the snapshot, break the cycle and wait for the next fetch cycle
                    log!(error, "{NODES_FETCH_ACTOR}: no nodes in the snapshot");
                    break;
                };
                log!(
                    warn,
                    "Retrying to fetch the nodes in {:?}",
                    self.fetch_retry_interval
                );
                crate::util::sleep(self.fetch_retry_interval).await;
            }
            // Wait for the next scheduled fetch, or exit if cancellation is requested.
            futures_util::select! {
                _ = crate::util::sleep(self.period).fuse() => {
                    continue;
                }
                _ = self.token.clone().fuse() => {
                    log!(warn, "{NODES_FETCH_ACTOR}: was gracefully cancelled");
                    break;
                }
            }
        }
    }
}
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-agent/src/agent/route_provider/dynamic_routing/snapshot/round_robin_routing.rs
ic-agent/src/agent/route_provider/dynamic_routing/snapshot/round_robin_routing.rs
use std::{ collections::HashSet, sync::{ atomic::{AtomicUsize, Ordering}, Arc, }, }; use crate::agent::route_provider::{ dynamic_routing::{ health_check::HealthCheckStatus, node::Node, snapshot::routing_snapshot::RoutingSnapshot, }, RoutesStats, }; /// Routing snapshot, which samples nodes in a round-robin fashion. #[derive(Default, Debug, Clone)] pub struct RoundRobinRoutingSnapshot { current_idx: Arc<AtomicUsize>, healthy_nodes: HashSet<Node>, existing_nodes: HashSet<Node>, } impl RoundRobinRoutingSnapshot { /// Creates a new instance of `RoundRobinRoutingSnapshot`. pub fn new() -> Self { Self { current_idx: Arc::new(AtomicUsize::new(0)), healthy_nodes: HashSet::new(), existing_nodes: HashSet::new(), } } } impl RoutingSnapshot for RoundRobinRoutingSnapshot { fn has_nodes(&self) -> bool { !self.healthy_nodes.is_empty() } fn next_node(&self) -> Option<Node> { if self.healthy_nodes.is_empty() { return None; } let prev_idx = self.current_idx.fetch_add(1, Ordering::Relaxed); self.healthy_nodes .iter() .nth(prev_idx % self.healthy_nodes.len()) .cloned() } fn next_n_nodes(&self, n: usize) -> Vec<Node> { if n == 0 { return Vec::new(); } let healthy_nodes = Vec::from_iter(self.healthy_nodes.clone()); let healthy_count = healthy_nodes.len(); if n >= healthy_count { return healthy_nodes.clone(); } let idx = self.current_idx.fetch_add(n, Ordering::Relaxed) % healthy_count; let mut nodes = Vec::with_capacity(n); if healthy_count - idx >= n { nodes.extend_from_slice(&healthy_nodes[idx..idx + n]); } else { nodes.extend_from_slice(&healthy_nodes[idx..]); nodes.extend_from_slice(&healthy_nodes[..n - nodes.len()]); } nodes } fn sync_nodes(&mut self, nodes: &[Node]) -> bool { let new_nodes = HashSet::from_iter(nodes.iter().cloned()); // Find nodes removed from topology. let nodes_removed: Vec<_> = self .existing_nodes .difference(&new_nodes) .cloned() .collect(); let has_removed_nodes = !nodes_removed.is_empty(); // Find nodes added to topology. 
let nodes_added: Vec<_> = new_nodes .difference(&self.existing_nodes) .cloned() .collect(); let has_added_nodes = !nodes_added.is_empty(); // NOTE: newly added nodes will appear in the healthy_nodes later. // This happens after the first node health check round and a consequent update_node() invocation. self.existing_nodes.extend(nodes_added); nodes_removed.iter().for_each(|node| { self.existing_nodes.remove(node); self.healthy_nodes.remove(node); }); has_added_nodes || has_removed_nodes } fn update_node(&mut self, node: &Node, health: HealthCheckStatus) -> bool { if !self.existing_nodes.contains(node) { return false; } if health.is_healthy() { self.healthy_nodes.insert(node.clone()) } else { self.healthy_nodes.remove(node) } } fn routes_stats(&self) -> RoutesStats { RoutesStats::new(self.existing_nodes.len(), Some(self.healthy_nodes.len())) } } #[cfg(test)] mod tests { use std::collections::HashMap; use std::slice; use std::time::Duration; use std::{collections::HashSet, sync::atomic::Ordering}; use crate::agent::route_provider::dynamic_routing::{ health_check::HealthCheckStatus, node::Node, snapshot::{ round_robin_routing::RoundRobinRoutingSnapshot, routing_snapshot::RoutingSnapshot, }, }; #[test] fn test_snapshot_init() { // Arrange let snapshot = RoundRobinRoutingSnapshot::new(); // Assert assert!(snapshot.healthy_nodes.is_empty()); assert!(snapshot.existing_nodes.is_empty()); assert!(!snapshot.has_nodes()); assert_eq!(snapshot.current_idx.load(Ordering::SeqCst), 0); assert!(snapshot.next_node().is_none()); } #[test] fn test_update_of_non_existing_node_always_returns_false() { // Arrange let mut snapshot = RoundRobinRoutingSnapshot::new(); // This node is not present in existing_nodes let node = Node::new("api1.com").unwrap(); let healthy = HealthCheckStatus::new(Some(Duration::from_secs(1))); let unhealthy = HealthCheckStatus::new(None); // Act 1 let is_updated = snapshot.update_node(&node, healthy); // Assert assert!(!is_updated); 
assert!(snapshot.existing_nodes.is_empty()); assert!(snapshot.next_node().is_none()); // Act 2 let is_updated = snapshot.update_node(&node, unhealthy); // Assert assert!(!is_updated); assert!(snapshot.existing_nodes.is_empty()); assert!(snapshot.next_node().is_none()); } #[test] fn test_update_of_existing_unhealthy_node_with_healthy_node_returns_true() { // Arrange let mut snapshot = RoundRobinRoutingSnapshot::new(); let node = Node::new("api1.com").unwrap(); // node is present in existing_nodes, but not in healthy_nodes snapshot.existing_nodes.insert(node.clone()); let health = HealthCheckStatus::new(Some(Duration::from_secs(1))); // Act let is_updated = snapshot.update_node(&node, health); assert!(is_updated); assert!(snapshot.has_nodes()); assert_eq!(snapshot.next_node().unwrap(), node); assert_eq!(snapshot.current_idx.load(Ordering::SeqCst), 1); } #[test] fn test_update_of_existing_healthy_node_with_unhealthy_node_returns_true() { // Arrange let mut snapshot = RoundRobinRoutingSnapshot::new(); let node = Node::new("api1.com").unwrap(); snapshot.existing_nodes.insert(node.clone()); snapshot.healthy_nodes.insert(node.clone()); let unhealthy = HealthCheckStatus::new(None); // Act let is_updated = snapshot.update_node(&node, unhealthy); assert!(is_updated); assert!(!snapshot.has_nodes()); assert!(snapshot.next_node().is_none()); } #[test] fn test_sync_node_scenarios() { // Arrange let mut snapshot = RoundRobinRoutingSnapshot::new(); let node_1 = Node::new("api1.com").unwrap(); // Sync with node_1 let nodes_changed = snapshot.sync_nodes(slice::from_ref(&node_1)); assert!(nodes_changed); assert!(snapshot.healthy_nodes.is_empty()); assert_eq!( snapshot.existing_nodes, HashSet::from_iter(vec![node_1.clone()]) ); // Add node_1 to healthy_nodes manually snapshot.healthy_nodes.insert(node_1.clone()); // Sync with node_1 again let nodes_changed = snapshot.sync_nodes(slice::from_ref(&node_1)); assert!(!nodes_changed); assert_eq!( snapshot.existing_nodes, 
HashSet::from_iter(vec![node_1.clone()]) ); assert_eq!(snapshot.healthy_nodes, HashSet::from_iter(vec![node_1])); // Sync with node_2 let node_2 = Node::new("api2.com").unwrap(); let nodes_changed = snapshot.sync_nodes(slice::from_ref(&node_2)); assert!(nodes_changed); assert_eq!( snapshot.existing_nodes, HashSet::from_iter(vec![node_2.clone()]) ); // Make sure node_1 was removed from healthy nodes assert!(snapshot.healthy_nodes.is_empty()); // Add node_2 to healthy_nodes manually snapshot.healthy_nodes.insert(node_2.clone()); // Sync with [node_2, node_3] let node_3 = Node::new("api3.com").unwrap(); let nodes_changed = snapshot.sync_nodes(&[node_3.clone(), node_2.clone()]); assert!(nodes_changed); assert_eq!( snapshot.existing_nodes, HashSet::from_iter(vec![node_3.clone(), node_2.clone()]) ); assert_eq!(snapshot.healthy_nodes, HashSet::from_iter(vec![node_2])); snapshot.healthy_nodes.insert(node_3); // Sync with [] let nodes_changed = snapshot.sync_nodes(&[]); assert!(nodes_changed); assert!(snapshot.existing_nodes.is_empty()); // Make sure all nodes were removed from the healthy_nodes assert!(snapshot.healthy_nodes.is_empty()); // Sync with [] again let nodes_changed = snapshot.sync_nodes(&[]); assert!(!nodes_changed); assert!(snapshot.existing_nodes.is_empty()); } #[test] fn test_next_node() { // Arrange let mut snapshot = RoundRobinRoutingSnapshot::new(); let node_1 = Node::new("api1.com").unwrap(); let node_2 = Node::new("api2.com").unwrap(); let node_3 = Node::new("api3.com").unwrap(); let nodes = vec![node_1, node_2, node_3]; snapshot.existing_nodes.extend(nodes.clone()); snapshot.healthy_nodes.extend(nodes.clone()); // Act let n = 6; let mut count_map = HashMap::new(); for _ in 0..n { let node = snapshot.next_node().unwrap(); count_map.entry(node).and_modify(|v| *v += 1).or_insert(1); } // Assert each node was returned 2 times let k = 2; assert_eq!( count_map.len(), nodes.len(), "The number of unique elements is not {}", nodes.len() ); for (item, &count) in 
&count_map { assert_eq!( count, k, "Element {:?} does not appear exactly {} times", item, k ); } } #[test] fn test_n_nodes() { // Arrange let mut snapshot = RoundRobinRoutingSnapshot::new(); let node_1 = Node::new("api1.com").unwrap(); let node_2 = Node::new("api2.com").unwrap(); let node_3 = Node::new("api3.com").unwrap(); let node_4 = Node::new("api4.com").unwrap(); let node_5 = Node::new("api5.com").unwrap(); let nodes = vec![ node_1.clone(), node_2.clone(), node_3.clone(), node_4.clone(), node_5.clone(), ]; snapshot.healthy_nodes.extend(nodes.clone()); // First call let mut n_nodes: Vec<_> = snapshot.next_n_nodes(3); // Second call n_nodes.extend(snapshot.next_n_nodes(3)); // Third call n_nodes.extend(snapshot.next_n_nodes(4)); // Fourth call n_nodes.extend(snapshot.next_n_nodes(5)); // Assert each node was returned 3 times let k = 3; let mut count_map = HashMap::new(); for item in n_nodes.iter() { count_map.entry(item).and_modify(|v| *v += 1).or_insert(1); } assert_eq!( count_map.len(), nodes.len(), "The number of unique elements is not {}", nodes.len() ); for (item, &count) in &count_map { assert_eq!( count, k, "Element {:?} does not appear exactly {} times", item, k ); } } }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-agent/src/agent/route_provider/dynamic_routing/snapshot/mod.rs
ic-agent/src/agent/route_provider/dynamic_routing/snapshot/mod.rs
/// Latency-based routing snapshot implementation.
pub mod latency_based_routing;
/// Round-robin routing snapshot implementation.
pub mod round_robin_routing;
/// The `RoutingSnapshot` trait definition.
pub mod routing_snapshot;
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-agent/src/agent/route_provider/dynamic_routing/snapshot/routing_snapshot.rs
ic-agent/src/agent/route_provider/dynamic_routing/snapshot/routing_snapshot.rs
use std::fmt::Debug;

use crate::agent::route_provider::{
    dynamic_routing::{health_check::HealthCheckStatus, node::Node},
    RoutesStats,
};

/// A trait for interacting with the snapshot of nodes (routing table).
pub trait RoutingSnapshot: Send + Sync + Clone + Debug {
    /// Returns `true` if the snapshot has nodes available for routing.
    #[allow(unused)]
    fn has_nodes(&self) -> bool;
    /// Gets the next node from the snapshot, or `None` if no node is available.
    fn next_node(&self) -> Option<Node>;
    /// Gets up to `n` different nodes from the snapshot.
    fn next_n_nodes(&self, n: usize) -> Vec<Node>;
    /// Syncs the nodes in the snapshot with the provided list of nodes, returning `true` if the snapshot was updated.
    fn sync_nodes(&mut self, nodes: &[Node]) -> bool;
    /// Updates the health status of a specific node, returning `true` if the node was found and updated.
    fn update_node(&mut self, node: &Node, health: HealthCheckStatus) -> bool;
    /// Returns statistics about the routes (nodes).
    fn routes_stats(&self) -> RoutesStats;
}
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-agent/src/agent/route_provider/dynamic_routing/snapshot/latency_based_routing.rs
ic-agent/src/agent/route_provider/dynamic_routing/snapshot/latency_based_routing.rs
use std::{
    collections::{HashMap, HashSet, VecDeque},
    sync::Arc,
    time::Duration,
};

use arc_swap::ArcSwap;
use rand::Rng;

use crate::agent::route_provider::{
    dynamic_routing::{
        health_check::HealthCheckStatus, node::Node,
        snapshot::routing_snapshot::RoutingSnapshot,
    },
    RoutesStats,
};

// Determines the size of the sliding window used for storing latencies and availabilities of nodes.
const WINDOW_SIZE: usize = 15;

// Determines the decay rate of the exponential decay function, which is used for generating weights over the sliding window.
const LAMBDA_DECAY: f64 = 0.3;

/// Generates exponentially decaying weights for the sliding window.
/// Weights are higher for more recent observations and decay exponentially for older ones.
fn generate_exp_decaying_weights(n: usize, lambda: f64) -> Vec<f64> {
    let mut weights: Vec<f64> = Vec::with_capacity(n);
    for i in 0..n {
        let weight = (-lambda * i as f64).exp();
        weights.push(weight);
    }
    weights
}

/// A node candidate eligible for final routing selection based on its score.
///
/// # Overview
/// This struct represents a node that has passed initial pre-selection criteria and is part of the
/// routing candidate pool. The selection process happens in two phases:
/// 1. Pre-selection: depending on the settings, either the k-top nodes or all healthy nodes are chosen
/// 2. Final selection: a node is probabilistically selected from the candidate pool based on its score
#[derive(Clone, Debug)]
struct RoutingCandidateNode {
    node: Node,
    score: f64,
}

impl RoutingCandidateNode {
    fn new(node: Node, score: f64) -> Self {
        Self { node, score }
    }
}

// Stores node's meta information and metrics (latencies, availabilities).
// Routing nodes are probabilistically selected based on the score field.
#[derive(Clone, Debug)]
struct NodeMetrics {
    // Size of the sliding window used to store latencies and availabilities of the node.
    window_size: usize,
    /// Reflects the status of the most recent health check.
    /// It should be the same as the last element in `availabilities`.
    is_healthy: bool,
    /// Sliding window with latency measurements.
    latencies: VecDeque<f64>,
    /// Sliding window with availability measurements.
    availabilities: VecDeque<bool>,
    /// Overall score of the node. Calculated based on latencies and availabilities arrays. This score is used in `next_n_nodes()` method for the final nodes selection.
    score: f64,
}

impl NodeMetrics {
    pub fn new(window_size: usize) -> Self {
        Self {
            window_size,
            is_healthy: false,
            latencies: VecDeque::with_capacity(window_size + 1),
            availabilities: VecDeque::with_capacity(window_size + 1),
            score: 0.0,
        }
    }

    /// Records one health-check observation: `Some(latency)` counts as available,
    /// `None` as unavailable. Both windows are truncated to `window_size`.
    pub fn add_latency_measurement(&mut self, latency: Option<Duration>) {
        self.is_healthy = latency.is_some();
        if let Some(duration) = latency {
            self.latencies.push_back(duration.as_secs_f64());
            while self.latencies.len() > self.window_size {
                self.latencies.pop_front();
            }
            self.availabilities.push_back(true);
        } else {
            self.availabilities.push_back(false);
        }
        while self.availabilities.len() > self.window_size {
            self.availabilities.pop_front();
        }
    }
}

/// Computes the score of the node based on the latencies, availabilities and window weights.
/// `window_weights_sum` is passed for efficiency reasons, as it is pre-calculated.
fn compute_score(
    window_weights: &[f64],
    window_weights_sum: f64,
    availabilities: &VecDeque<bool>,
    latencies: &VecDeque<f64>,
    use_availability_penalty: bool,
) -> f64 {
    let weights_size = window_weights.len();
    let availabilities_size = availabilities.len();
    let latencies_size = latencies.len();

    if weights_size < availabilities_size {
        panic!(
            "Configuration error: Weights array of size {weights_size} is smaller than array of availabilities of size {availabilities_size}.",
        );
    } else if weights_size < latencies_size {
        panic!(
            "Configuration error: Weights array of size {weights_size} is smaller than array of latencies of size {latencies_size}.",
        );
    }

    // Compute normalized availability score [0.0, 1.0].
    let score_a = if !use_availability_penalty {
        1.0
    } else if availabilities.is_empty() {
        0.0
    } else {
        let mut score = 0.0;

        // Compute weighted score. Weights are applied in reverse order.
        for (idx, availability) in availabilities.iter().rev().enumerate() {
            score += window_weights[idx] * (*availability as u8 as f64);
        }

        // Normalize the score.
        let weights_sum = if availabilities_size < weights_size {
            // Use partial sum of weights, if the window is not full.
            let partial_weights_sum: f64 = window_weights.iter().take(availabilities_size).sum();
            partial_weights_sum
        } else {
            // Use pre-calculated sum, if the window is full.
            window_weights_sum
        };

        score /= weights_sum;

        score
    };

    // Compute latency score (not normalized).
    let score_l = if latencies.is_empty() {
        0.0
    } else {
        let mut score = 0.0;

        // Compute weighted score. Weights are applied in reverse order. Latency is inverted, so that smaller latencies have higher score.
        for (idx, latency) in latencies.iter().rev().enumerate() {
            score += window_weights[idx] / latency;
        }

        let weights_sum = if latencies_size < weights_size {
            let partial_weights_sum: f64 = window_weights.iter().take(latencies.len()).sum();
            partial_weights_sum
        } else {
            // Use pre-calculated sum.
            window_weights_sum
        };

        score /= weights_sum;

        score
    };

    // Combine availability and latency scores via product to emphasize the importance of both metrics.
    score_l * score_a
}

/// # Latency-based dynamic routing
///
/// This module implements a routing strategy that uses weighted random selection of nodes based on their historical data (latencies and availabilities).
///
/// Summary of the routing strategy:
/// - Uses sliding windows for storing latencies and availabilities of each node
/// - Latency and availability scores are first computed separately from the sliding windows using an additional array of weights, allowing prioritization of more recent observations. By default, exponentially decaying weights are used.
/// - The final overall score of each node is computed as a product of latency and availability scores, namely `score = score_l * score_a`
/// - Nodes pre-selection phase for routing candidate pool (snapshot):
///   - Criteria: if k-top-nodes setting is enabled, then only k nodes with highest scores are filtered into the routing candidate pool (snapshot), otherwise all healthy nodes are used
///   - Trigger conditions: topology updates, node health check status updates
/// - Final selection of nodes for routing from the candidate pool is probabilistic and is proportional to the score of the node
///
/// ## Configuration Options
/// - `k_top_nodes`: Limit routing to only top k nodes with highest score
/// - `use_availability_penalty`: Whether to penalize nodes for being unavailable
/// - Custom window weights can be provided for specialized decay functions
#[derive(Default, Debug, Clone)]
pub struct LatencyRoutingSnapshot {
    // If set, only k nodes with best scores are used for routing
    k_top_nodes: Option<usize>,
    // Stores all existing nodes in the topology along with their historical data (latencies and availabilities)
    existing_nodes: HashMap<Node, NodeMetrics>,
    // Snapshot of nodes, which are pre-selected as candidates for routing. Snapshot is published via publish_routing_candidates() when either: topology changes or a health check of some node is received.
    routing_candidates: Arc<ArcSwap<Vec<RoutingCandidateNode>>>,
    // Weights used to compute the availability score of a node.
    window_weights: Vec<f64>,
    // Pre-computed weights sum, passed for efficiency purpose as this sum doesn't change.
    window_weights_sum: f64,
    // Whether or not to penalize a node's score for being unavailable
    use_availability_penalty: bool,
}

/// Implementation of the `LatencyRoutingSnapshot`.
impl LatencyRoutingSnapshot {
    /// Creates a new `LatencyRoutingSnapshot` with default configuration.
    pub fn new() -> Self {
        // Weights are ordered from left to right, where the leftmost weight is for the most recent health check.
        let window_weights = generate_exp_decaying_weights(WINDOW_SIZE, LAMBDA_DECAY);
        // Pre-calculate the sum of weights for efficiency reasons.
        let window_weights_sum: f64 = window_weights.iter().sum();

        Self {
            k_top_nodes: None,
            existing_nodes: HashMap::new(),
            routing_candidates: Arc::new(ArcSwap::new(vec![].into())),
            use_availability_penalty: true,
            window_weights,
            window_weights_sum,
        }
    }

    /// Sets whether to use only k nodes with the highest score for routing.
    #[allow(unused)]
    pub fn set_k_top_nodes(mut self, k_top_nodes: usize) -> Self {
        self.k_top_nodes = Some(k_top_nodes);
        self
    }

    /// Sets whether to use availability penalty in the score computation.
    #[allow(unused)]
    pub fn set_availability_penalty(mut self, use_penalty: bool) -> Self {
        self.use_availability_penalty = use_penalty;
        self
    }

    /// Sets the weights for the sliding window.
    /// The weights are ordered from left to right, where the leftmost weight is for the most recent health check.
    #[allow(unused)]
    pub fn set_window_weights(mut self, weights: &[f64]) -> Self {
        self.window_weights_sum = weights.iter().sum();
        self.window_weights = weights.to_vec();
        self
    }

    /// Atomically updates the `routing_candidates`.
    fn publish_routing_candidates(&self) {
        let mut routing_candidates: Vec<RoutingCandidateNode> = self
            .existing_nodes
            .iter()
            .filter(|(_, v)| v.is_healthy)
            .map(|(k, v)| RoutingCandidateNode::new(k.clone(), v.score))
            .collect();
        // In case requests are routed to only k-top nodes, pre-select these candidates
        if let Some(k_top) = self.k_top_nodes {
            // Sort by score descending; NaN scores compare as Equal.
            routing_candidates.sort_by(|a, b| {
                b.score
                    .partial_cmp(&a.score)
                    .unwrap_or(std::cmp::Ordering::Equal)
            });
            if routing_candidates.len() > k_top {
                routing_candidates.truncate(k_top);
            }
        }
        // Atomically update the table of routing candidates
        self.routing_candidates.store(Arc::new(routing_candidates));
    }
}

/// Helper function to sample nodes based on their weights.
/// Node index is selected based on the input number in range [0.0, 1.0]
#[inline(always)]
fn weighted_sample(weighted_nodes: &[RoutingCandidateNode], number: f64) -> Option<usize> {
    if !(0.0..=1.0).contains(&number) || weighted_nodes.is_empty() {
        return None;
    }
    let sum: f64 = weighted_nodes.iter().map(|n| n.score).sum();
    if sum == 0.0 {
        return None;
    }
    let mut weighted_number = number * sum;
    for (idx, node) in weighted_nodes.iter().enumerate() {
        weighted_number -= node.score;
        if weighted_number <= 0.0 {
            return Some(idx);
        }
    }
    // If this part is reached due to floating-point precision, return the last index
    Some(weighted_nodes.len() - 1)
}

impl RoutingSnapshot for LatencyRoutingSnapshot {
    fn has_nodes(&self) -> bool {
        !self.routing_candidates.load().is_empty()
    }

    fn next_node(&self) -> Option<Node> {
        self.next_n_nodes(1).into_iter().next()
    }

    // Uses weighted random sampling algorithm n times. Node can be selected at most once (sampling without replacement).
    fn next_n_nodes(&self, n: usize) -> Vec<Node> {
        if n == 0 {
            return Vec::new();
        }

        let mut routing_candidates: Vec<RoutingCandidateNode> =
            self.routing_candidates.load().as_ref().clone();

        // Limit the number of returned nodes to the number of available nodes
        let n = std::cmp::min(n, routing_candidates.len());

        let mut nodes = Vec::with_capacity(n);

        let mut rng = rand::thread_rng();

        for _ in 0..n {
            let rand_num = rng.gen::<f64>();
            if let Some(idx) = weighted_sample(routing_candidates.as_slice(), rand_num) {
                // swap_remove is O(1) and guarantees sampling without replacement.
                let removed_node = routing_candidates.swap_remove(idx);
                nodes.push(removed_node.node);
            }
        }

        nodes
    }

    fn sync_nodes(&mut self, nodes: &[Node]) -> bool {
        let new_nodes: HashSet<&Node> = nodes.iter().collect();

        let mut has_changes = false;

        // Remove nodes that are no longer present
        self.existing_nodes.retain(|node, _| {
            let keep = new_nodes.contains(node);
            if !keep {
                has_changes = true;
            }
            keep
        });

        // Add new nodes that don't exist yet
        for node in nodes {
            if !self.existing_nodes.contains_key(node) {
                self.existing_nodes
                    .insert(node.clone(), NodeMetrics::new(self.window_weights.len()));
                has_changes = true;
            }
        }

        if has_changes {
            self.publish_routing_candidates();
        }

        has_changes
    }

    fn update_node(&mut self, node: &Node, health: HealthCheckStatus) -> bool {
        // Get mut reference to the existing node metrics or return false if not found
        let updated_node: &mut NodeMetrics = match self.existing_nodes.get_mut(node) {
            Some(metrics) => metrics,
            None => return false,
        };

        // Update the node's metrics
        updated_node.add_latency_measurement(health.latency());

        updated_node.score = compute_score(
            &self.window_weights,
            self.window_weights_sum,
            &updated_node.availabilities,
            &updated_node.latencies,
            self.use_availability_penalty,
        );

        self.publish_routing_candidates();

        true
    }

    fn routes_stats(&self) -> RoutesStats {
        RoutesStats::new(
            self.existing_nodes.len(),
            Some(self.routing_candidates.load().len()),
        )
    }
}

#[cfg(test)]
mod tests {
    use std::{
        collections::{HashMap, VecDeque},
        slice,
        time::Duration,
}; use crate::agent::route_provider::{ dynamic_routing::{ health_check::HealthCheckStatus, node::Node, snapshot::{ latency_based_routing::{ compute_score, weighted_sample, LatencyRoutingSnapshot, NodeMetrics, RoutingCandidateNode, }, routing_snapshot::RoutingSnapshot, }, }, RoutesStats, }; #[test] fn test_snapshot_init() { // Arrange let snapshot = LatencyRoutingSnapshot::new(); // Assert assert!(snapshot.existing_nodes.is_empty()); assert!(!snapshot.has_nodes()); assert!(snapshot.next_node().is_none()); assert!(snapshot.next_n_nodes(1).is_empty()); assert_eq!(snapshot.routes_stats(), RoutesStats::new(0, Some(0))); } #[test] fn test_update_for_non_existing_node_fails() { // Arrange let mut snapshot = LatencyRoutingSnapshot::new(); let node = Node::new("api1.com").unwrap(); let health = HealthCheckStatus::new(Some(Duration::from_secs(1))); // Act let is_updated = snapshot.update_node(&node, health); // Assert assert!(!is_updated); assert!(snapshot.existing_nodes.is_empty()); assert!(!snapshot.has_nodes()); assert!(snapshot.next_node().is_none()); assert_eq!(snapshot.routes_stats(), RoutesStats::new(0, Some(0))); } #[test] fn test_update_for_existing_node_succeeds() { // Arrange let mut snapshot = LatencyRoutingSnapshot::new() .set_window_weights(&[2.0, 1.0]) .set_availability_penalty(false); let node = Node::new("api1.com").unwrap(); let health = HealthCheckStatus::new(Some(Duration::from_secs(1))); snapshot.sync_nodes(slice::from_ref(&node)); assert_eq!(snapshot.routes_stats(), RoutesStats::new(1, Some(0))); // Check first update let is_updated = snapshot.update_node(&node, health); assert!(is_updated); assert!(snapshot.has_nodes()); let metrics = snapshot.existing_nodes.get(&node).unwrap(); assert_eq!(metrics.score, (2.0 / 1.0) / 2.0); assert_eq!(snapshot.next_node().unwrap(), node); assert_eq!(snapshot.routes_stats(), RoutesStats::new(1, Some(1))); // Check second update let health = HealthCheckStatus::new(Some(Duration::from_secs(2))); let is_updated = 
snapshot.update_node(&node, health); assert!(is_updated); let metrics = snapshot.existing_nodes.get(&node).unwrap(); assert_eq!(metrics.score, (2.0 / 2.0 + 1.0 / 1.0) / 3.0); // Check third update with none let health = HealthCheckStatus::new(None); let is_updated = snapshot.update_node(&node, health); assert!(is_updated); let metrics = snapshot.existing_nodes.get(&node).unwrap(); assert_eq!(metrics.score, (2.0 / 2.0 + 1.0 / 1.0) / 3.0); assert!(!snapshot.has_nodes()); assert_eq!(snapshot.existing_nodes.len(), 1); assert!(snapshot.next_node().is_none()); assert_eq!(snapshot.routes_stats(), RoutesStats::new(1, Some(0))); // Check fourth update let health = HealthCheckStatus::new(Some(Duration::from_secs(3))); let is_updated = snapshot.update_node(&node, health); assert!(is_updated); let metrics = snapshot.existing_nodes.get(&node).unwrap(); assert_eq!(metrics.score, (2.0 / 3.0 + 1.0 / 2.0) / 3.0); } #[test] fn test_sync_node_scenarios() { // Arrange let mut snapshot = LatencyRoutingSnapshot::new(); let node_1 = Node::new("api1.com").unwrap(); // Sync with node_1 let nodes_changed = snapshot.sync_nodes(slice::from_ref(&node_1)); assert!(nodes_changed); assert!(snapshot.existing_nodes.contains_key(&node_1)); assert!(!snapshot.has_nodes()); // Sync with node_1 again let nodes_changed = snapshot.sync_nodes(slice::from_ref(&node_1)); assert!(!nodes_changed); assert_eq!( snapshot.existing_nodes.keys().collect::<Vec<_>>(), vec![&node_1] ); // Sync with node_2 let node_2 = Node::new("api2.com").unwrap(); let nodes_changed = snapshot.sync_nodes(slice::from_ref(&node_2)); assert!(nodes_changed); assert_eq!( snapshot.existing_nodes.keys().collect::<Vec<_>>(), vec![&node_2] ); assert!(!snapshot.has_nodes()); // Sync with [node_2, node_3] let node_3 = Node::new("api3.com").unwrap(); let nodes_changed = snapshot.sync_nodes(&[node_3.clone(), node_2.clone()]); assert!(nodes_changed); let mut keys = snapshot.existing_nodes.keys().collect::<Vec<_>>(); keys.sort_by(|a, b| 
a.domain().cmp(b.domain())); assert_eq!(keys, vec![&node_2, &node_3]); assert!(!snapshot.has_nodes()); // Sync with [node_2, node_3] again let nodes_changed = snapshot.sync_nodes(&[node_3.clone(), node_2.clone()]); assert!(!nodes_changed); let mut keys = snapshot.existing_nodes.keys().collect::<Vec<_>>(); keys.sort_by(|a, b| a.domain().cmp(b.domain())); assert_eq!(keys, vec![&node_2, &node_3]); assert!(!snapshot.has_nodes()); // Sync with [] let nodes_changed = snapshot.sync_nodes(&[]); assert!(nodes_changed); assert!(snapshot.existing_nodes.is_empty()); // Sync with [] again let nodes_changed = snapshot.sync_nodes(&[]); assert!(!nodes_changed); assert!(snapshot.existing_nodes.is_empty()); assert!(!snapshot.has_nodes()); } #[test] fn test_weighted_sample() { let node = Node::new("api1.com").unwrap(); // Case 1: empty array let arr: &[RoutingCandidateNode] = &[]; let idx = weighted_sample(arr, 0.5); assert_eq!(idx, None); // Case 2: single element in array let arr = &[RoutingCandidateNode::new(node.clone(), 1.0)]; let idx = weighted_sample(arr, 0.0); assert_eq!(idx, Some(0)); let idx = weighted_sample(arr, 1.0); assert_eq!(idx, Some(0)); // check bounds let idx = weighted_sample(arr, -1.0); assert_eq!(idx, None); let idx = weighted_sample(arr, 1.1); assert_eq!(idx, None); // Case 3: two elements in array (second element has twice the weight of the first) let arr = &[ RoutingCandidateNode::new(node.clone(), 1.0), RoutingCandidateNode::new(node.clone(), 2.0), ]; // prefixed_sum = [1.0, 3.0] let idx = weighted_sample(arr, 0.0); // 0.0 * 3.0 < 1.0 assert_eq!(idx, Some(0)); let idx = weighted_sample(arr, 0.33); // 0.33 * 3.0 < 1.0 assert_eq!(idx, Some(0)); // selection probability ~0.33 let idx = weighted_sample(arr, 0.35); // 0.35 * 3.0 > 1.0 assert_eq!(idx, Some(1)); // selection probability ~0.66 let idx = weighted_sample(arr, 1.0); // 1.0 * 3.0 > 1.0 assert_eq!(idx, Some(1)); // check bounds let idx = weighted_sample(arr, -1.0); assert_eq!(idx, None); let idx = 
weighted_sample(arr, 1.1); assert_eq!(idx, None); // Case 4: four elements in array let arr = &[ RoutingCandidateNode::new(node.clone(), 1.0), RoutingCandidateNode::new(node.clone(), 2.0), RoutingCandidateNode::new(node.clone(), 1.5), RoutingCandidateNode::new(node.clone(), 2.5), ]; // prefixed_sum = [1.0, 3.0, 4.5, 7.0] let idx = weighted_sample(arr, 0.14); // 0.14 * 7 < 1.0 assert_eq!(idx, Some(0)); // probability ~0.14 let idx = weighted_sample(arr, 0.15); // 0.15 * 7 > 1.0 assert_eq!(idx, Some(1)); let idx = weighted_sample(arr, 0.42); // 0.42 * 7 < 3.0 assert_eq!(idx, Some(1)); // probability ~0.28 let idx = weighted_sample(arr, 0.43); // 0.43 * 7 > 3.0 assert_eq!(idx, Some(2)); let idx = weighted_sample(arr, 0.64); // 0.64 * 7 < 4.5 assert_eq!(idx, Some(2)); // probability ~0.22 let idx = weighted_sample(arr, 0.65); // 0.65 * 7 > 4.5 assert_eq!(idx, Some(3)); let idx = weighted_sample(arr, 0.99); assert_eq!(idx, Some(3)); // probability ~0.35 // check bounds let idx = weighted_sample(arr, -1.0); assert_eq!(idx, None); let idx = weighted_sample(arr, 1.1); assert_eq!(idx, None); } #[test] fn test_compute_score_with_penalty() { let use_penalty = true; // Test empty arrays let weights: &[f64] = &[]; let weights_sum: f64 = weights.iter().sum(); let availabilities = VecDeque::new(); let latencies = VecDeque::new(); let score = compute_score( weights, weights_sum, &availabilities, &latencies, use_penalty, ); assert_eq!(score, 0.0); // Test arrays with one element. let weights: &[f64] = &[2.0, 1.0]; let weights_sum: f64 = weights.iter().sum(); let availabilities = vec![true].into(); let latencies = vec![2.0].into(); let score = compute_score( weights, weights_sum, &availabilities, &latencies, use_penalty, ); let score_l = (2.0 / 2.0) / 2.0; let score_a = 1.0; assert_eq!(score, score_l * score_a); // Test arrays with two element. 
let weights: &[f64] = &[2.0, 1.0]; let weights_sum: f64 = weights.iter().sum(); let availabilities = vec![true, false].into(); let latencies = vec![1.0, 2.0].into(); let score = compute_score( weights, weights_sum, &availabilities, &latencies, use_penalty, ); let score_l = (2.0 / 2.0 + 1.0 / 1.0) / weights_sum; let score_a = (2.0 * 0.0 + 1.0 * 1.0) / weights_sum; assert_eq!(score, score_l * score_a); // Test arrays of different sizes. let weights: &[f64] = &[3.0, 2.0, 1.0]; let weights_sum: f64 = weights.iter().sum(); let availabilities = vec![true, false, true].into(); let latencies = vec![1.0, 2.0].into(); let score = compute_score( weights, weights_sum, &availabilities, &latencies, use_penalty, ); let score_l = (3.0 / 2.0 + 2.0 / 1.0) / 5.0; let score_a = (3.0 * 1.0 + 2.0 * 0.0 + 1.0 * 1.0) / weights_sum; assert_eq!(score, score_l * score_a); } #[test] #[ignore] // This test is for manual runs to see the statistics for nodes selection probability. fn test_stats_for_next_n_nodes() { // Arrange let mut snapshot = LatencyRoutingSnapshot::new(); let window_size = 1; let node_1 = Node::new("api1.com").unwrap(); let node_2 = Node::new("api2.com").unwrap(); let node_3 = Node::new("api3.com").unwrap(); let node_4 = Node::new("api4.com").unwrap(); let mut metrics_1 = NodeMetrics::new(window_size); let mut metrics_2 = NodeMetrics::new(window_size); let mut metrics_3 = NodeMetrics::new(window_size); let mut metrics_4 = NodeMetrics::new(window_size); metrics_1.is_healthy = true; metrics_2.is_healthy = true; metrics_3.is_healthy = true; metrics_4.is_healthy = false; metrics_1.score = 16.0; metrics_2.score = 8.0; metrics_3.score = 4.0; // even though the score is high, this node should never be selected as it is unhealthy metrics_4.score = 30.0; snapshot.existing_nodes.extend(vec![ (node_1, metrics_1), (node_2, metrics_2), (node_3, metrics_3), (node_4, metrics_4), ]); snapshot.publish_routing_candidates(); let mut stats = HashMap::new(); let experiments = 30; let 
select_nodes_count = 1; for i in 0..experiments { let nodes = snapshot.next_n_nodes(select_nodes_count); println!("Experiment {i}: selected nodes {nodes:?}"); for item in nodes.into_iter() { *stats.entry(item).or_insert(1) += 1; } } for (node, count) in stats { println!( "Node {:?} is selected with probability {}", node.domain(), count as f64 / experiments as f64 ); } } }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ref-tests/src/universal_canister.rs
ref-tests/src/universal_canister.rs
#![allow(dead_code)] //! The Universal Canister (UC) is a canister built in Rust, compiled to Wasm, //! and serves as a canister that can be used for a multitude of tests. //! //! Payloads to UC can execute any arbitrary sequence of system methods, making //! it possible to test different canister behaviors without having to write up //! custom Wat files. use ic_agent::export::Principal; /// Operands used in encoding UC payloads. #[repr(u8)] enum Ops { Noop = 0, PushInt = 2, PushBytes = 3, ReplyDataAppend = 4, Reply = 5, Self_ = 6, Reject = 7, Caller = 8, CallSimple = 9, RejectMessage = 10, RejectCode = 11, IntToBlob = 12, MessagePayload = 13, StableSize = 15, StableGrow = 16, StableRead = 17, StableWrite = 18, DebugPrint = 19, Trap = 20, SetGlobal = 21, GetGlobal = 22, } /// A succinct shortcut for creating a `PayloadBuilder`, which is used to encode /// instructions to be executed by the UC. /// /// Example usage: /// ``` /// use ref_tests::universal_canister::payload; /// // Instruct the UC to reply with the bytes encoding "Hello" /// let bytes = payload().reply_data(b"Hello").build(); /// ``` pub fn payload() -> PayloadBuilder { PayloadBuilder::default() } /// A builder class for building payloads for the universal canister. /// /// Payloads for the UC encode `Ops` representing what instructions to /// execute. 
#[derive(Default)] pub struct PayloadBuilder(Vec<u8>); impl PayloadBuilder { fn op(self, b: Ops) -> Self { self.byte(b as u8) } fn byte(mut self, b: u8) -> Self { self.0.push(b); self } fn bytes(mut self, b: &[u8]) -> Self { self.0.extend_from_slice(b); self } pub fn push_int(self, int: u32) -> Self { self.op(Ops::PushInt).bytes(&int.to_le_bytes()) } pub fn reply_data(self, data: &[u8]) -> Self { self.push_bytes(data).reply_data_append().reply() } pub fn reply_int(self) -> Self { self.int_to_blob().reply_data_append().reply() } pub fn reply_data_append(self) -> Self { self.op(Ops::ReplyDataAppend) } pub fn append_and_reply(self) -> Self { self.reply_data_append().reply() } pub fn int_to_blob(self) -> Self { self.op(Ops::IntToBlob) } pub fn reply(self) -> Self { self.op(Ops::Reply) } pub fn stable_size(self) -> Self { self.op(Ops::StableSize) } pub fn push_bytes(self, data: &[u8]) -> Self { self.op(Ops::PushBytes) .bytes(&(data.len() as u32).to_le_bytes()) .bytes(data) } pub fn stable_grow(self, additional_pages: u32) -> Self { self.push_int(additional_pages).op(Ops::StableGrow) } pub fn stable_read(self, offset: u32, size: u32) -> Self { self.push_int(offset).push_int(size).op(Ops::StableRead) } pub fn stable_write(self, offset: u32, data: &[u8]) -> Self { self.push_int(offset).push_bytes(data).op(Ops::StableWrite) } /// A query from a UC to another UC. pub fn inter_query<P: Into<Principal>>(self, callee: P, call_args: CallArgs) -> Self { self.call_simple(callee, "query", call_args) } /// An update from a UC to another UC. 
pub fn inter_update<P: Into<Principal>>(self, callee: P, call_args: CallArgs) -> Self { self.call_simple(callee, "update", call_args) } pub fn call_simple<P: Into<Principal>>( self, callee: P, method: &str, call_args: CallArgs, ) -> Self { self.push_bytes(callee.into().as_slice()) .push_bytes(method.as_bytes()) .push_bytes(call_args.on_reply.as_slice()) .push_bytes(call_args.on_reject.as_slice()) .push_bytes(call_args.other_side.as_slice()) .op(Ops::CallSimple) } pub fn message_payload(self) -> Self { self.op(Ops::MessagePayload) } pub fn reject_message(self) -> Self { self.op(Ops::RejectMessage) } pub fn reject_code(self) -> Self { self.op(Ops::RejectCode) } pub fn reject(self) -> Self { self.op(Ops::Reject) } pub fn noop(self) -> Self { self.op(Ops::Noop) } pub fn caller(self) -> Self { self.op(Ops::Caller) } pub fn self_(self) -> Self { self.op(Ops::Self_) } /// Store data (in a global variable) on the heap. /// NOTE: This does _not_ correspond to a Wasm global. pub fn set_global_data(self, data: &[u8]) -> Self { self.push_bytes(data).op(Ops::SetGlobal) } /// Get data (stored in a global variable) from the heap. /// NOTE: This does _not_ correspond to a Wasm global. pub fn get_global_data(self) -> Self { self.op(Ops::GetGlobal) } pub fn debug_print(self, msg: &[u8]) -> Self { self.push_bytes(msg).op(Ops::DebugPrint) } pub fn trap_with_blob(self, data: &[u8]) -> Self { self.push_bytes(data).op(Ops::Trap) } pub fn trap(self) -> Self { self.trap_with_blob(&[]) // No data provided for trap. } pub fn build(self) -> Vec<u8> { self.0 } } /// Arguments to be passed into `call_simple`. 
pub struct CallArgs { pub on_reply: Vec<u8>, pub on_reject: Vec<u8>, pub other_side: Vec<u8>, } impl Default for CallArgs { fn default() -> Self { Self { on_reply: Self::default_on_reply(), on_reject: Self::default_on_reject(), other_side: Self::default_other_side(), } } } impl CallArgs { pub fn on_reply<C: Into<Vec<u8>>>(mut self, callback: C) -> Self { self.on_reply = callback.into(); self } pub fn on_reject<C: Into<Vec<u8>>>(mut self, callback: C) -> Self { self.on_reject = callback.into(); self } pub fn other_side<C: Into<Vec<u8>>>(mut self, callback: C) -> Self { self.other_side = callback.into(); self } // The default on_reply callback. // Replies to the caller with whatever arguments passed to it. fn default_on_reply() -> Vec<u8> { PayloadBuilder::default() .message_payload() .reply_data_append() .reply() .build() } // The default on_reject callback. // Replies to the caller with the reject code and message. fn default_on_reject() -> Vec<u8> { PayloadBuilder::default() .reject_code() .int_to_blob() .reply_data_append() .reject_message() .reply_data_append() .reply() .build() } // The default payload to be executed by the callee. // Replies with a message stating who the callee and the caller is. fn default_other_side() -> Vec<u8> { PayloadBuilder::default() .push_bytes(b"Hello ") .reply_data_append() .caller() .reply_data_append() .push_bytes(b" this is ") .reply_data_append() .self_() .reply_data_append() .reply() .build() } }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ref-tests/src/lib.rs
ref-tests/src/lib.rs
#![cfg(unix)] // pocket-ic pub mod universal_canister; pub mod utils; pub use utils::*;
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ref-tests/src/utils.rs
ref-tests/src/utils.rs
use ic_agent::export::reqwest::Url; use ic_agent::identity::{Prime256v1Identity, Secp256k1Identity}; use ic_agent::{export::Principal, identity::BasicIdentity, Agent, Identity}; use ic_identity_hsm::HardwareIdentity; use ic_utils::interfaces::{management_canister::builders::MemoryAllocation, ManagementCanister}; use pocket_ic::nonblocking::PocketIc; use pocket_ic::PocketIcBuilder; use std::time::Duration; use std::{convert::TryFrom, error::Error}; const HSM_PKCS11_LIBRARY_PATH: &str = "HSM_PKCS11_LIBRARY_PATH"; const HSM_SLOT_INDEX: &str = "HSM_SLOT_INDEX"; const HSM_KEY_ID: &str = "HSM_KEY_ID"; const HSM_PIN: &str = "HSM_PIN"; pub async fn get_effective_canister_id(pic: &PocketIc) -> Principal { pocket_ic::nonblocking::get_default_effective_canister_id(get_pic_url(pic).to_string()) .await .unwrap() } pub fn create_identity() -> Result<Box<dyn Identity>, String> { if std::env::var(HSM_PKCS11_LIBRARY_PATH).is_ok() { create_hsm_identity().map(|x| Box::new(x) as _) } else { Ok(Box::new(create_basic_identity())) } } fn expect_env_var(name: &str) -> Result<String, String> { std::env::var(name).map_err(|_| format!("Need to specify the {} environment variable", name)) } pub fn create_hsm_identity() -> Result<HardwareIdentity, String> { let path = expect_env_var(HSM_PKCS11_LIBRARY_PATH)?; let slot_index = expect_env_var(HSM_SLOT_INDEX)? .parse::<usize>() .map_err(|e| format!("Unable to parse {} value: {}", HSM_SLOT_INDEX, e))?; let key = expect_env_var(HSM_KEY_ID)?; let id = HardwareIdentity::new(path, slot_index, &key, get_hsm_pin) .map_err(|e| format!("Unable to create hw identity: {}", e))?; Ok(id) } fn get_hsm_pin() -> Result<String, String> { expect_env_var(HSM_PIN) } // The SoftHSM library doesn't like to have two contexts created/initialized at once. 
// Trying to create two HardwareIdentity instances at the same time results in this error: // Unable to create hw identity: PKCS#11: CKR_CRYPTOKI_ALREADY_INITIALIZED (0x191) // // To avoid this, we use a basic identity for any second identity in tests. // // A shared container of Ctx objects might be possible instead, but my rust-fu is inadequate. pub fn create_basic_identity() -> BasicIdentity { BasicIdentity::from_raw_key(&ic_ed25519::PrivateKey::generate().serialize_raw()) } /// Create a secp256k1identity, which unfortunately will always be the same one /// (So can only use one per test) pub fn create_secp256k1_identity() -> Result<Secp256k1Identity, String> { // generated from the the following commands: // $ openssl ecparam -name secp256k1 -genkey -noout -out identity.pem // $ cat identity.pem let identity_file = " -----BEGIN EC PRIVATE KEY----- MHQCAQEEIJb2C89BvmJERgnT/vJLKpdHZb/hqTiC8EY2QtBRWZScoAcGBSuBBAAK oUQDQgAEDMl7g3vGKLsiLDA3fBRxDE9ZkM3GezZFa5HlKM/gYzNZfU3w8Tijjd73 yeMC60IsMNxDjLqElV7+T7dkb5Ki7Q== -----END EC PRIVATE KEY-----"; let identity = Secp256k1Identity::from_pem(identity_file.as_bytes()) .expect("Cannot create secp256k1 identity from PEM file."); Ok(identity) } pub fn create_prime256v1_identity() -> Result<Prime256v1Identity, String> { // generated from the following command: // $ openssl ecparam -name prime256v1 -genkey -noout -out identity.pem // $ cat identity.pem let identity_file = "\ -----BEGIN EC PRIVATE KEY----- MHcCAQEEIL1ybmbwx+uKYsscOZcv71MmKhrNqfPP0ke1unET5AY4oAoGCCqGSM49 AwEHoUQDQgAEUbbZV4NerZTPWfbQ749/GNLu8TaH8BUS/I7/+ipsu+MPywfnBFIZ Sks4xGbA/ZbazsrMl4v446U5UIVxCGGaKw== -----END EC PRIVATE KEY-----"; let identity = Prime256v1Identity::from_pem(identity_file.as_bytes()) .expect("Cannot create prime256v1 identity from PEM file."); Ok(identity) } pub async fn create_agent( pic: &PocketIc, identity: impl Identity + 'static, ) -> Result<Agent, String> { let url = get_pic_url(pic); let agent = Agent::builder() .with_url(url) 
.with_identity(identity) .with_max_polling_time(Duration::from_secs(15)) .build() .map_err(|e| format!("{:?}", e))?; agent.fetch_root_key().await.unwrap(); Ok(agent) } pub async fn with_agent<F, R>(f: F) -> R where F: AsyncFnOnce(&PocketIc, Agent) -> Result<R, Box<dyn Error>>, { let agent_identity = create_identity().expect("Could not create an identity."); with_agent_as(agent_identity, f).await } pub async fn with_agent_as<I, F, R>(agent_identity: I, f: F) -> R where I: Identity + 'static, F: AsyncFnOnce(&PocketIc, Agent) -> Result<R, Box<dyn Error>>, { with_pic(async move |pic| { let agent = create_agent(pic, agent_identity) .await .expect("Could not create an agent."); f(pic, agent).await }) .await } fn check_assets_uptodate() -> bool { let repo_dir = std::fs::canonicalize(format!("{}/..", env!("CARGO_MANIFEST_DIR"))).unwrap(); let assets_dir = repo_dir.join("ref-tests/assets"); let checked_paths = [ ".", "pocket-ic", "cycles-wallet.wasm", "universal-canister.wasm.gz", ] .map(|p| assets_dir.join(p)); for path in &checked_paths { if !path.exists() { return false; } } let last_asset_update = repo_dir .join("scripts/download_reftest_assets.sh") .metadata() .expect("failed to get metadata for update script") .modified() .expect("failed to get modification time for update script"); for path in &checked_paths { let metadata = path .metadata() .expect("failed to get metadata for asset file"); let modified = metadata .modified() .expect("failed to get modification time for asset file"); if modified < last_asset_update { return false; } } true } pub async fn with_pic<F, R>(f: F) -> R where F: AsyncFnOnce(&PocketIc) -> Result<R, Box<dyn Error>>, { if !check_assets_uptodate() { panic!("Test assets are out of date. 
Please run `scripts/download_reftest_assets.sh` to update them."); } let pic = PocketIcBuilder::new() .with_server_binary(format!("{}/assets/pocket-ic", env!("CARGO_MANIFEST_DIR")).into()) .with_nns_subnet() .with_application_subnet() .with_auto_progress() .build_async() .await; match f(&pic).await { Ok(r) => r, Err(e) => panic!("{:?}", e), } } pub fn get_pic_url(pic: &PocketIc) -> Url { pic.get_server_url() .join(&format!("instances/{}/", pic.instance_id)) .unwrap() } pub async fn create_universal_canister( pic: &PocketIc, agent: &Agent, ) -> Result<Principal, Box<dyn Error>> { let canister_wasm = std::fs::read(format!( "{}/assets/universal-canister.wasm.gz", env!("CARGO_MANIFEST_DIR") )) .unwrap(); let ic00 = ManagementCanister::create(agent); let (canister_id,) = ic00 .create_canister() .as_provisional_create_with_amount(None) .with_effective_canister_id(get_effective_canister_id(pic).await) .call_and_wait() .await?; ic00.install(&canister_id, &canister_wasm) .with_raw_arg(vec![]) .call_and_wait() .await?; Ok(canister_id) } pub fn get_wallet_wasm() -> Vec<u8> { std::fs::read(format!( "{}/assets/cycles-wallet.wasm", env!("CARGO_MANIFEST_DIR") )) .unwrap() } pub async fn create_wallet_canister( pic: &PocketIc, agent: &Agent, cycles: Option<u128>, ) -> Result<Principal, Box<dyn Error>> { let canister_wasm = get_wallet_wasm(); let ic00 = ManagementCanister::create(agent); let (canister_id,) = ic00 .create_canister() .as_provisional_create_with_amount(cycles) .with_effective_canister_id(get_effective_canister_id(pic).await) .with_memory_allocation( MemoryAllocation::try_from(8000000000_u64) .expect("Memory allocation must be between 0 and 2^48 (i.e 256TB), inclusively."), ) .call_and_wait() .await?; ic00.install(&canister_id, &canister_wasm) .with_raw_arg(vec![]) .call_and_wait() .await?; Ok(canister_id) } pub async fn with_universal_canister<F, R>(f: F) -> R where F: AsyncFnOnce(&PocketIc, Agent, Principal) -> Result<R, Box<dyn Error>>, { with_agent(async move |pic, 
agent| { let canister_id = create_universal_canister(pic, &agent).await?; f(pic, agent, canister_id).await }) .await } pub async fn with_universal_canister_as<I, F, R>(identity: I, f: F) -> R where I: Identity + 'static, F: AsyncFnOnce(&PocketIc, Agent, Principal) -> Result<R, Box<dyn Error>>, { with_agent_as(identity, async move |pic, agent| { let canister_id = create_universal_canister(pic, &agent).await?; f(pic, agent, canister_id).await }) .await } pub async fn with_wallet_canister<F, R>(cycles: Option<u128>, f: F) -> R where F: AsyncFnOnce(&PocketIc, Agent, Principal) -> Result<R, Box<dyn Error>>, { with_agent(async move |pic, agent| { let canister_id = create_wallet_canister(pic, &agent, cycles).await?; f(pic, agent, canister_id).await }) .await }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ref-tests/src/bin/prune-ranges.rs
ref-tests/src/bin/prune-ranges.rs
use ic_certification::{Certificate, Delegation, HashTreeNode}; use serde::{Deserialize, Serialize}; use serde_cbor::Serializer; fn main() { let repo_dir = std::fs::canonicalize(format!("{}/..", env!("CARGO_MANIFEST_DIR"))).unwrap(); let response = std::fs::read(repo_dir.join("ic-agent/src/agent/agent_test/ivg37_time.bin")).unwrap(); let response: Response = serde_cbor::from_slice(&response).unwrap(); let cert: Certificate = serde_cbor::from_slice(&response.certificate).unwrap(); let delegation = cert.delegation.clone().unwrap(); let delegation_cert: Certificate = serde_cbor::from_slice(&delegation.certificate).unwrap(); let mut pruned_delegation_tree = delegation_cert.tree.clone().into(); prune_ranges(&mut pruned_delegation_tree); assert_eq!( pruned_delegation_tree.digest(), delegation_cert.tree.digest() ); let pruned_delegation_cert = PrunedDelegationCert { tree: pruned_delegation_tree, signature: delegation_cert.signature, }; let pruned_delegation_cert = tagged_serialize(&pruned_delegation_cert); let pruned_cert = Certificate { delegation: Some(Delegation { certificate: pruned_delegation_cert, ..delegation }), ..cert }; let pruned_cert = tagged_serialize(&pruned_cert); let pruned_response = Response { certificate: pruned_cert, }; let pruned_response = tagged_serialize(&pruned_response); std::fs::write( repo_dir.join("ic-agent/src/agent/agent_test/ivg37_time_pruned_ranges.bin"), pruned_response, ) .unwrap(); } #[derive(Serialize, Deserialize)] struct Response { #[serde(with = "serde_bytes")] certificate: Vec<u8>, } // annoyingly you cannot convert HashTreeNode directly to HashTree #[derive(Serialize, Deserialize)] struct PrunedDelegationCert { tree: HashTreeNode, #[serde(with = "serde_bytes")] signature: Vec<u8>, } fn tagged_serialize<T: Serialize>(value: &T) -> Vec<u8> { let mut buf = Vec::new(); let mut serializer = Serializer::new(&mut buf); serializer.self_describe().unwrap(); value.serialize(&mut serializer).unwrap(); buf } fn prune_ranges(tree: &mut 
HashTreeNode) { match tree { HashTreeNode::Fork(lr) => { let (left, right) = lr.as_mut(); prune_ranges(left); prune_ranges(right); } HashTreeNode::Labeled(label, subtree) => { if label.as_bytes() == b"canister_ranges" { *tree = HashTreeNode::Pruned(tree.digest()); } else { prune_ranges(subtree); } } _ => {} } }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ref-tests/tests/integration.rs
ref-tests/tests/integration.rs
#![cfg(unix)] // pocket-ic //! In this file, please mark all tests that require a running ic-ref as ignored. //! //! Contrary to ic-ref.rs, these tests are not meant to match any other tests. They're //! integration tests with a running IC-Ref. use candid::CandidType; use ic_agent::{ agent::{Envelope, EnvelopeContent, RejectCode, RejectResponse}, export::Principal, AgentError, Identity, }; use ic_certification::Label; use ic_utils::{ call::{AsyncCall, SyncCall}, interfaces::{ management_canister::builders::{CanisterInstallMode, CanisterSettings}, WalletCanister, }, Argument, Canister, }; use ref_tests::{ create_agent, create_basic_identity, create_universal_canister, create_wallet_canister, get_wallet_wasm, universal_canister::payload, with_universal_canister, with_wallet_canister, }; use serde::Serialize; use std::{ borrow::Cow, sync::Arc, time::{Duration, SystemTime, UNIX_EPOCH}, }; #[tokio::test] async fn basic_expiry() { with_universal_canister(async move |_, agent, canister_id| { let arg = payload().reply_data(b"hello").build(); // Verify this works first. let result = agent .update(&canister_id, "update") .with_arg(arg.clone()) .expire_after(std::time::Duration::from_secs(120)) .call_and_wait() .await?; assert_eq!(result.as_slice(), b"hello"); // Verify a zero expiry will fail with the proper code. 
let result = agent .update(&canister_id, "update") .with_arg(arg.clone()) .expire_after(std::time::Duration::from_secs(0)) .call_and_wait() .await; match result.unwrap_err() { AgentError::TimeoutWaitingForResponse() => (), x => panic!("Was expecting an error, got {:?}", x), } let result = agent .update(&canister_id, "update") .with_arg(arg.clone()) .expire_after(std::time::Duration::from_secs(120)) .call_and_wait() .await?; assert_eq!(result.as_slice(), b"hello"); Ok(()) }) .await } #[tokio::test] async fn wait_signed() { with_universal_canister(async move |_, mut agent, canister_id| { fn serialized_bytes(envelope: Envelope) -> Vec<u8> { let mut serialized_bytes = Vec::new(); let mut serializer = serde_cbor::Serializer::new(&mut serialized_bytes); serializer.self_describe().unwrap(); envelope.serialize(&mut serializer).unwrap(); serialized_bytes } let arg = payload().reply_data(b"hello").build(); let ingress_expiry = (SystemTime::now().duration_since(UNIX_EPOCH).unwrap() + Duration::from_secs(120)) .as_nanos() as u64; let agent_identity = Arc::new(create_basic_identity()); agent.set_arc_identity(agent_identity.clone()); let call_envelope_content = EnvelopeContent::Call { sender: agent.get_principal().unwrap(), arg: arg.clone(), ingress_expiry, nonce: None, canister_id, method_name: "update".to_string(), }; let call_request_id = call_envelope_content.to_request_id(); let call_signature = agent_identity.sign(&call_envelope_content).unwrap(); let call_envelope = Envelope { content: Cow::Borrowed(&call_envelope_content), sender_pubkey: call_signature.public_key, sender_sig: call_signature.signature, sender_delegation: call_signature.delegations, }; let call_envelope_serialized = serialized_bytes(call_envelope); agent .update_signed(canister_id, call_envelope_serialized) .await .unwrap(); let paths: Vec<Vec<Label>> = vec![vec![ "request_status".into(), call_request_id.to_vec().into(), ]]; let read_state_envelope_content = EnvelopeContent::ReadState { sender: 
agent.get_principal().unwrap(), paths, ingress_expiry, }; let read_signature = agent_identity.sign(&read_state_envelope_content).unwrap(); let read_state_envelope = Envelope { content: Cow::Borrowed(&read_state_envelope_content), sender_pubkey: read_signature.public_key, sender_sig: read_signature.signature, sender_delegation: read_signature.delegations, }; let read_envelope_serialized = serialized_bytes(read_state_envelope); let (result, _) = agent .wait_signed(&call_request_id, canister_id, read_envelope_serialized) .await .unwrap(); assert_eq!(result.as_slice(), b"hello"); Ok(()) }) .await } #[tokio::test] async fn canister_query() { with_universal_canister(async move |_, agent, canister_id| { let universal = Canister::builder() .with_canister_id(canister_id) .with_agent(&agent) .build()?; let arg = payload().reply_data(b"hello").build(); let out = universal .query("query") .with_arg_raw(arg) .build::<()>() .call_raw() .await?; assert_eq!(out, b"hello"); Ok(()) }) .await } #[tokio::test] async fn canister_reject_call() { // try to call a wallet method, but on the universal canister. // this lets us look up the reject code and reject message in the certificate. with_universal_canister(async move |pic, agent, wallet_id| { let alice = WalletCanister::create(&agent, wallet_id).await?; let bob = WalletCanister::create(&agent, create_wallet_canister(pic, &agent, None).await?) .await?; let result = alice.wallet_send(*bob.canister_id(), 1_000_000).await; assert!( matches!( &result, Err(AgentError::CertifiedReject { reject: RejectResponse { reject_code: RejectCode::CanisterError, reject_message, error_code: Some(error_code), .. }, .. 
}) if reject_message.contains(&format!( "Canister {}: Canister has no update method 'wallet_send'", alice.canister_id() )) && error_code == "IC0536" ), "wrong error: {result:?}" ); Ok(()) }) .await } #[tokio::test] async fn read_state_canister() { with_universal_canister(async move |_, agent, canister_id| { let blob = agent .read_state_canister_info(canister_id, "module_hash") .await?; assert_eq!(blob.len(), 32); let controllers = agent.read_state_canister_controllers(canister_id).await?; assert_eq!(controllers.len(), 1); let hash = agent.read_state_canister_module_hash(canister_id).await?; assert_eq!(hash.len(), 32); Ok(()) }) .await } #[tokio::test] async fn wallet_canister_forward() { with_wallet_canister(None, async move |pic, agent, wallet_id| { let wallet = WalletCanister::create(&agent, wallet_id).await?; let universal_id = create_universal_canister(pic, &agent).await?; // Perform an "echo" call through the wallet canister. // We encode the result in DIDL to decode it on the other side (would normally get // a Vec<u8>). 
let arg = payload() .reply_data(b"DIDL\0\x01\x71\x0bHello World") .build(); let args = Argument::from_raw(arg); let (result,): (String,) = wallet .call(universal_id, "update", args, 0) .call_and_wait() .await .unwrap(); assert_eq!(result, "Hello World"); Ok(()) }) .await } #[tokio::test] async fn wallet_canister_create_and_install() { with_wallet_canister( Some(1_000_000_000_000_000), async move |_, agent, wallet_id| { let wallet = WalletCanister::create(&agent, wallet_id).await?; let create_result = wallet .wallet_create_canister(200_000_000_000_000, None, None, None, None) .await?; #[derive(CandidType)] struct CanisterInstall { mode: CanisterInstallMode, canister_id: Principal, wasm_module: Vec<u8>, arg: Vec<u8>, } let install_config = CanisterInstall { mode: CanisterInstallMode::Install, canister_id: create_result.canister_id, wasm_module: b"\0asm\x01\0\0\0".to_vec(), arg: Argument::default().serialize()?, }; let args = Argument::from_candid((install_config,)); wallet .call64::<(), _>(Principal::management_canister(), "install_code", args, 0) .call_and_wait() .await?; Ok(()) }, ) .await } #[tokio::test] async fn wallet_create_and_set_controller() { with_wallet_canister(None, async move |pic, agent, wallet_id| { eprintln!("Parent wallet canister id: {:?}", wallet_id.to_text()); let wallet = WalletCanister::create(&agent, wallet_id).await?; // get the wallet wasm from the environment let wallet_wasm = get_wallet_wasm(); // store the wasm into the wallet wallet .wallet_store_wallet_wasm(wallet_wasm.to_vec()) .call_and_wait() .await?; // controller let other_agent_identity = create_basic_identity(); let other_agent_principal = other_agent_identity.sender()?; let other_agent = create_agent(pic, other_agent_identity).await?; other_agent.fetch_root_key().await?; eprintln!("Agent id: {:?}", other_agent_principal.to_text()); let create_result = wallet .wallet_create_wallet( 1_000_000_000_000_u128, Some(vec![other_agent_principal]), None, None, None, ) .await?; eprintln!( 
"Child wallet canister id: {:?}", create_result.canister_id.clone().to_text() ); eprintln!("...build child_wallet"); let child_wallet = WalletCanister::from_canister( Canister::builder() .with_agent(&other_agent) .with_canister_id(create_result.canister_id) .build()?, ) .await?; eprintln!("...child_wallet.get_controllers"); let (controller_list,) = child_wallet.get_controllers().call().await?; assert!(controller_list.len() == 1); assert_eq!(controller_list[0], other_agent_principal); eprintln!("...child_wallet.list_addresses"); let (address_entries,): (Vec<ic_utils::interfaces::wallet::AddressEntry>,) = child_wallet.list_addresses().call().await?; for address in address_entries.iter() { eprintln!("id {:?} is a {:?}", address.id.to_text(), address.role); } Ok(()) }) .await } #[tokio::test] async fn wallet_create_wallet() { with_wallet_canister(None, async move |_, agent, wallet_id| { eprintln!("Parent wallet canister id: {:?}", wallet_id.to_text()); let wallet = WalletCanister::create(&agent, wallet_id).await?; let wallet_initial_balance = wallet.wallet_balance().await?; // get the wallet wasm from the environment let wallet_wasm = get_wallet_wasm(); // store the wasm into the wallet wallet .wallet_store_wallet_wasm(wallet_wasm.to_vec()) .call_and_wait() .await?; // create a child wallet let child_create_res = wallet .wallet_create_wallet(1_000_000_000_000_u128, None, None, None, None) .await?; eprintln!( "Created child wallet one.\nChild wallet one canister id: {:?}", child_create_res.canister_id.to_text() ); let (child_wallet_balance,): (ic_utils::interfaces::wallet::BalanceResult<u64>,) = wallet .call64( child_create_res.canister_id, "wallet_balance", Argument::default(), 0, ) .call_and_wait() .await?; eprintln!( "Child wallet one cycle balance: {}", child_wallet_balance.amount ); // // create a second child wallet // let child_two_create_res = wallet .wallet_create_wallet(2_100_000_000_000_u128, None, None, None, None) .await?; eprintln!( "Created child wallet 
two.\nChild wallet two canister id: {:?}", child_two_create_res.canister_id.to_text() ); let (child_wallet_two_balance,): (ic_utils::interfaces::wallet::BalanceResult<u64>,) = wallet .call64( child_two_create_res.canister_id, "wallet_balance", Argument::default(), 0, ) .call_and_wait() .await?; eprintln!( "Child wallet two cycle balance: {}", child_wallet_two_balance.amount ); // // Get wallet intermediate balance // let (wallet_intermediate_balance,) = wallet.wallet_balance64().call().await?; eprintln!( "Parent wallet initial balance: {}\n intermediate balance: {}", wallet_initial_balance.amount, wallet_intermediate_balance.amount ); // // Create a grandchild wallet from second child wallet // #[derive(CandidType)] struct In { cycles: u64, settings: CanisterSettings, } let create_args = In { cycles: 1_000_000_000_000_u64, settings: CanisterSettings { controllers: None, compute_allocation: None, memory_allocation: None, freezing_threshold: None, reserved_cycles_limit: None, wasm_memory_limit: None, wasm_memory_threshold: None, log_visibility: None, environment_variables: None, }, }; let args = Argument::from_candid((create_args,)); let (grandchild_create_res,): (Result<ic_utils::interfaces::wallet::CreateResult, String>,) = wallet .call64( child_two_create_res.canister_id, "wallet_create_wallet", args, 0, ) .call_and_wait() .await?; let grandchild_create_res = grandchild_create_res?; eprintln!( "Created grandchild wallet from child wallet two.\nGrandchild wallet canister id: {:?}", grandchild_create_res.canister_id.to_text() ); let (wallet_final_balance,) = wallet.wallet_balance64().call().await?; eprintln!( "Parent wallet initial balance: {}\n final balance: {}", wallet_initial_balance.amount, wallet_final_balance.amount ); Ok(()) }).await } #[tokio::test] async fn wallet_canister_funds() { let provisional_amount = 1_000_000_000_000_000; with_wallet_canister( Some(provisional_amount), async move |pic, agent, wallet_id| { let alice = WalletCanister::create(&agent, 
wallet_id).await?; let bob = WalletCanister::create( &agent, create_wallet_canister(pic, &agent, Some(provisional_amount)).await?, ) .await?; let alice_previous_balance = alice.wallet_balance().await?; let bob_previous_balance = bob.wallet_balance().await?; alice.wallet_send(*bob.canister_id(), 1_000_000).await?; let bob_balance = bob.wallet_balance().await?; let alice_balance = alice.wallet_balance().await?; eprintln!( "Alice previous: {}\n current: {}", alice_previous_balance.amount, alice_balance.amount ); eprintln!( "Bob previous: {}\n current: {}", bob_previous_balance.amount, bob_balance.amount ); assert!( bob_balance.amount > bob_previous_balance.amount + 500_000, "Wrong: {} > {}", bob_balance.amount, bob_previous_balance.amount + 500_000 ); assert!(alice_balance.amount < alice_previous_balance.amount - 500_000); Ok(()) }, ) .await } #[tokio::test] async fn wallet_helper_functions() { with_wallet_canister(None, async move |pic, agent, wallet_id| { // name let wallet = WalletCanister::create(&agent, wallet_id).await?; let (name,) = wallet.name().call().await?; assert!(name.is_none(), "Name should be none."); let wallet_name = "Alice".to_string(); wallet.set_name(wallet_name.clone()).call_and_wait().await?; let (name,) = wallet.name().call().await?; assert_eq!(name, Some(wallet_name)); // controller let other_agent_identity = create_basic_identity(); let other_agent_principal = other_agent_identity.sender()?; let other_agent = create_agent(pic, other_agent_identity).await?; other_agent.fetch_root_key().await?; let (controller_list,) = wallet.get_controllers().call().await?; assert_eq!(controller_list.len(), 1); assert_ne!(&controller_list[0], &other_agent_principal); wallet .add_controller(other_agent_principal) .call_and_wait() .await?; let (controller_list,) = wallet.get_controllers().call().await?; assert_eq!(controller_list.len(), 2); let added = if controller_list[0] == other_agent_principal { true } else { controller_list[1] == other_agent_principal }; 
assert!(added); wallet .remove_controller(other_agent_principal) .call_and_wait() .await?; let (controller_list,) = wallet.get_controllers().call().await?; assert_eq!(controller_list.len(), 1); assert_ne!(&controller_list[0], &other_agent_principal); Ok(()) }) .await } mod sign_send { use ic_agent::{ agent::{ signed_query_inspect, signed_request_status_inspect, signed_update_inspect, ReplyResponse, RequestStatusResponse, }, AgentError, }; use ref_tests::{universal_canister::payload, with_universal_canister}; use std::{thread, time}; #[tokio::test] async fn query() { with_universal_canister(async move |_, agent, canister_id| { let arg = payload().reply_data(b"hello").build(); let signed_query = agent.query(&canister_id, "query").with_arg(arg).sign()?; assert!(signed_query_inspect( signed_query.sender, signed_query.canister_id, &signed_query.method_name, &signed_query.arg, signed_query.ingress_expiry, signed_query.signed_query.clone() ) .is_ok()); let result = agent .query_signed( signed_query.effective_canister_id, signed_query.signed_query, ) .await?; assert_eq!(result, b"hello"); Ok(()) }) .await } #[tokio::test] async fn update_then_request_status() { with_universal_canister(async move |_, agent, canister_id| { let arg = payload().reply_data(b"hello").build(); let signed_update = agent.update(&canister_id, "update").with_arg(arg).sign()?; assert!(signed_update_inspect( signed_update.sender, signed_update.canister_id, &signed_update.method_name, &signed_update.arg, signed_update.ingress_expiry, signed_update.signed_update.clone() ) .is_ok()); let signed_request_status = agent.sign_request_status( signed_update.effective_canister_id, signed_update.request_id, )?; assert!(signed_request_status_inspect( signed_request_status.sender, &signed_request_status.request_id, signed_request_status.ingress_expiry, signed_request_status.signed_request_status.clone() ) .is_ok()); let _request_id = agent .update_signed( signed_update.effective_canister_id, 
signed_update.signed_update, ) .await?; let ten_secs = time::Duration::from_secs(10); thread::sleep(ten_secs); let (response, _) = agent .request_status_signed( &signed_request_status.request_id, signed_request_status.effective_canister_id, signed_request_status.signed_request_status.clone(), ) .await?; assert!( matches!(response, RequestStatusResponse::Replied(ReplyResponse { arg: result, .. }) if result == b"hello") ); Ok(()) }).await } #[tokio::test] async fn forged_query() { with_universal_canister(async move |_, agent, canister_id| { let arg = payload().reply_data(b"hello").build(); let mut signed_query = agent.query(&canister_id, "query").with_arg(arg).sign()?; signed_query.method_name = "non_query".to_string(); let result = signed_query_inspect( signed_query.sender, signed_query.canister_id, &signed_query.method_name, &signed_query.arg, signed_query.ingress_expiry, signed_query.signed_query.clone(), ); assert!(matches!(result, Err(AgentError::CallDataMismatch{field, value_arg, value_cbor}) if field == *"method_name" && value_arg == *"non_query" && value_cbor == *"query")); Ok(()) }).await } } mod identity { use candid::Principal; use ic_agent::{ identity::{ DelegatedIdentity, Delegation, Prime256v1Identity, Secp256k1Identity, SignedDelegation, }, Identity, }; use rand::thread_rng; use ref_tests::utils::create_basic_identity; use ref_tests::{universal_canister::payload, with_universal_canister_as}; #[tokio::test] async fn delegated_eddsa_identity() { let sending_identity = create_basic_identity(); let signing_identity = create_basic_identity(); let delegation = Delegation { expiration: i64::MAX as u64, pubkey: signing_identity.public_key().unwrap(), targets: None, }; let signature = sending_identity.sign_delegation(&delegation).unwrap(); let delegated_identity = DelegatedIdentity::new( signature.public_key.unwrap(), Box::new(signing_identity), vec![SignedDelegation { delegation, signature: signature.signature.unwrap(), }], ) .unwrap(); 
with_universal_canister_as(delegated_identity, async move |_, agent, canister| { let payload = payload().caller().append_and_reply().build(); let caller_resp = agent .query(&canister, "query") .with_arg(payload) .call() .await .unwrap(); let caller = Principal::from_slice(&caller_resp); assert_eq!(caller, sending_identity.sender().unwrap()); Ok(()) }) .await } #[tokio::test] async fn delegated_ecdsa_identity() { let mut random = thread_rng(); let sending_identity = Secp256k1Identity::from_private_key(k256::SecretKey::random(&mut random)); let signing_identity = Prime256v1Identity::from_private_key(p256::SecretKey::random(&mut random)); let delegation = Delegation { expiration: i64::MAX as u64, pubkey: signing_identity.public_key().unwrap(), targets: None, }; let signature = sending_identity.sign_delegation(&delegation).unwrap(); let delegated_identity = DelegatedIdentity::new( signature.public_key.unwrap(), Box::new(signing_identity), vec![SignedDelegation { delegation, signature: signature.signature.unwrap(), }], ) .unwrap(); with_universal_canister_as(delegated_identity, async |_, agent, canister| { let payload = payload().caller().append_and_reply().build(); let caller_resp = agent .query(&canister, "query") .with_arg(payload) .call() .await .unwrap(); let caller = Principal::from_slice(&caller_resp); assert_eq!(caller, sending_identity.sender().unwrap()); Ok(()) }) .await } }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ref-tests/tests/ic-ref.rs
ref-tests/tests/ic-ref.rs
#![cfg(unix)] // pocket-ic //! In this file, please mark all tests that require a running ic-ref as ignored. //! //! These tests are a Rust-like version using the Agent to cover the same tests //! as the IC Ref repo itself. //! //! The tests can be found in the Spec.hs file in the IC Ref repo. //! https://github.com/dfinity/ic-hs/blob/master/src/IC/Test/Spec.hs //! //! Try to keep these tests as close to 1-to-1 to the IC Ref test use cases. For //! every spec in the IC Ref tests, there should be a matching spec here. Some //! tests (like invalid CBOR or special Headers) might not be translatable, in //! which case they should still be added here but do nothing (just keep the //! use case being tested). use ref_tests::{universal_canister, with_agent}; #[tokio::test] async fn status_endpoint() { with_agent(async move |_, agent| { agent.status().await?; Ok(()) }) .await } mod management_canister { use candid::CandidType; use ic_agent::{ agent::{RejectCode, RejectResponse}, export::Principal, AgentError, Identity, }; use ic_utils::{ call::AsyncCall, interfaces::{ management_canister::{ builders::{ CanisterInstallMode, CanisterSettings, UpgradeFlags, WasmMemoryPersistence, }, CanisterStatusResult, CanisterStatusType, }, wallet::CreateResult, ManagementCanister, WalletCanister, }, Argument, }; use ref_tests::get_effective_canister_id; use ref_tests::{ create_agent, create_basic_identity, create_prime256v1_identity, create_secp256k1_identity, with_agent, with_wallet_canister, }; use sha2::{Digest, Sha256}; use std::collections::HashSet; use std::convert::TryInto; mod create_canister { use super::with_agent; use ic_agent::{ agent::{RejectCode, RejectResponse}, export::Principal, AgentError, AgentError::HttpError, }; use ic_utils::interfaces::ManagementCanister; use ref_tests::get_effective_canister_id; use std::str::FromStr; #[tokio::test] async fn no_id_given() { with_agent(async move |pic, agent| { let ic00 = ManagementCanister::create(&agent); let _ = ic00 
.create_canister() .as_provisional_create_with_amount(None) .with_effective_canister_id(get_effective_canister_id(pic).await) .call_and_wait() .await?; Ok(()) }) .await } #[tokio::test] async fn create_canister_necessary() { with_agent(async move |_, agent| { let ic00 = ManagementCanister::create(&agent); let canister_wasm = b"\0asm\x01\0\0\0".to_vec(); let result = ic00 .install_code( &Principal::from_str("75hes-oqbaa-aaaaa-aaaaa-aaaaa-aaaaa-aaaaa-q") .unwrap(), &canister_wasm, ) .call_and_wait() .await; assert!( matches!(result, Err(AgentError::UncertifiedReject { reject: RejectResponse { reject_code: RejectCode::DestinationInvalid, ref reject_message, error_code: Some(ref error_code) }, .. }) if reject_message == "Canister 75hes-oqbaa-aaaaa-aaaaa-aaaaa-aaaaa-aaaaa-q not found" && error_code == "IC0301") || matches!(result, Err(HttpError(content)) if content.status == 400 && (content.content == b"Canister 75hes-oqbaa-aaaaa-aaaaa-aaaaa-aaaaa-aaaaa-q does not belong to any subnet." || content.content.starts_with(b"error: canister_not_found"))) ); Ok(()) }).await } } #[tokio::test] async fn management() { use ref_tests::get_effective_canister_id; with_agent(async move |pic, agent| { let ic00 = ManagementCanister::create(&agent); let (canister_id,) = ic00 .create_canister() .as_provisional_create_with_amount(None) .with_effective_canister_id(get_effective_canister_id(pic).await) .call_and_wait() .await?; let canister_wasm = b"\0asm\x01\0\0\0".to_vec(); // Install once. ic00.install_code(&canister_id, &canister_wasm) .with_mode(CanisterInstallMode::Install) .call_and_wait() .await?; // Re-install should fail. let result = ic00 .install_code(&canister_id, &canister_wasm) .with_mode(CanisterInstallMode::Install) .call_and_wait() .await; assert!(matches!(result, Err(AgentError::CertifiedReject { .. }))); // Reinstall should succeed. 
ic00.install_code(&canister_id, &canister_wasm) .with_mode(CanisterInstallMode::Reinstall) .call_and_wait() .await?; // Each agent has their own identity. let other_agent_identity = create_basic_identity(); let other_agent_principal = other_agent_identity.sender()?; let other_agent = create_agent(pic, other_agent_identity).await?; other_agent.fetch_root_key().await?; let other_ic00 = ManagementCanister::create(&other_agent); // Reinstall with another agent should fail. let result = other_ic00 .install_code(&canister_id, &canister_wasm) .with_mode(CanisterInstallMode::Reinstall) .call_and_wait() .await; assert!(matches!(result, Err(AgentError::UncertifiedReject { .. }))); // Upgrade should succeed. ic00.install_code(&canister_id, &canister_wasm) .with_mode(CanisterInstallMode::Upgrade(Some(UpgradeFlags { skip_pre_upgrade: Some(true), wasm_memory_persistence: None, }))) .call_and_wait() .await?; // Upgrade with another agent should fail. let result = other_ic00 .install_code(&canister_id, &canister_wasm) .with_mode(CanisterInstallMode::Upgrade(Some(UpgradeFlags { skip_pre_upgrade: None, wasm_memory_persistence: Some(WasmMemoryPersistence::Keep), }))) .call_and_wait() .await; assert!(matches!(result, Err(AgentError::UncertifiedReject { .. }))); // Change controller. ic00.update_settings(&canister_id) .with_controller(other_agent_principal) .call_and_wait() .await?; // Change controller with wrong controller should fail let result = ic00 .update_settings(&canister_id) .with_controller(other_agent_principal) .call_and_wait() .await; assert!( matches!(result, Err(AgentError::UncertifiedReject { reject: RejectResponse{ reject_code: RejectCode::CanisterError, reject_message, error_code: Some(ref error_code), }, .. 
}) if reject_message == format!("Only controllers of canister {} can call ic00 method update_settings", canister_id) && error_code == "IC0512") ); // Reinstall as new controller other_ic00 .install_code(&canister_id, &canister_wasm) .with_mode(CanisterInstallMode::Reinstall) .call_and_wait() .await?; // Reinstall on empty should succeed. let (canister_id_2,) = ic00 .create_canister() .as_provisional_create_with_amount(None) .with_effective_canister_id(get_effective_canister_id(pic).await) .call_and_wait() .await?; // Reinstall over empty canister ic00.install_code(&canister_id_2, &canister_wasm) .with_mode(CanisterInstallMode::Reinstall) .call_and_wait() .await?; // Create an empty canister let (canister_id_3,) = other_ic00 .create_canister() .as_provisional_create_with_amount(None) .with_effective_canister_id(get_effective_canister_id(pic).await) .call_and_wait() .await?; // Check status for empty canister let result = other_ic00 .canister_status(&canister_id_3) .call_and_wait() .await?; assert_eq!(result.0.status, CanisterStatusType::Running); assert_eq!(result.0.settings.controllers.len(), 1); assert_eq!(result.0.settings.controllers[0], other_agent_principal); assert_eq!(result.0.module_hash, None); // Install wasm. other_ic00 .install_code(&canister_id_3, &canister_wasm) .with_mode(CanisterInstallMode::Install) .call_and_wait() .await?; // Check status after installing wasm and validate module_hash let result = other_ic00 .canister_status(&canister_id_3) .call_and_wait() .await?; let sha256_digest = Sha256::digest(&canister_wasm); assert_eq!(result.0.module_hash, Some(sha256_digest.to_vec())); Ok(()) }).await } #[tokio::test] async fn multiple_canisters_aaaaa_aa_but_really_provisional() { with_agent(async move |pic, agent| { let agent_principal = agent.get_principal()?; // Each agent has their own identity. 
let other_agent_identity = create_basic_identity(); let other_agent_principal = other_agent_identity.sender()?; let other_agent = create_agent(pic, other_agent_identity).await?; other_agent.fetch_root_key().await?; let other_ic00 = ManagementCanister::create(&other_agent); let secp256k1_identity = create_secp256k1_identity()?; let secp256k1_principal = secp256k1_identity.sender()?; let secp256k1_agent = create_agent(pic, secp256k1_identity).await?; secp256k1_agent.fetch_root_key().await?; let secp256k1_ic00 = ManagementCanister::create(&secp256k1_agent); let prime256v1_identity = create_prime256v1_identity()?; let prime256v1_principal = prime256v1_identity.sender()?; let prime256v1_agent = create_agent(pic, prime256v1_identity).await?; prime256v1_agent.fetch_root_key().await?; let prime256v1_ic00 = ManagementCanister::create(&prime256v1_agent); let ic00 = ManagementCanister::create(&agent); let (canister_id,) = ic00 .create_canister() .as_provisional_create_with_amount(None) // ok .with_effective_canister_id(get_effective_canister_id(pic).await) //.with_canister_id("aaaaa-aa") .with_controller(agent_principal) .with_controller(other_agent_principal) .call_and_wait() .await?; // Controllers should be able to fetch the canister status. 
let result = ic00.canister_status(&canister_id).call_and_wait().await?; assert_eq!(result.0.settings.controllers.len(), 2); let actual = result .0 .settings .controllers .iter() .cloned() .collect::<HashSet<_>>(); let expected = [agent_principal, other_agent_principal] .iter() .cloned() .collect::<HashSet<_>>(); assert_eq!(actual, expected); let result = other_ic00 .canister_status(&canister_id) .call_and_wait() .await?; assert_eq!(result.0.settings.controllers.len(), 2); let actual = result .0 .settings .controllers .iter() .cloned() .collect::<HashSet<_>>(); let expected = [agent_principal, other_agent_principal] .iter() .cloned() .collect::<HashSet<_>>(); assert_eq!(actual, expected); // Set new controller ic00.update_settings(&canister_id) .with_controller(secp256k1_principal) .call_and_wait() .await?; // Only that controller can get canister status let result = ic00.canister_status(&canister_id).call_and_wait().await; assert_err_or_reject( result, vec![RejectCode::DestinationInvalid, RejectCode::CanisterError], ); let result = other_ic00 .canister_status(&canister_id) .call_and_wait() .await; assert_err_or_reject( result, vec![RejectCode::DestinationInvalid, RejectCode::CanisterError], ); let result = secp256k1_ic00 .canister_status(&canister_id) .call_and_wait() .await?; assert_eq!(result.0.settings.controllers.len(), 1); assert_eq!(result.0.settings.controllers[0], secp256k1_principal); // Only that controller can change the controller again let result = ic00 .update_settings(&canister_id) .with_controller(prime256v1_principal) .call_and_wait() .await; assert_err_or_reject( result, vec![RejectCode::DestinationInvalid, RejectCode::CanisterError], ); let result = other_ic00 .update_settings(&canister_id) .with_controller(prime256v1_principal) .call_and_wait() .await; assert_err_or_reject( result, vec![RejectCode::DestinationInvalid, RejectCode::CanisterError], ); secp256k1_ic00 .update_settings(&canister_id) .with_controller(prime256v1_principal) 
.call_and_wait() .await?; let result = secp256k1_ic00 .canister_status(&canister_id) .call_and_wait() .await; assert_err_or_reject( result, vec![RejectCode::DestinationInvalid, RejectCode::CanisterError], ); let result = prime256v1_ic00 .canister_status(&canister_id) .call_and_wait() .await?; assert_eq!(result.0.settings.controllers.len(), 1); assert_eq!(result.0.settings.controllers[0], prime256v1_principal); Ok(()) }) .await } fn assert_err_or_reject<S>( result: Result<S, AgentError>, allowed_reject_codes: Vec<RejectCode>, ) { for expected_rc in &allowed_reject_codes { if matches!(result, Err(AgentError::UncertifiedReject { reject: RejectResponse { reject_code, .. }, .. }) if reject_code == *expected_rc) { return; } } assert!( matches!(result, Err(AgentError::HttpError(_))), "expect an HttpError, or a CertifiedReject with reject_code in {:?}", allowed_reject_codes ); } #[tokio::test] async fn canister_lifecycle_and_delete() { with_agent(async move |pic, agent| { let ic00 = ManagementCanister::create(&agent); let (canister_id,) = ic00 .create_canister() .as_provisional_create_with_amount(None) .with_effective_canister_id(get_effective_canister_id(pic).await) .call_and_wait() .await?; let canister_wasm = b"\0asm\x01\0\0\0".to_vec(); // Install once. ic00.install_code(&canister_id, &canister_wasm) .with_mode(CanisterInstallMode::Install) .call_and_wait() .await?; // A newly installed canister should be running let result = ic00.canister_status(&canister_id).call_and_wait().await; assert_eq!(result?.0.status, CanisterStatusType::Running); // Stop should succeed. 
ic00.stop_canister(&canister_id).call_and_wait().await?; // Canister should be stopped let result = ic00.canister_status(&canister_id).call_and_wait().await; assert_eq!(result?.0.status, CanisterStatusType::Stopped); // Another stop is a noop ic00.stop_canister(&canister_id).call_and_wait().await?; // Can't call update on a stopped canister let result = agent.update(&canister_id, "update").call_and_wait().await; assert!( matches!( &result, Err(AgentError::UncertifiedReject { reject: RejectResponse { reject_code: RejectCode::CanisterError, reject_message, error_code: Some(error_code), }, .. }) if *reject_message == format!("Canister {canister_id} is stopped") && error_code == "IC0508" ), "wrong error: {result:?}" ); // Can't call query on a stopped canister let result = agent.query(&canister_id, "query").with_arg([]).call().await; assert!( matches!( &result, Err(AgentError::UncertifiedReject { reject: RejectResponse { reject_code: RejectCode::CanisterError, reject_message, error_code: Some(error_code), }, .. }) if *reject_message == format!("Canister {canister_id} is stopped and therefore does not have a CallContextManager") && error_code == "IC0508" ), "wrong error: {result:?}" ); // Upgrade should succeed ic00.install_code(&canister_id, &canister_wasm) .with_mode(CanisterInstallMode::Upgrade(Some(UpgradeFlags { skip_pre_upgrade: None, wasm_memory_persistence: Some(WasmMemoryPersistence::Replace), }))) .call_and_wait() .await?; // Start should succeed. ic00.start_canister(&canister_id).call_and_wait().await?; // Canister should be running let result = ic00.canister_status(&canister_id).call_and_wait().await; assert_eq!(result?.0.status, CanisterStatusType::Running); // Can call update let result = agent.update(&canister_id, "update").call_and_wait().await; assert!( matches!( &result, Err(AgentError::CertifiedReject { reject: RejectResponse { reject_code: RejectCode::CanisterError, reject_message, error_code: Some(error_code), }, .. 
}) if reject_message.contains(&format!("Canister {canister_id}: Canister has no update method 'update'")) && error_code == "IC0536" ), "wrong error: {result:?}" ); // Can call query let result = agent.query(&canister_id, "query").with_arg([]).call().await; assert!( matches!( &result, Err(AgentError::UncertifiedReject { reject: RejectResponse { reject_code: RejectCode::CanisterError, reject_message, error_code: Some(error_code), }, .. }) if reject_message.contains(&format!("Canister {}: Canister has no query method 'query'", canister_id)) && error_code == "IC0536", ), "wrong error: {result:?}" ); // Another start is a noop ic00.start_canister(&canister_id).call_and_wait().await?; // Stop should succeed. ic00.stop_canister(&canister_id).call_and_wait().await?; // Delete a stopped canister succeeds. ic00.delete_canister(&canister_id).call_and_wait().await?; // Cannot call update let result = agent.update(&canister_id, "update").call_and_wait().await; assert!( matches!( &result, Err(AgentError::UncertifiedReject { reject: RejectResponse { reject_code: RejectCode::DestinationInvalid, reject_message, error_code: Some(error_code), }, .. }) if *reject_message == format!("Canister {} not found", canister_id) && error_code == "IC0301" ), "wrong error: {result:?}" ); // Cannot call query let result = agent.query(&canister_id, "query").with_arg([]).call().await; assert!( matches!( &result, Err(AgentError::UncertifiedReject { reject: RejectResponse { reject_code: RejectCode::DestinationInvalid, reject_message, error_code: Some(error_code), }, .. }) if *reject_message == format!("Canister {} not found", canister_id) && error_code == "IC0301" ), "wrong error: {result:?}" ); // Cannot query canister status let result = ic00.canister_status(&canister_id).call_and_wait().await; assert!( match &result { Err(AgentError::UncertifiedReject { reject: RejectResponse { reject_code: RejectCode::DestinationInvalid, reject_message, error_code: Some(error_code), }, .. 
}) if *reject_message == format!("Canister {} not found", canister_id) && error_code == "IC0301" => { true } Ok((_status_call_result,)) => false, _ => false, }, "wrong error: {result:?}" ); // Delete a deleted canister should fail. let result = ic00.delete_canister(&canister_id).call_and_wait().await; assert!( matches!( &result, Err(AgentError::UncertifiedReject { reject: RejectResponse{ reject_code: RejectCode::DestinationInvalid, reject_message, error_code: Some(error_code), }, .. }) if *reject_message == format!("Canister {} not found", canister_id) && error_code == "IC0301" ), "wrong error: {result:?}" ); Ok(()) }).await } #[tokio::test] async fn canister_lifecycle_as_wrong_controller() { with_agent(async move |pic, agent| { let ic00 = ManagementCanister::create(&agent); let (canister_id,) = ic00 .create_canister() .as_provisional_create_with_amount(None) .with_effective_canister_id(get_effective_canister_id(pic).await) .call_and_wait() .await?; let canister_wasm = b"\0asm\x01\0\0\0".to_vec(); // Install once. ic00.install_code(&canister_id, &canister_wasm) .with_mode(CanisterInstallMode::Install) .call_and_wait() .await?; // Create another agent with different identity. let other_agent_identity = create_basic_identity(); let other_agent = create_agent(pic, other_agent_identity).await?; other_agent.fetch_root_key().await?; let other_ic00 = ManagementCanister::create(&other_agent); // Start as a wrong controller should fail. let result = other_ic00 .start_canister(&canister_id) .call_and_wait() .await; assert!( matches!( &result, Err(AgentError::UncertifiedReject { reject: RejectResponse { reject_code: RejectCode::CanisterError, reject_message, error_code: Some(error_code), }, .. }) if *reject_message == format!("Only controllers of canister {} can call ic00 method start_canister", canister_id) && error_code == "IC0512" ), "wrong error: {result:?}" ); // Stop as a wrong controller should fail. 
let result = other_ic00.stop_canister(&canister_id).call_and_wait().await; assert!( matches!( &result, Err(AgentError::UncertifiedReject { reject: RejectResponse { reject_code: RejectCode::CanisterError, reject_message, error_code: Some(error_code), }, ..}) if *reject_message == format!("Only controllers of canister {} can call ic00 method stop_canister", canister_id) && error_code == "IC0512" ), "wrong error: {result:?}" ); // Get canister status as a wrong controller should fail. let result = other_ic00 .canister_status(&canister_id) .call_and_wait() .await; assert!( matches!( &result, Err(AgentError::UncertifiedReject { reject: RejectResponse { reject_code: RejectCode::CanisterError, reject_message, error_code: Some(error_code), }, .. }) if *reject_message == format!("Only controllers of canister {canister_id} can call ic00 method canister_status") && error_code == "IC0512" ), "wrong error: {result:?}" ); // Delete as a wrong controller should fail. let result = other_ic00 .delete_canister(&canister_id) .call_and_wait() .await; assert!( matches!( &result, Err(AgentError::UncertifiedReject { reject: RejectResponse { reject_code: RejectCode::CanisterError, reject_message, error_code: Some(error_code), }, .. 
}) if *reject_message == format!("Only controllers of canister {canister_id} can call ic00 method delete_canister") && error_code == "IC0512" ), "wrong error: {result:?}" ); Ok(()) }).await } #[tokio::test] async fn provisional_create_canister_with_cycles() { with_wallet_canister(None, async move |pic, agent, wallet_id| { let default_canister_balance: u128 = 100_000_000_000_000; // empty cycle balance on create let wallet = WalletCanister::create(&agent, wallet_id).await?; #[derive(CandidType)] struct InCreate { cycles: u64, settings: CanisterSettings, } let create_args = InCreate { cycles: 0_u64, settings: CanisterSettings { controllers: None, compute_allocation: None, memory_allocation: None, freezing_threshold: None, reserved_cycles_limit: None, wasm_memory_limit: None, wasm_memory_threshold: None, log_visibility: None, environment_variables: None, }, }; let args = Argument::from_candid((create_args,)); let creation_fee = 500_000_000_000; let canister_initial_balance = 4_000_000_000; let (create_result,): (CreateResult,) = wallet .call( Principal::management_canister(), "create_canister", args, creation_fee + canister_initial_balance, ) .call_and_wait() .await?; let canister_id = create_result.canister_id; #[derive(CandidType)] struct In { canister_id: Principal, } let status_args = In { canister_id }; let args = Argument::from_candid((status_args,)); let (result,): (CanisterStatusResult,) = wallet .call(Principal::management_canister(), "canister_status", args, 0) .call_and_wait() .await?; assert!( result.cycles > 0_u64 && result.cycles < creation_fee + canister_initial_balance, "expected 0..{creation_fee}, got {}", result.cycles ); let ic00 = ManagementCanister::create(&agent); // cycle balance is default_canister_balance when creating with // provisional_create_canister_with_cycles(None) let (canister_id_1,) = ic00 .create_canister() .as_provisional_create_with_amount(None) .with_effective_canister_id(get_effective_canister_id(pic).await) .call_and_wait() 
.await?; let result = ic00.canister_status(&canister_id_1).call_and_wait().await?; // assume some cycles are already burned let cycles: i128 = result.0.cycles.0.try_into().unwrap(); let burned = default_canister_balance as i128 - cycles; assert!(burned > 0 && burned < 100_000_000); // cycle balance should be amount specified to // provisional_create_canister_with_cycles call let amount: u128 = 1 << 40; // 1099511627776 let (canister_id_2,) = ic00 .create_canister() .as_provisional_create_with_amount(Some(amount)) .with_effective_canister_id(get_effective_canister_id(pic).await) .call_and_wait() .await?; let result = ic00.canister_status(&canister_id_2).call_and_wait().await?; let cycles: i128 = result.0.cycles.0.try_into().unwrap(); let burned = amount as i128 - cycles; assert!( burned > 0 && burned < 100_000_000, "expected 0..100_000_000, got {burned}" ); Ok(()) }) .await } #[tokio::test] async fn randomness() {
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
true
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-utils-bindgen-tests/build.rs
ic-utils-bindgen-tests/build.rs
use std::path::PathBuf; use candid::Principal; use ic_utils_bindgen::Config; fn main() { let dir = PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap()); let base_cfg = dir.join("base.toml"); let icrc_did = dir.join("icrc1.did"); Config::new("icrc_runtime", &icrc_did) .runtime_callee() .set_type_selector_config(&base_cfg) .generate(); Config::new("icrc_static", &icrc_did) .static_callee("ryjl3-tyaaa-aaaaa-aaaba-cai".parse::<Principal>().unwrap()) .set_type_selector_config(&base_cfg) .generate(); Config::new("icrc_types", &icrc_did) .set_type_selector_config(&base_cfg) .generate(); }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-utils-bindgen-tests/src/lib.rs
ic-utils-bindgen-tests/src/lib.rs
pub mod icrc_runtime { include!(concat!(env!("OUT_DIR"), "/icrc_runtime.rs")); } pub mod icrc_static { include!(concat!(env!("OUT_DIR"), "/icrc_static.rs")); } pub mod icrc_types { include!(concat!(env!("OUT_DIR"), "/icrc_types.rs")); }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-utils-bindgen-tests/tests/bindgen_tests.rs
ic-utils-bindgen-tests/tests/bindgen_tests.rs
#![cfg(unix)] use std::{fs, sync::Arc}; use candid::{Encode, Principal}; use ic_agent::{identity::Secp256k1Identity, Agent, Identity}; use ic_utils_bindgen_tests::{icrc_runtime, icrc_static, icrc_types}; use pocket_ic::nonblocking::PocketIc; async fn with_ledger(f: impl AsyncFnOnce(&PocketIc, Principal, Agent, Arc<dyn Identity>)) { let identity = Secp256k1Identity::from_pem_file(format!( "{}/test_identity.pem", env!("CARGO_MANIFEST_DIR") )) .unwrap(); let identity = Arc::new(identity) as Arc<dyn Identity>; ref_tests::with_agent_as(identity.clone(), async move |pic, agent| { let canister = pic .create_canister_with_id(None, None, "ryjl3-tyaaa-aaaaa-aaaba-cai".parse().unwrap()) .await .unwrap(); pic.add_cycles(canister, 100_000_000_000_000).await; let init_args = icrc_types::Init { decimals: 8, minting_account: icrc_types::InitMintingAccount { owner: "aaaaa-aa".parse().unwrap(), subaccount: None, }, initial_mints: vec![icrc_types::InitInitialMintsItem { account: icrc_types::InitInitialMintsItemAccount { owner: identity.sender().unwrap(), subaccount: None, }, amount: 1_000_000_000_u128.into(), }], token_name: "TestToken".to_string(), token_symbol: "TT".to_string(), transfer_fee: 0_u128.into(), }; pic.install_canister( canister, fs::read(format!("{}/icrc1_ref.wasm", env!("CARGO_MANIFEST_DIR"))).unwrap(), Encode!(&init_args).unwrap(), None, ) .await; f(pic, canister, agent, identity).await; Ok(()) }) .await; } #[tokio::test] async fn test_runtime_principal() { with_ledger(async |_, canister, agent, identity| { let canister = icrc_runtime::Icrc1Ledger::new(&agent, canister); let (balance,) = canister .icrc1_balance_of(&icrc_runtime::Account { owner: identity.sender().unwrap(), subaccount: None, }) .await .unwrap(); assert_eq!(balance, 1_000_000_000_u128); let (res,) = canister .icrc1_transfer(&icrc_runtime::LedgerIcrc1TransferArg { to: icrc_runtime::Account { owner: Principal::anonymous(), subaccount: None, }, amount: 100_000_000_u128.into(), fee: None, memo: None, 
from_subaccount: None, created_at_time: None, }) .await .unwrap(); let _idx = res.unwrap(); let (rx_bal,) = canister .icrc1_balance_of(&icrc_runtime::Account { owner: Principal::anonymous(), subaccount: None, }) .await .unwrap(); assert_eq!(rx_bal, 100_000_000_u128); let (tx_bal,) = canister .icrc1_balance_of(&icrc_runtime::Account { owner: identity.sender().unwrap(), subaccount: None, }) .await .unwrap(); assert_eq!(tx_bal, 900_000_000_u128); }) .await; } #[tokio::test] async fn test_static_principal() { with_ledger(async |_, _canister, agent, identity| { let canister = icrc_static::Icrc1Ledger::new(&agent); let (balance,) = canister .icrc1_balance_of(&icrc_static::Account { owner: identity.sender().unwrap(), subaccount: None, }) .await .unwrap(); assert_eq!(balance, 1_000_000_000_u128); let (res,) = canister .icrc1_transfer(&icrc_static::LedgerIcrc1TransferArg { to: icrc_static::Account { owner: Principal::anonymous(), subaccount: None, }, amount: 100_000_000_u128.into(), fee: None, memo: None, from_subaccount: None, created_at_time: None, }) .await .unwrap(); let _idx = res.unwrap(); let (rx_bal,) = canister .icrc1_balance_of(&icrc_static::Account { owner: Principal::anonymous(), subaccount: None, }) .await .unwrap(); assert_eq!(rx_bal, 100_000_000_u128); let (tx_bal,) = canister .icrc1_balance_of(&icrc_static::Account { owner: identity.sender().unwrap(), subaccount: None, }) .await .unwrap(); assert_eq!(tx_bal, 900_000_000_u128); }) .await; }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-utils-bindgen/src/lib.rs
ic-utils-bindgen/src/lib.rs
#![allow(clippy::needless_doctest_main)] use candid::Principal; use candid_parser::bindings::rust::{ emit_bindgen, output_handlebar, Config as BindgenConfig, ExternalConfig, }; use candid_parser::configs::Configs; use candid_parser::pretty_check_file; use std::fs; use std::io::Write; use std::path::{Path, PathBuf}; use std::str::FromStr; /// Config for Candid to Rust bindings generation. /// /// # Choose Bindgen Modes /// /// The bindgen has the following modes: /// - Types only: Only the types definition will be generated. This is the default behavior with [`Self::new`]. /// - Static callee: The canister ID is known at compile time. Call [`Self::static_callee`] to set it. /// - Runtime callee: The canister ID is provided at runtime. Call [`Self::runtime_callee`] to set it. /// /// # Generate Bindings /// /// After configuring your bindgen settings through the methods above, you must call /// [`Self::generate`] to actually produce the Rust bindings. #[derive(Debug)] pub struct Config { canister_name: String, candid_path: PathBuf, mode: Mode, type_selector_config_path: Option<PathBuf>, // TODO: Implement type selector config } /// Bindgen mode. #[derive(Debug)] enum Mode { TypesOnly, StaticCallee { canister_id: Principal }, RuntimeCallee, } impl Config { /// Create a new `Config` instance. /// /// # Arguments /// - `canister_name` - The name of the canister. This will be used as the generated file name. /// It is important to ensure that this name is valid for use in a file system (no /// spaces, special characters, or other characters that could cause issues with file paths). /// - `candid_path` - The path to the Candid file. pub fn new<N, P>(canister_name: N, candid_path: P) -> Self where N: Into<String>, P: Into<PathBuf>, { Self { canister_name: canister_name.into(), candid_path: candid_path.into(), mode: Mode::TypesOnly, type_selector_config_path: None, } } /// Changes the bindgen mode to "Static callee", where the canister ID is known at compile time. 
/// /// This mode hardcodes the target canister ID in the generated code, making it suitable /// for deployments where the canister ID is fixed and known at compile time. /// /// # Arguments /// /// - `canister_id` - The Principal ID of the target canister pub fn static_callee<S>(&mut self, canister_id: S) -> &mut Self where S: Into<Principal>, { if !matches!(self.mode, Mode::TypesOnly) { panic!("The bindgen mode has already been set."); } self.mode = Mode::StaticCallee { canister_id: canister_id.into(), }; self } /// Changes the bindgen mode to "Runtime callee", where the canister ID is provided at runtime. pub fn runtime_callee(&mut self) -> &mut Self { if !matches!(self.mode, Mode::TypesOnly) { panic!("The bindgen mode has already been set."); } self.mode = Mode::RuntimeCallee; self } /// Sets the path to the type selector configuration file. /// /// The "type selector config" is a TOML file that specifies how certain Candid types /// should be mapped to Rust types (attributes, visibility, etc.). Please refer to the /// [specification](https://github.com/dfinity/candid/blob/master/spec/Type-selector.md#rust-binding-configuration) /// for more details. pub fn set_type_selector_config<P>(&mut self, path: P) -> &mut Self where P: Into<PathBuf>, { self.type_selector_config_path = Some(path.into()); self } /// Generate the bindings. /// /// The generated bindings will be written to the output directory specified by the /// `OUT_DIR` environment variable. The file will be named after the canister name. /// For example, if the canister name is "my_canister", the generated file will be /// located at `$OUT_DIR/my_canister.rs`. pub fn generate(&self) { let out_dir_str = std::env::var("OUT_DIR") .expect("OUT_DIR should always be set when execute the build.rs script"); let out_dir = PathBuf::from(out_dir_str); let generated_path = out_dir.join(format!("{}.rs", self.canister_name)); self.generate_to(&generated_path); } /// Generate the bindings to a user-specified path. 
pub fn generate_to(&self, path: &Path) { // 0. Load type selector config if provided let type_selector_configs_str = match &self.type_selector_config_path { Some(p) => { println!("cargo:rerun-if-changed={}", p.display()); fs::read_to_string(p).unwrap_or_else(|e| { panic!( "failed to read the type selector config file ({}): {}", p.display(), e ) }) } None => "".to_string(), }; let type_selector_configs = Configs::from_str(&type_selector_configs_str) .unwrap_or_else(|e| panic!("failed to parse the type selector config: {}", e)); let rust_bindgen_config = BindgenConfig::new(type_selector_configs); // 1. Parse the candid file and generate the Output (the struct for bindings) // This tells Cargo to re-run the build-script if the Candid file changes. println!("cargo:rerun-if-changed={}", self.candid_path.display()); let (env, actor, prog) = pretty_check_file(&self.candid_path).unwrap_or_else(|e| { panic!( "failed to parse candid file ({}): {}", self.candid_path.display(), e ) }); // unused are not handled let (output, _unused) = emit_bindgen(&rust_bindgen_config, &env, &actor, &prog); // 2. Generate the Rust bindings using the Handlebars template let mut external = ExternalConfig::default(); let content = match &self.mode { Mode::StaticCallee { canister_id } => { let template = include_str!("templates/static_callee.hbs"); external .0 .insert("canister_id".to_string(), canister_id.to_string()); output_handlebar(output, external, template) } Mode::RuntimeCallee => { let template = include_str!("templates/runtime_callee.hbs"); output_handlebar(output, external, template) } Mode::TypesOnly => { let template = include_str!("templates/types_only.hbs"); output_handlebar(output, external, template) } }; // 3. 
Write the generated Rust bindings to the output directory let mut file = fs::File::create(path).unwrap_or_else(|e| { panic!( "failed to create the output file ({}): {}", path.display(), e ) }); writeln!(file, "{content}").unwrap_or_else(|e| { panic!( "failed to write to the output file ({}): {}", path.display(), e ) }); } }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-utils/src/lib.rs
ic-utils/src/lib.rs
//! ic-utils is a collection of utilities to help build clients and canisters running //! on the Internet Computer. It is meant as a higher level tool. #![warn( missing_docs, missing_debug_implementations, elided_lifetimes_in_paths, rustdoc::broken_intra_doc_links, rustdoc::private_intra_doc_links )] #![cfg_attr(not(target_family = "wasm"), warn(clippy::future_not_send))] #![cfg_attr(docsrs, feature(doc_cfg))] /// Utilities to encapsulate calls to a canister. pub mod call; /// A higher-level canister type for managing various aspects of a canister. pub mod canister; /// A few known canister types for use with [`Canister`]. pub mod interfaces; pub use canister::{Argument, Canister};
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-utils/src/call.rs
ic-utils/src/call.rs
use async_trait::async_trait; use candid::{decode_args, decode_one, utils::ArgumentDecoder, CandidType}; use ic_agent::{ agent::{CallResponse, UpdateBuilder}, export::Principal, Agent, AgentError, }; use serde::de::DeserializeOwned; use std::fmt; use std::future::{Future, IntoFuture}; use std::marker::PhantomData; use std::pin::Pin; mod expiry; pub use expiry::Expiry; /// A type that implements synchronous calls (ie. 'query' calls). #[cfg_attr(target_family = "wasm", async_trait(?Send))] #[cfg_attr(not(target_family = "wasm"), async_trait)] pub trait SyncCall: CallIntoFuture<Output = Result<Self::Value, AgentError>> { /// The return type of the Candid function being called. type Value: for<'de> ArgumentDecoder<'de> + Send; /// Execute the call, return an array of bytes directly from the canister. #[cfg(feature = "raw")] async fn call_raw(self) -> Result<Vec<u8>, AgentError>; /// Execute the call, returning either the value returned by the canister, or an /// error returned by the Agent. async fn call(self) -> Result<Self::Value, AgentError> where Self: Sized + Send, Self::Value: 'async_trait; } /// A type that implements asynchronous calls (ie. 'update' calls). /// This can call synchronous and return a [`RequestId`](ic_agent::RequestId), or it can wait for the result /// by polling the agent, and return a type. /// /// The return type must be a tuple type that represents all the values the return /// call should be returning. #[cfg_attr(target_family = "wasm", async_trait(?Send))] #[cfg_attr(not(target_family = "wasm"), async_trait)] pub trait AsyncCall: CallIntoFuture<Output = Result<Self::Value, AgentError>> { /// The return type of the Candid function being called. type Value: for<'de> ArgumentDecoder<'de> + Send; /// Execute the call, but returns the `RequestId`. Waiting on the request Id must be /// managed by the caller using the Agent directly. 
/// /// Since the return type is encoded in the trait itself, this can lead to types /// that are not compatible to `Out` when getting the result from the Request Id. /// For example, you might hold a [`AsyncCall<u8>`], use `call()` and poll for /// the result, and try to deserialize it as a [`String`]. This would be caught by /// Rust type system, but in this case it will be checked at runtime (as `RequestId` /// does not have a type associated with it). async fn call(self) -> Result<CallResponse<Self::Value>, AgentError>; /// Execute the call, and wait for an answer using an exponential-backoff strategy. The return /// type is encoded in the trait. async fn call_and_wait(self) -> Result<Self::Value, AgentError>; /// Apply a transformation function after the call has been successful. The transformation /// is applied with the result. /// #[cfg_attr(unix, doc = " ```rust")] // pocket-ic #[cfg_attr(not(unix), doc = " ```ignore")] /// # // This test is ignored because it requires an ic to be running. We run these /// # // in the ic-ref workflow. 
/// use ic_agent::Agent; /// # use ic_agent::identity::{Identity, BasicIdentity}; /// use ic_utils::{Canister, interfaces, call::AsyncCall}; /// use candid::{Encode, Decode, CandidType, Principal}; /// /// async fn create_a_canister() -> Result<Principal, Box<dyn std::error::Error>> { /// # Ok(ref_tests::utils::with_pic(async move |pic| { /// # let canister_wasm = b"\0asm\x01\0\0\0"; /// # fn create_identity() -> impl Identity { /// # BasicIdentity::from_raw_key(&[0u8;32]) /// # } /// # /// # let url = ref_tests::utils::get_pic_url(pic); /// # /// # let effective_id = Principal::from_text("rwlgt-iiaaa-aaaaa-aaaaa-cai").unwrap(); /// let agent = Agent::builder() /// .with_url(url) /// .with_identity(create_identity()) /// .build()?; /// agent.fetch_root_key().await?; /// let management_canister = interfaces::ManagementCanister::create(&agent); /// let management_canister = &management_canister; // needed for `async move` /// /// // Create a canister, then call the management canister to install a base canister /// // WASM. This is to show how this API would be used, but is probably not a good /// // real use case. 
/// let (canister_id,) = management_canister /// .create_canister() /// .as_provisional_create_with_amount(None) /// .with_effective_canister_id(effective_id) /// .and_then(|(canister_id,)| async move { /// management_canister /// .install_code(&canister_id, canister_wasm) /// .build() /// .unwrap() /// .await?; /// Ok((canister_id,)) /// }) /// .await?; /// /// Ok(canister_id) /// # }).await) /// } /// /// # let mut runtime = tokio::runtime::Runtime::new().unwrap(); /// # runtime.block_on(async { /// let canister_id = create_a_canister().await.unwrap(); /// eprintln!("{}", canister_id); /// # }); /// ``` fn and_then<'a, Out2, R, AndThen>( self, and_then: AndThen, ) -> AndThenAsyncCaller<'a, Self::Value, Out2, Self, R, AndThen> where Self: Sized + Send + 'a, Out2: for<'de> ArgumentDecoder<'de> + Send + 'a, R: Future<Output = Result<Out2, AgentError>> + Send + 'a, AndThen: Send + Fn(Self::Value) -> R + 'a, { AndThenAsyncCaller::new(self, and_then) } /// Apply a transformation function after the call has been successful. Equivalent to `.and_then(|x| async { map(x) })`. 
fn map<'a, Out, Map>(self, map: Map) -> MappedAsyncCaller<'a, Self::Value, Out, Self, Map> where Self: Sized + Send + 'a, Out: for<'de> ArgumentDecoder<'de> + Send + 'a, Map: Send + Fn(Self::Value) -> Out + 'a, { MappedAsyncCaller::new(self, map) } } #[cfg(target_family = "wasm")] pub(crate) type CallFuture<'a, T> = Pin<Box<dyn Future<Output = Result<T, AgentError>> + 'a>>; #[cfg(not(target_family = "wasm"))] pub(crate) type CallFuture<'a, T> = Pin<Box<dyn Future<Output = Result<T, AgentError>> + Send + 'a>>; #[cfg(not(target_family = "wasm"))] #[doc(hidden)] pub trait CallIntoFuture: IntoFuture<IntoFuture = <Self as CallIntoFuture>::IntoFuture> { type IntoFuture: Future<Output = Self::Output> + Send; } #[cfg(not(target_family = "wasm"))] impl<T> CallIntoFuture for T where T: IntoFuture + ?Sized, T::IntoFuture: Send, { type IntoFuture = T::IntoFuture; } #[cfg(target_family = "wasm")] use IntoFuture as CallIntoFuture; /// A synchronous call encapsulation. #[derive(Debug)] pub struct SyncCaller<'agent, Out> where Out: for<'de> ArgumentDecoder<'de> + Send, { pub(crate) agent: &'agent Agent, pub(crate) effective_canister_id: Principal, pub(crate) canister_id: Principal, pub(crate) method_name: String, pub(crate) arg: Result<Vec<u8>, AgentError>, pub(crate) expiry: Expiry, pub(crate) phantom_out: PhantomData<Out>, } impl<'agent, Out> SyncCaller<'agent, Out> where Out: for<'de> ArgumentDecoder<'de> + Send, { /// Perform the call, consuming the the abstraction. This is a private method. async fn call_raw(self) -> Result<Vec<u8>, AgentError> { let mut builder = self.agent.query(&self.canister_id, &self.method_name); builder = self.expiry.apply_to_query(builder); builder .with_arg(self.arg?) 
.with_effective_canister_id(self.effective_canister_id) .call() .await } } #[cfg_attr(target_family = "wasm", async_trait(?Send))] #[cfg_attr(not(target_family = "wasm"), async_trait)] impl<'agent, Out> SyncCall for SyncCaller<'agent, Out> where Self: Sized, Out: 'agent + for<'de> ArgumentDecoder<'de> + Send, { type Value = Out; #[cfg(feature = "raw")] async fn call_raw(self) -> Result<Vec<u8>, AgentError> { Ok(self.call_raw().await?) } async fn call(self) -> Result<Out, AgentError> { let result = self.call_raw().await?; decode_args(&result).map_err(|e| AgentError::CandidError(Box::new(e))) } } impl<'agent, Out> IntoFuture for SyncCaller<'agent, Out> where Self: Sized, Out: 'agent + for<'de> ArgumentDecoder<'de> + Send, { type IntoFuture = CallFuture<'agent, Out>; type Output = Result<Out, AgentError>; fn into_future(self) -> Self::IntoFuture { SyncCall::call(self) } } /// An async caller, encapsulating a call to an update method. #[derive(Debug)] pub struct AsyncCaller<'agent, Out> where Out: for<'de> ArgumentDecoder<'de> + Send, { pub(crate) agent: &'agent Agent, pub(crate) effective_canister_id: Principal, pub(crate) canister_id: Principal, pub(crate) method_name: String, pub(crate) arg: Result<Vec<u8>, AgentError>, pub(crate) expiry: Expiry, pub(crate) phantom_out: PhantomData<Out>, } impl<'agent, Out> AsyncCaller<'agent, Out> where Out: for<'de> ArgumentDecoder<'de> + Send + 'agent, { /// Build an `UpdateBuilder` call that can be used directly with the [Agent]. This is /// essentially downleveling this type into the lower level [ic-agent] abstraction. pub fn build_call(self) -> Result<UpdateBuilder<'agent>, AgentError> { let mut builder = self.agent.update(&self.canister_id, &self.method_name); builder = self.expiry.apply_to_update(builder); builder = builder .with_arg(self.arg?) .with_effective_canister_id(self.effective_canister_id); Ok(builder) } /// See [`AsyncCall::call`]. 
pub async fn call(self) -> Result<CallResponse<Out>, AgentError> { let response_bytes = match self.build_call()?.call().await? { CallResponse::Response((response_bytes, _)) => response_bytes, CallResponse::Poll(request_id) => return Ok(CallResponse::Poll(request_id)), }; let decoded_response = decode_args(&response_bytes).map_err(|e| AgentError::CandidError(Box::new(e)))?; Ok(CallResponse::Response(decoded_response)) } /// See [`AsyncCall::call_and_wait`]. pub async fn call_and_wait(self) -> Result<Out, AgentError> { self.build_call()? .call_and_wait() .await .and_then(|r| decode_args(&r).map_err(|e| AgentError::CandidError(Box::new(e)))) } /// Equivalent to calling [`AsyncCall::call_and_wait`] with the expected return type `(T,)`. pub async fn call_and_wait_one<T>(self) -> Result<T, AgentError> where T: DeserializeOwned + CandidType, { self.build_call()? .call_and_wait() .await .and_then(|r| decode_one(&r).map_err(|e| AgentError::CandidError(Box::new(e)))) } /// See [`AsyncCall::map`]. pub fn map<Out2, Map>(self, map: Map) -> MappedAsyncCaller<'agent, Out, Out2, Self, Map> where Out2: for<'de> ArgumentDecoder<'de> + Send, Map: Send + Fn(Out) -> Out2, { MappedAsyncCaller::new(self, map) } } #[cfg_attr(target_family = "wasm", async_trait(?Send))] #[cfg_attr(not(target_family = "wasm"), async_trait)] impl<'agent, Out> AsyncCall for AsyncCaller<'agent, Out> where Out: for<'de> ArgumentDecoder<'de> + Send + 'agent, { type Value = Out; async fn call(self) -> Result<CallResponse<Out>, AgentError> { self.call().await } async fn call_and_wait(self) -> Result<Out, AgentError> { self.call_and_wait().await } } impl<'agent, Out> IntoFuture for AsyncCaller<'agent, Out> where Out: for<'de> ArgumentDecoder<'de> + Send + 'agent, { type IntoFuture = CallFuture<'agent, Out>; type Output = Result<Out, AgentError>; fn into_future(self) -> Self::IntoFuture { AsyncCall::call_and_wait(self) } } /// An `AsyncCall` that applies a transform function to the result of the call. 
Because of /// constraints on the type system in Rust, both the input and output to the function must be /// deserializable. pub struct AndThenAsyncCaller< 'a, Out: for<'de> ArgumentDecoder<'de> + Send, Out2: for<'de> ArgumentDecoder<'de> + Send, Inner: AsyncCall<Value = Out> + Send + 'a, R: Future<Output = Result<Out2, AgentError>> + Send, AndThen: Send + Fn(Out) -> R, > { inner: Inner, and_then: AndThen, _out: PhantomData<Out>, _out2: PhantomData<Out2>, _lifetime: PhantomData<&'a ()>, } impl<'a, Out, Out2, Inner, R, AndThen> fmt::Debug for AndThenAsyncCaller<'a, Out, Out2, Inner, R, AndThen> where Out: for<'de> ArgumentDecoder<'de> + Send, Out2: for<'de> ArgumentDecoder<'de> + Send, Inner: AsyncCall<Value = Out> + Send + fmt::Debug + 'a, R: Future<Output = Result<Out2, AgentError>> + Send, AndThen: Send + Fn(Out) -> R + fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("AndThenAsyncCaller") .field("inner", &self.inner) .field("and_then", &self.and_then) .field("_out", &self._out) .field("_out2", &self._out2) .finish() } } impl<'a, Out, Out2, Inner, R, AndThen> AndThenAsyncCaller<'a, Out, Out2, Inner, R, AndThen> where Out: for<'de> ArgumentDecoder<'de> + Send + 'a, Out2: for<'de> ArgumentDecoder<'de> + Send + 'a, Inner: AsyncCall<Value = Out> + Send + 'a, R: Future<Output = Result<Out2, AgentError>> + Send + 'a, AndThen: Send + Fn(Out) -> R + 'a, { /// Equivalent to `inner.and_then(and_then)`. pub fn new(inner: Inner, and_then: AndThen) -> Self { Self { inner, and_then, _out: PhantomData, _out2: PhantomData, _lifetime: PhantomData, } } /// See [`AsyncCall::call`]. pub async fn call(self) -> Result<CallResponse<Out2>, AgentError> { let raw_response = self.inner.call().await?; let response = match raw_response { CallResponse::Response(response_bytes) => { let mapped_response = (self.and_then)(response_bytes); CallResponse::Response(mapped_response.await?) 
} CallResponse::Poll(request_id) => CallResponse::Poll(request_id), }; Ok(response) } /// See [`AsyncCall::call_and_wait`]. pub async fn call_and_wait(self) -> Result<Out2, AgentError> { let v = self.inner.call_and_wait().await?; let f = (self.and_then)(v); f.await } /// See [`AsyncCall::and_then`]. pub fn and_then<Out3, R2, AndThen2>( self, and_then: AndThen2, ) -> AndThenAsyncCaller<'a, Out2, Out3, Self, R2, AndThen2> where Out3: for<'de> ArgumentDecoder<'de> + Send + 'a, R2: Future<Output = Result<Out3, AgentError>> + Send + 'a, AndThen2: Send + Fn(Out2) -> R2 + 'a, { AndThenAsyncCaller::new(self, and_then) } /// See [`AsyncCall::map`]. pub fn map<Out3, Map>(self, map: Map) -> MappedAsyncCaller<'a, Out2, Out3, Self, Map> where Out3: for<'de> ArgumentDecoder<'de> + Send, Map: Send + Fn(Out2) -> Out3, { MappedAsyncCaller::new(self, map) } } #[cfg_attr(target_family = "wasm", async_trait(?Send))] #[cfg_attr(not(target_family = "wasm"), async_trait)] impl<'a, Out, Out2, Inner, R, AndThen> AsyncCall for AndThenAsyncCaller<'a, Out, Out2, Inner, R, AndThen> where Out: for<'de> ArgumentDecoder<'de> + Send + 'a, Out2: for<'de> ArgumentDecoder<'de> + Send + 'a, Inner: AsyncCall<Value = Out> + Send + 'a, R: Future<Output = Result<Out2, AgentError>> + Send + 'a, AndThen: Send + Fn(Out) -> R + 'a, { type Value = Out2; async fn call(self) -> Result<CallResponse<Out2>, AgentError> { self.call().await } async fn call_and_wait(self) -> Result<Out2, AgentError> { self.call_and_wait().await } } impl<'a, Out, Out2, Inner, R, AndThen> IntoFuture for AndThenAsyncCaller<'a, Out, Out2, Inner, R, AndThen> where Out: for<'de> ArgumentDecoder<'de> + Send + 'a, Out2: for<'de> ArgumentDecoder<'de> + Send + 'a, Inner: AsyncCall<Value = Out> + Send + 'a, R: Future<Output = Result<Out2, AgentError>> + Send + 'a, AndThen: Send + Fn(Out) -> R + 'a, { type IntoFuture = CallFuture<'a, Out2>; type Output = Result<Out2, AgentError>; fn into_future(self) -> Self::IntoFuture { 
AsyncCall::call_and_wait(self) } } /// A structure that applies a transform function to the result of a call. Because of constraints /// on the type system in Rust, both the input and output to the function must be deserializable. pub struct MappedAsyncCaller< 'a, Out: for<'de> ArgumentDecoder<'de> + Send, Out2: for<'de> ArgumentDecoder<'de> + Send, Inner: AsyncCall<Value = Out> + Send + 'a, Map: Send + Fn(Out) -> Out2, > { inner: Inner, map: Map, _out: PhantomData<Out>, _out2: PhantomData<Out2>, _lifetime: PhantomData<&'a ()>, } impl<'a, Out, Out2, Inner, Map> fmt::Debug for MappedAsyncCaller<'a, Out, Out2, Inner, Map> where Out: for<'de> ArgumentDecoder<'de> + Send, Out2: for<'de> ArgumentDecoder<'de> + Send, Inner: AsyncCall<Value = Out> + Send + fmt::Debug + 'a, Map: Send + Fn(Out) -> Out2 + fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("MappedAsyncCaller") .field("inner", &self.inner) .field("map", &self.map) .field("_out", &self._out) .field("_out2", &self._out2) .finish() } } impl<'a, Out, Out2, Inner, Map> MappedAsyncCaller<'a, Out, Out2, Inner, Map> where Out: for<'de> ArgumentDecoder<'de> + Send, Out2: for<'de> ArgumentDecoder<'de> + Send, Inner: AsyncCall<Value = Out> + Send + 'a, Map: Send + Fn(Out) -> Out2, { /// Equivalent to `inner.map(map)`. pub fn new(inner: Inner, map: Map) -> Self { Self { inner, map, _out: PhantomData, _out2: PhantomData, _lifetime: PhantomData, } } /// See [`AsyncCall::call`]. pub async fn call(self) -> Result<CallResponse<Out2>, AgentError> { self.inner.call().await.map(|response| match response { CallResponse::Response(response_bytes) => { let mapped_response = (self.map)(response_bytes); CallResponse::Response(mapped_response) } CallResponse::Poll(request_id) => CallResponse::Poll(request_id), }) } /// See [`AsyncCall::call_and_wait`]. 
pub async fn call_and_wait(self) -> Result<Out2, AgentError> { let v = self.inner.call_and_wait().await?; Ok((self.map)(v)) } /// See [`AsyncCall::and_then`]. pub fn and_then<Out3, R2, AndThen2>( self, and_then: AndThen2, ) -> AndThenAsyncCaller<'a, Out2, Out3, Self, R2, AndThen2> where Out3: for<'de> ArgumentDecoder<'de> + Send + 'a, R2: Future<Output = Result<Out3, AgentError>> + Send + 'a, AndThen2: Send + Fn(Out2) -> R2 + 'a, { AndThenAsyncCaller::new(self, and_then) } /// See [`AsyncCall::map`]. pub fn map<Out3, Map2>(self, map: Map2) -> MappedAsyncCaller<'a, Out2, Out3, Self, Map2> where Out3: for<'de> ArgumentDecoder<'de> + Send, Map2: Send + Fn(Out2) -> Out3, { MappedAsyncCaller::new(self, map) } } #[cfg_attr(target_family = "wasm", async_trait(?Send))] #[cfg_attr(not(target_family = "wasm"), async_trait)] impl<'a, Out, Out2, Inner, Map> AsyncCall for MappedAsyncCaller<'a, Out, Out2, Inner, Map> where Out: for<'de> ArgumentDecoder<'de> + Send + 'a, Out2: for<'de> ArgumentDecoder<'de> + Send + 'a, Inner: AsyncCall<Value = Out> + Send + 'a, Map: Send + Fn(Out) -> Out2 + 'a, { type Value = Out2; async fn call(self) -> Result<CallResponse<Out2>, AgentError> { self.call().await } async fn call_and_wait(self) -> Result<Out2, AgentError> { self.call_and_wait().await } } impl<'a, Out, Out2, Inner, Map> IntoFuture for MappedAsyncCaller<'a, Out, Out2, Inner, Map> where Out: for<'de> ArgumentDecoder<'de> + Send + 'a, Out2: for<'de> ArgumentDecoder<'de> + Send + 'a, Inner: AsyncCall<Value = Out> + Send + 'a, Map: Send + Fn(Out) -> Out2 + 'a, { type IntoFuture = CallFuture<'a, Out2>; type Output = Result<Out2, AgentError>; fn into_future(self) -> Self::IntoFuture { AsyncCall::call_and_wait(self) } }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-utils/src/interfaces.rs
ic-utils/src/interfaces.rs
pub mod bitcoin_canister; pub mod http_request; pub mod management_canister; pub mod wallet; pub use bitcoin_canister::BitcoinCanister; pub use http_request::HttpRequestCanister; pub use management_canister::ManagementCanister; pub use wallet::WalletCanister;
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-utils/src/canister.rs
ic-utils/src/canister.rs
use crate::call::{AsyncCaller, SyncCaller}; use candid::utils::ArgumentEncoder; use candid::{ser::IDLBuilder, types::value::IDLValue, utils::ArgumentDecoder, CandidType, Encode}; use ic_agent::{export::Principal, Agent, AgentError, RequestId}; use std::convert::TryInto; use thiserror::Error; /// An error happened while building a canister. #[derive(Debug, Error)] pub enum CanisterBuilderError { /// There was an error parsing the canister ID. #[error("Getting the Canister ID returned an error: {0}")] PrincipalError(#[from] Box<dyn std::error::Error + std::marker::Send + std::marker::Sync>), /// The agent was not provided. #[error("Must specify an Agent")] MustSpecifyAnAgent(), /// The canister ID was not provided. #[error("Must specify a Canister ID")] MustSpecifyCanisterId(), } /// A canister builder, which can be used to create a canister abstraction. #[derive(Debug, Default)] pub struct CanisterBuilder<'agent> { agent: Option<&'agent Agent>, canister_id: Option<Result<Principal, CanisterBuilderError>>, } impl<'agent> CanisterBuilder<'agent> { /// Create a canister builder with no value. pub fn new() -> CanisterBuilder<'static> { Default::default() } /// Attach a canister ID to this canister. pub fn with_canister_id<E, P>(self, canister_id: P) -> Self where E: 'static + std::error::Error + std::marker::Send + std::marker::Sync, P: TryInto<Principal, Error = E>, { Self { canister_id: Some( canister_id .try_into() .map_err(|e| CanisterBuilderError::PrincipalError(Box::new(e))), ), ..self } } /// Assign an agent to the canister being built. pub fn with_agent(self, agent: &'agent Agent) -> Self { CanisterBuilder { agent: Some(agent), ..self } } /// Create this canister abstraction after passing in all the necessary state. pub fn build(self) -> Result<Canister<'agent>, CanisterBuilderError> { let canister_id = if let Some(cid) = self.canister_id { cid? 
} else { return Err(CanisterBuilderError::MustSpecifyCanisterId()); }; let agent = self .agent .ok_or(CanisterBuilderError::MustSpecifyAnAgent())?; Ok(Canister { agent, canister_id }) } } /// Create an encapsulation of a Canister running on the Internet Computer. /// This supports making calls to methods, installing code if needed, and various /// utilities related to a canister. /// /// This is the higher level construct for talking to a canister on the Internet /// Computer. #[derive(Debug, Clone)] pub struct Canister<'agent> { pub(super) agent: &'agent Agent, pub(super) canister_id: Principal, } impl<'agent> Canister<'agent> { /// Get the canister ID of this canister. /// Prefer using [`canister_id`](Canister::canister_id) instead. pub fn canister_id_(&self) -> &Principal { &self.canister_id } /// Get the canister ID of this canister. pub fn canister_id(&self) -> &Principal { &self.canister_id } /// Create an `AsyncCallBuilder` to do an update call. /// Prefer using [`update`](Canister::update) instead. pub fn update_<'canister>( &'canister self, method_name: &str, ) -> AsyncCallBuilder<'agent, 'canister> { AsyncCallBuilder::new(self, method_name) } /// Create an `AsyncCallBuilder` to do an update call. pub fn update<'canister>( &'canister self, method_name: &str, ) -> AsyncCallBuilder<'agent, 'canister> { AsyncCallBuilder::new(self, method_name) } /// Create a `SyncCallBuilder` to do a query call. /// Prefer using [`query`](Canister::query) instead. pub fn query_<'canister>( &'canister self, method_name: &str, ) -> SyncCallBuilder<'agent, 'canister> { SyncCallBuilder::new(self, method_name) } /// Create a `SyncCallBuilder` to do a query call. pub fn query<'canister>( &'canister self, method_name: &str, ) -> SyncCallBuilder<'agent, 'canister> { SyncCallBuilder::new(self, method_name) } /// Call `request_status` on the `RequestId` in a loop and return the response as a byte vector. 
pub async fn wait<'canister>( &'canister self, request_id: &RequestId, ) -> Result<Vec<u8>, AgentError> { self.agent .wait(request_id, self.canister_id) .await .map(|x| x.0) } /// Creates a copy of this canister, changing the canister ID to the provided principal. /// Prefer using [`clone_with`](Canister::clone_with) instead. pub fn clone_with_(&self, id: Principal) -> Self { Self { agent: self.agent, canister_id: id, } } /// Creates a copy of this canister, changing the canister ID to the provided principal. pub fn clone_with(&self, id: Principal) -> Self { Self { agent: self.agent, canister_id: id, } } /// Create a `CanisterBuilder` instance to build a canister abstraction. pub fn builder() -> CanisterBuilder<'agent> { Default::default() } } /// A buffer to hold canister argument blob. #[derive(Debug, Default)] pub struct Argument(pub(crate) Option<Result<Vec<u8>, AgentError>>); impl Argument { /// Set an IDL Argument. Can only be called at most once. pub fn set_idl_arg<A: CandidType>(&mut self, arg: A) { match self.0 { None => self.0 = Some(Encode!(&arg).map_err(|e| e.into())), Some(_) => panic!("argument is being set more than once"), } } /// Set an `IDLValue` Argument. Can only be called at most once. pub fn set_value_arg(&mut self, arg: IDLValue) { match self.0 { None => { let mut builder = IDLBuilder::new(); let result = builder .value_arg(&arg) .and_then(|builder| builder.serialize_to_vec()) .map_err(Into::into); self.0 = Some(result); } Some(_) => panic!("argument is being set more than once"), } } /// Set the argument as raw. Can only be called at most once. pub fn set_raw_arg(&mut self, arg: Vec<u8>) { match self.0 { None => self.0 = Some(Ok(arg)), Some(_) => panic!("argument is being set more than once"), } } /// Return the argument blob. pub fn serialize(self) -> Result<Vec<u8>, AgentError> { self.0.unwrap_or_else(|| Ok(Encode!()?)) } /// Resets the argument to an empty message. 
pub fn reset(&mut self) { *self = Default::default(); } /// Creates an empty argument. pub fn new() -> Self { Default::default() } /// Creates an argument from an arbitrary blob. Equivalent to [`set_raw_arg`](Argument::set_raw_arg). pub fn from_raw(raw: Vec<u8>) -> Self { Self(Some(Ok(raw))) } /// Creates an argument from an existing Candid `ArgumentEncoder`. pub fn from_candid(tuple: impl ArgumentEncoder) -> Self { let mut builder = IDLBuilder::new(); let result = tuple .encode(&mut builder) .and_then(|_| builder.serialize_to_vec()) .map_err(Into::into); Self(Some(result)) } } /// A builder for a synchronous call (ie. query) to the Internet Computer. /// /// See [`SyncCaller`] for a description of this structure once built. #[derive(Debug)] pub struct SyncCallBuilder<'agent, 'canister> { canister: &'canister Canister<'agent>, method_name: String, effective_canister_id: Principal, arg: Argument, } impl<'agent: 'canister, 'canister> SyncCallBuilder<'agent, 'canister> { /// Create a new instance of an `AsyncCallBuilder`. pub(super) fn new<M: Into<String>>( canister: &'canister Canister<'agent>, method_name: M, ) -> Self { Self { canister, method_name: method_name.into(), effective_canister_id: canister.canister_id().to_owned(), arg: Default::default(), } } } impl<'agent: 'canister, 'canister> SyncCallBuilder<'agent, 'canister> { /// Set the argument with candid argument. Can be called at most once. pub fn with_arg<Argument>(mut self, arg: Argument) -> Self where Argument: CandidType + Sync + Send, { self.arg.set_idl_arg(arg); self } /// Set the argument with multiple arguments as tuple. Can be called at most once. pub fn with_args(mut self, tuple: impl ArgumentEncoder) -> Self { assert!(self.arg.0.is_none(), "argument is being set more than once"); self.arg = Argument::from_candid(tuple); self } /// Set the argument with `IDLValue` argument. Can be called at most once. 
/// /// TODO: make this method unnecessary ([#132](https://github.com/dfinity/agent-rs/issues/132)) pub fn with_value_arg(mut self, arg: IDLValue) -> Self { self.arg.set_value_arg(arg); self } /// Set the argument with raw argument bytes. Can be called at most once. pub fn with_arg_raw(mut self, arg: Vec<u8>) -> Self { self.arg.set_raw_arg(arg); self } /// Sets the [effective canister ID](https://internetcomputer.org/docs/references/current/ic-interface-spec#http-effective-canister-id) of the destination. pub fn with_effective_canister_id(mut self, canister_id: Principal) -> Self { self.effective_canister_id = canister_id; self } /// Builds a [`SyncCaller`] from this builder's state. pub fn build<Output>(self) -> SyncCaller<'agent, Output> where Output: for<'de> ArgumentDecoder<'de> + Send + Sync, { let c = self.canister; SyncCaller { agent: c.agent, effective_canister_id: self.effective_canister_id, canister_id: c.canister_id, method_name: self.method_name.clone(), arg: self.arg.serialize(), expiry: Default::default(), phantom_out: std::marker::PhantomData, } } } /// A builder for an asynchronous call (ie. update) to the Internet Computer. /// /// See [`AsyncCaller`] for a description of this structure. #[derive(Debug)] pub struct AsyncCallBuilder<'agent, 'canister> { canister: &'canister Canister<'agent>, method_name: String, effective_canister_id: Principal, arg: Argument, } impl<'agent: 'canister, 'canister> AsyncCallBuilder<'agent, 'canister> { /// Create a new instance of an `AsyncCallBuilder`. pub(super) fn new( canister: &'canister Canister<'agent>, method_name: &str, ) -> AsyncCallBuilder<'agent, 'canister> { Self { canister, method_name: method_name.to_string(), effective_canister_id: canister.canister_id().to_owned(), arg: Default::default(), } } } impl<'agent: 'canister, 'canister> AsyncCallBuilder<'agent, 'canister> { /// Set the argument with Candid argument. Can be called at most once. 
pub fn with_arg<Argument>(mut self, arg: Argument) -> Self where Argument: CandidType + Sync + Send, { self.arg.set_idl_arg(arg); self } /// Set the argument with multiple arguments as tuple. Can be called at most once. pub fn with_args(mut self, tuple: impl ArgumentEncoder) -> Self { assert!(self.arg.0.is_none(), "argument is being set more than once"); self.arg = Argument::from_candid(tuple); self } /// Set the argument with raw argument bytes. Can be called at most once. pub fn with_arg_raw(mut self, arg: Vec<u8>) -> Self { self.arg.set_raw_arg(arg); self } /// Sets the [effective canister ID](https://internetcomputer.org/docs/current/references/ic-interface-spec#http-effective-canister-id) of the destination. pub fn with_effective_canister_id(mut self, canister_id: Principal) -> Self { self.effective_canister_id = canister_id; self } /// Builds an [`AsyncCaller`] from this builder's state. pub fn build<Output>(self) -> AsyncCaller<'agent, Output> where Output: for<'de> ArgumentDecoder<'de> + Send + Sync, { let c = self.canister; AsyncCaller { agent: c.agent, effective_canister_id: self.effective_canister_id, canister_id: c.canister_id, method_name: self.method_name.clone(), arg: self.arg.serialize(), expiry: Default::default(), phantom_out: std::marker::PhantomData, } } } #[cfg(all(test, unix))] // pocket-ic mod tests { use super::*; use crate::call::AsyncCall; use crate::interfaces::ManagementCanister; #[tokio::test] async fn simple() { ref_tests::utils::with_agent(async move |pic, agent| { let management_canister = ManagementCanister::from_canister( Canister::builder() .with_agent(&agent) .with_canister_id("aaaaa-aa") .build() .unwrap(), ); let (new_canister_id,) = management_canister .create_canister() .as_provisional_create_with_amount(None) .with_effective_canister_id(ref_tests::utils::get_effective_canister_id(pic).await) .call_and_wait() .await .unwrap(); let (status,) = management_canister .canister_status(&new_canister_id) .call_and_wait() .await 
.unwrap(); assert_eq!(format!("{:?}", status.status), "Running"); let canister_wasm = b"\0asm\x01\0\0\0"; management_canister .install_code(&new_canister_id, canister_wasm) .call_and_wait() .await .unwrap(); let canister = Canister::builder() .with_agent(&agent) .with_canister_id(new_canister_id) .build() .unwrap(); assert!(canister .update("hello") .build::<()>() .call_and_wait() .await .is_err()); Ok(()) }) .await } }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-utils/src/call/expiry.rs
ic-utils/src/call/expiry.rs
use std::time::{Duration, SystemTime}; use ic_agent::agent::{QueryBuilder, UpdateBuilder}; use time::OffsetDateTime; /// An expiry value. Either not specified (the default), a delay relative to the time the /// call is made, or a specific date time. #[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq, Default)] pub enum Expiry { /// Unspecified. Will not try to override the Agent's value, which might itself have /// its own default value. #[default] Unspecified, /// A duration that will be added to the system time when the call is made. Delay(Duration), /// A specific date and time to use for the expiry of the request. DateTime(OffsetDateTime), } impl Expiry { /// Create an expiry that happens after a duration. #[inline] pub fn after(d: Duration) -> Self { Self::Delay(d) } /// Set the expiry field to a specific date and time. #[inline] pub fn at(dt: impl Into<OffsetDateTime>) -> Self { Self::DateTime(dt.into()) } pub(crate) fn apply_to_update(self, u: UpdateBuilder<'_>) -> UpdateBuilder<'_> { match self { Expiry::Unspecified => u, Expiry::Delay(d) => u.expire_after(d), Expiry::DateTime(dt) => u.expire_at(dt), } } pub(crate) fn apply_to_query(self, u: QueryBuilder<'_>) -> QueryBuilder<'_> { match self { Expiry::Unspecified => u, Expiry::Delay(d) => u.expire_after(d), Expiry::DateTime(dt) => u.expire_at(dt), } } } impl From<Duration> for Expiry { fn from(d: Duration) -> Self { Self::Delay(d) } } impl From<SystemTime> for Expiry { fn from(dt: SystemTime) -> Self { Self::DateTime(dt.into()) } } impl From<OffsetDateTime> for Expiry { fn from(dt: OffsetDateTime) -> Self { Self::DateTime(dt) } }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-utils/src/interfaces/bitcoin_canister.rs
ic-utils/src/interfaces/bitcoin_canister.rs
//! The canister interface for the [Bitcoin canister](https://github.com/dfinity/bitcoin-canister). use std::ops::Deref; use candid::{CandidType, Principal}; use ic_agent::{Agent, AgentError}; use serde::Deserialize; use crate::{ call::{AsyncCall, SyncCall}, Canister, }; /// The canister interface for the IC [Bitcoin canister](https://github.com/dfinity/bitcoin-canister). #[derive(Debug)] pub struct BitcoinCanister<'agent> { canister: Canister<'agent>, network: BitcoinNetwork, } impl<'agent> Deref for BitcoinCanister<'agent> { type Target = Canister<'agent>; fn deref(&self) -> &Self::Target { &self.canister } } const MAINNET_ID: Principal = Principal::from_slice(&[0x00, 0x00, 0x00, 0x00, 0x01, 0xa0, 0x00, 0x04, 0x01, 0x01]); const TESTNET_ID: Principal = Principal::from_slice(&[0x00, 0x00, 0x00, 0x00, 0x01, 0xa0, 0x00, 0x01, 0x01, 0x01]); impl<'agent> BitcoinCanister<'agent> { /// Create a `BitcoinCanister` interface from an existing canister object. pub fn from_canister(canister: Canister<'agent>, network: BitcoinNetwork) -> Self { Self { canister, network } } /// Create a `BitcoinCanister` interface pointing to the specified canister ID. pub fn create(agent: &'agent Agent, canister_id: Principal, network: BitcoinNetwork) -> Self { Self::from_canister( Canister::builder() .with_agent(agent) .with_canister_id(canister_id) .build() .expect("all required fields should be set"), network, ) } /// Create a `BitcoinCanister` interface for the Bitcoin mainnet canister on the IC mainnet. pub fn mainnet(agent: &'agent Agent) -> Self { Self::for_network(agent, BitcoinNetwork::Mainnet).expect("valid network") } /// Create a `BitcoinCanister` interface for the Bitcoin testnet canister on the IC mainnet. pub fn testnet(agent: &'agent Agent) -> Self { Self::for_network(agent, BitcoinNetwork::Testnet).expect("valid network") } /// Create a `BitcoinCanister` interface for the specified Bitcoin network on the IC mainnet. Errors if `Regtest` is specified. 
pub fn for_network(agent: &'agent Agent, network: BitcoinNetwork) -> Result<Self, AgentError> { let canister_id = match network { BitcoinNetwork::Mainnet => MAINNET_ID, BitcoinNetwork::Testnet => TESTNET_ID, BitcoinNetwork::Regtest => { return Err(AgentError::MessageError( "No applicable canister ID for regtest".to_string(), )) } }; Ok(Self::create(agent, canister_id, network)) } /// Gets the BTC balance (in satoshis) of a particular Bitcoin address, filtering by number of confirmations. /// Most applications should require 6 confirmations. pub fn get_balance( &self, address: &str, min_confirmations: Option<u32>, ) -> impl 'agent + AsyncCall<Value = (u64,)> { self.update("bitcoin_get_balance") .with_arg(GetBalance { address, network: self.network, min_confirmations, }) .build() } /// Gets the BTC balance (in satoshis) of a particular Bitcoin address, filtering by number of confirmations. /// Most applications should require 6 confirmations. pub fn get_balance_query( &self, address: &str, min_confirmations: Option<u32>, ) -> impl 'agent + SyncCall<Value = (u64,)> { self.query("bitcoin_get_balance_query") .with_arg(GetBalance { address, network: self.network, min_confirmations, }) .build() } /// Fetch the list of [UTXOs](https://en.wikipedia.org/wiki/Unspent_transaction_output) for a Bitcoin address, /// filtering by number of confirmations. Most applications should require 6 confirmations. /// /// This method is paginated. If not all the results can be returned, then `next_page` will be set to `Some`, /// and its value can be passed to this method to get the next page. pub fn get_utxos( &self, address: &str, filter: Option<UtxosFilter>, ) -> impl 'agent + AsyncCall<Value = (GetUtxosResponse,)> { self.update("bitcoin_get_utxos") .with_arg(GetUtxos { address, network: self.network, filter, }) .build() } /// Fetch the list of [UTXOs](https://en.wikipedia.org/wiki/Unspent_transaction_output) for a Bitcoin address, /// filtering by number of confirmations. 
Most applications should require 6 confirmations. /// /// This method is paginated. If not all the results can be returned, then `next_page` will be set to `Some`, /// and its value can be passed to this method to get the next page. pub fn get_utxos_query( &self, address: &str, filter: Option<UtxosFilter>, ) -> impl 'agent + SyncCall<Value = (GetUtxosResponse,)> { self.query("bitcoin_get_utxos_query") .with_arg(GetUtxos { address, network: self.network, filter, }) .build() } /// Gets the transaction fee percentiles for the last 10,000 transactions. In the returned vector, `v[i]` is the `i`th percentile fee, /// measured in millisatoshis/vbyte, and `v[0]` is the smallest fee. pub fn get_current_fee_percentiles(&self) -> impl 'agent + AsyncCall<Value = (Vec<u64>,)> { #[derive(CandidType)] struct In { network: BitcoinNetwork, } self.update("bitcoin_get_current_fee_percentiles") .with_arg(In { network: self.network, }) .build() } /// Gets the block headers for the specified range of blocks. If `end_height` is `None`, the returned `tip_height` provides the tip at the moment /// the chain was queried. pub fn get_block_headers( &self, start_height: u32, end_height: Option<u32>, ) -> impl 'agent + AsyncCall<Value = (GetBlockHeadersResponse,)> { #[derive(CandidType)] struct In { start_height: u32, end_height: Option<u32>, } self.update("bitcoin_get_block_headers") .with_arg(In { start_height, end_height, }) .build() } /// Submits a new Bitcoin transaction. No guarantees are made about the outcome. 
pub fn send_transaction(&self, transaction: Vec<u8>) -> impl 'agent + AsyncCall<Value = ()> { #[derive(CandidType, Deserialize)] struct In { network: BitcoinNetwork, #[serde(with = "serde_bytes")] transaction: Vec<u8>, } self.update("bitcoin_send_transaction") .with_arg(In { network: self.network, transaction, }) .build() } } #[derive(Debug, CandidType)] struct GetBalance<'a> { address: &'a str, network: BitcoinNetwork, min_confirmations: Option<u32>, } #[derive(Debug, CandidType)] struct GetUtxos<'a> { address: &'a str, network: BitcoinNetwork, filter: Option<UtxosFilter>, } /// The Bitcoin network that a Bitcoin transaction is placed on. #[derive(Clone, Copy, Debug, CandidType, Deserialize, PartialEq, Eq)] pub enum BitcoinNetwork { /// The BTC network. #[serde(rename = "mainnet")] Mainnet, /// The TESTBTC network. #[serde(rename = "testnet")] Testnet, /// The REGTEST network. /// /// This is only available when developing with local replica. #[serde(rename = "regtest")] Regtest, } /// Defines how to filter results from [`BitcoinCanister::get_utxos_query`]. #[derive(Debug, Clone, CandidType, Deserialize)] pub enum UtxosFilter { /// Filter by the minimum number of UTXO confirmations. Most applications should set this to 6. #[serde(rename = "min_confirmations")] MinConfirmations(u32), /// When paginating results, use this page. Provided by [`GetUtxosResponse.next_page`](GetUtxosResponse). #[serde(rename = "page")] Page(#[serde(with = "serde_bytes")] Vec<u8>), } /// Unique output descriptor of a Bitcoin transaction. #[derive(Debug, Clone, CandidType, Deserialize)] pub struct UtxoOutpoint { /// The ID of the transaction. Not necessarily unique on its own. #[serde(with = "serde_bytes")] pub txid: Vec<u8>, /// The index of the outpoint within the transaction. pub vout: u32, } /// A Bitcoin [`UTXO`](https://en.wikipedia.org/wiki/Unspent_transaction_output), produced by a transaction. 
#[derive(Debug, Clone, CandidType, Deserialize)] pub struct Utxo { /// The transaction outpoint that produced this UTXO. pub outpoint: UtxoOutpoint, /// The BTC quantity, in satoshis. pub value: u64, /// The block index this transaction was placed at. pub height: u32, } /// Response type for the [`BitcoinCanister::get_utxos_query`] function. #[derive(Debug, Clone, CandidType, Deserialize)] pub struct GetUtxosResponse { /// A list of UTXOs available for the specified address. pub utxos: Vec<Utxo>, /// The hash of the tip. #[serde(with = "serde_bytes")] pub tip_block_hash: Vec<u8>, /// The block index of the tip of the chain known to the IC. pub tip_height: u32, /// If `Some`, then `utxos` does not contain the entire results of the query. /// Call `bitcoin_get_utxos_query` again using `UtxosFilter::Page` for the next page of results. pub next_page: Option<Vec<u8>>, } /// Response type for the [`BitcoinCanister::get_block_headers`] function. #[derive(Debug, Clone, CandidType, Deserialize)] pub struct GetBlockHeadersResponse { /// The tip of the chain, current to when the headers were fetched. pub tip_height: u32, /// The headers of the requested block range. pub block_headers: Vec<Vec<u8>>, }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-utils/src/interfaces/http_request.rs
ic-utils/src/interfaces/http_request.rs
//! The canister interface for canisters that implement HTTP requests. use crate::{ call::{AsyncCall, SyncCall}, Canister, }; use candid::{ types::{ reference::FuncVisitor, value::{IDLValue, IDLValueVisitor}, Compound, Serializer, Type, TypeInner, }, CandidType, Deserialize, Func, }; use ic_agent::{export::Principal, Agent}; use std::{ borrow::Cow, convert::TryInto, fmt::Debug, marker::PhantomData, ops::{Deref, DerefMut}, }; /// A canister that can serve a HTTP request. #[derive(Debug, Clone)] pub struct HttpRequestCanister<'agent>(Canister<'agent>); impl<'agent> Deref for HttpRequestCanister<'agent> { type Target = Canister<'agent>; fn deref(&self) -> &Self::Target { &self.0 } } /// A key-value pair for a HTTP header. #[derive(Debug, CandidType, Clone, Deserialize)] pub struct HeaderField<'a>(pub Cow<'a, str>, pub Cow<'a, str>); /// The important components of an HTTP request. #[derive(Debug, Clone, CandidType)] struct HttpRequest<'a, H> { /// The HTTP method string. pub method: &'a str, /// The URL that was visited. pub url: &'a str, /// The request headers. pub headers: H, /// The request body. pub body: &'a [u8], /// The certificate version. pub certificate_version: Option<&'a u16>, } /// The important components of an HTTP update request. /// This is the same as `HttpRequest`, excluding the `certificate_version` property. #[derive(Debug, Clone, CandidType)] struct HttpUpdateRequest<'a, H> { /// The HTTP method string. pub method: &'a str, /// The URL that was visited. pub url: &'a str, /// The request headers. pub headers: H, /// The request body. 
pub body: &'a [u8], } /// A wrapper around an iterator of headers #[derive(Debug, Clone)] pub struct Headers<H>(H); impl<'a, H: Clone + ExactSizeIterator<Item = HeaderField<'a>>> From<H> for Headers<H> { fn from(h: H) -> Self { Headers(h) } } impl<'a, H: Clone + ExactSizeIterator<Item = HeaderField<'a>>> CandidType for Headers<H> { fn _ty() -> Type { TypeInner::Vec(HeaderField::ty()).into() } fn idl_serialize<S: Serializer>(&self, serializer: S) -> Result<(), S::Error> { let mut ser = serializer.serialize_vec(self.0.len())?; for e in self.0.clone() { Compound::serialize_element(&mut ser, &e)?; } Ok(()) } } /// A HTTP response. #[derive(Debug, Clone, CandidType, Deserialize)] pub struct HttpResponse<Token = self::Token, Callback = HttpRequestStreamingCallback> { /// The HTTP status code. pub status_code: u16, /// The response header map. pub headers: Vec<HeaderField<'static>>, /// The response body. #[serde(with = "serde_bytes")] pub body: Vec<u8>, /// The strategy for streaming the rest of the data, if the full response is to be streamed. pub streaming_strategy: Option<StreamingStrategy<Token, Callback>>, /// Whether the query call should be upgraded to an update call. 
pub upgrade: Option<bool>, } impl<T1, C1> HttpResponse<T1, C1> { /// Convert another streaming strategy pub fn from<T2: Into<T1>, C2: Into<C1>>(v: HttpResponse<T2, C2>) -> Self { Self { status_code: v.status_code, headers: v.headers, body: v.body, streaming_strategy: v.streaming_strategy.map(StreamingStrategy::from), upgrade: v.upgrade, } } /// Convert this streaming strategy pub fn into<T2, C2>(self) -> HttpResponse<T2, C2> where T1: Into<T2>, C1: Into<C2>, { HttpResponse::from(self) } /// Attempt to convert another streaming strategy pub fn try_from<T2, C2, E>(v: HttpResponse<T2, C2>) -> Result<Self, E> where T2: TryInto<T1>, C2: TryInto<C1>, T2::Error: Into<E>, C2::Error: Into<E>, { Ok(Self { status_code: v.status_code, headers: v.headers, body: v.body, streaming_strategy: v .streaming_strategy .map(StreamingStrategy::try_from) .transpose()?, upgrade: v.upgrade, }) } /// Attempt to convert this streaming strategy pub fn try_into<T2, C2, E>(self) -> Result<HttpResponse<T2, C2>, E> where T1: TryInto<T2>, C1: TryInto<C2>, T1::Error: Into<E>, C1::Error: Into<E>, { HttpResponse::try_from(self) } } /// Possible strategies for a streaming response. #[derive(Debug, Clone, CandidType, Deserialize)] pub enum StreamingStrategy<Token = self::Token, Callback = HttpRequestStreamingCallback> { /// A callback-based streaming strategy, where a callback function is provided for continuing the stream. 
Callback(CallbackStrategy<Token, Callback>), } impl<T1, C1> StreamingStrategy<T1, C1> { /// Convert another streaming strategy pub fn from<T2: Into<T1>, C2: Into<C1>>(v: StreamingStrategy<T2, C2>) -> Self { match v { StreamingStrategy::Callback(c) => Self::Callback(c.into()), } } /// Convert this streaming strategy pub fn into<T2, C2>(self) -> StreamingStrategy<T2, C2> where T1: Into<T2>, C1: Into<C2>, { StreamingStrategy::from(self) } /// Attempt to convert another streaming strategy pub fn try_from<T2, C2, E>(v: StreamingStrategy<T2, C2>) -> Result<Self, E> where T2: TryInto<T1>, C2: TryInto<C1>, T2::Error: Into<E>, C2::Error: Into<E>, { Ok(match v { StreamingStrategy::Callback(c) => Self::Callback(c.try_into()?), }) } /// Attempt to convert this streaming strategy pub fn try_into<T2, C2, E>(self) -> Result<StreamingStrategy<T2, C2>, E> where T1: TryInto<T2>, C1: TryInto<C2>, T1::Error: Into<E>, C1::Error: Into<E>, { StreamingStrategy::try_from(self) } } /// A callback-token pair for a callback streaming strategy. #[derive(Debug, Clone, CandidType, Deserialize)] pub struct CallbackStrategy<Token = self::Token, Callback = HttpRequestStreamingCallback> { /// The callback function to be called to continue the stream. pub callback: Callback, /// The token to pass to the function. 
pub token: Token, } impl<T1, C1> CallbackStrategy<T1, C1> { /// Convert another callback strategy pub fn from<T2: Into<T1>, C2: Into<C1>>(v: CallbackStrategy<T2, C2>) -> Self { Self { callback: v.callback.into(), token: v.token.into(), } } /// Convert this callback strategy pub fn into<T2, C2>(self) -> CallbackStrategy<T2, C2> where T1: Into<T2>, C1: Into<C2>, { CallbackStrategy::from(self) } /// Attempt to convert another callback strategy pub fn try_from<T2, C2, E>(v: CallbackStrategy<T2, C2>) -> Result<Self, E> where T2: TryInto<T1>, C2: TryInto<C1>, T2::Error: Into<E>, C2::Error: Into<E>, { Ok(Self { callback: v.callback.try_into().map_err(Into::into)?, token: v.token.try_into().map_err(Into::into)?, }) } /// Attempt to convert this callback strategy pub fn try_into<T2, C2, E>(self) -> Result<CallbackStrategy<T2, C2>, E> where T1: TryInto<T2>, C1: TryInto<C2>, T1::Error: Into<E>, C1::Error: Into<E>, { CallbackStrategy::try_from(self) } } /// A callback of any type, extremely permissive #[derive(Debug, Clone)] pub struct HttpRequestStreamingCallbackAny(pub Func); impl CandidType for HttpRequestStreamingCallbackAny { fn _ty() -> Type { TypeInner::Reserved.into() } fn idl_serialize<S: Serializer>(&self, _serializer: S) -> Result<(), S::Error> { // We cannot implement serialize, since our type must be `Reserved` in order to accept anything. // Attempting to serialize this type is always an error and should be regarded as a compile time error. unimplemented!("Callback is not serializable") } } impl<'de> Deserialize<'de> for HttpRequestStreamingCallbackAny { fn deserialize<D: serde::de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> { // Ya know it says `ignored`, but what if we just didn't ignore it. 
deserializer.deserialize_ignored_any(FuncVisitor).map(Self) } } impl From<Func> for HttpRequestStreamingCallbackAny { fn from(f: Func) -> Self { Self(f) } } impl From<HttpRequestStreamingCallbackAny> for Func { fn from(c: HttpRequestStreamingCallbackAny) -> Self { c.0 } } /// A callback of type `shared query (Token) -> async StreamingCallbackHttpResponse` #[derive(Debug, Clone)] pub struct HttpRequestStreamingCallback<ArgToken = self::ArgToken>( pub Func, pub PhantomData<ArgToken>, ); impl<ArgToken: CandidType> CandidType for HttpRequestStreamingCallback<ArgToken> { fn _ty() -> Type { candid::func!((ArgToken) -> (StreamingCallbackHttpResponse::<ArgToken>) query) } fn idl_serialize<S: Serializer>(&self, serializer: S) -> Result<(), S::Error> { self.0.idl_serialize(serializer) } } impl<'de, ArgToken> Deserialize<'de> for HttpRequestStreamingCallback<ArgToken> { fn deserialize<D: serde::de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> { Func::deserialize(deserializer).map(Self::from) } } impl<ArgToken> From<Func> for HttpRequestStreamingCallback<ArgToken> { fn from(f: Func) -> Self { Self(f, PhantomData) } } impl<ArgToken> From<HttpRequestStreamingCallback<ArgToken>> for Func { fn from(c: HttpRequestStreamingCallback<ArgToken>) -> Self { c.0 } } impl<ArgToken> Deref for HttpRequestStreamingCallback<ArgToken> { type Target = Func; fn deref(&self) -> &Func { &self.0 } } impl<ArgToken> DerefMut for HttpRequestStreamingCallback<ArgToken> { fn deref_mut(&mut self) -> &mut Func { &mut self.0 } } /// The next chunk of a streaming HTTP response. #[derive(Debug, Clone, CandidType, Deserialize)] pub struct StreamingCallbackHttpResponse<Token = self::Token> { /// The body of the stream chunk. #[serde(with = "serde_bytes")] pub body: Vec<u8>, /// The new stream continuation token. pub token: Option<Token>, } /// A token for continuing a callback streaming strategy. 
This type cannot be serialized despite implementing `CandidType` #[derive(Debug, Clone, PartialEq)] pub struct Token(pub IDLValue); impl CandidType for Token { fn _ty() -> Type { TypeInner::Reserved.into() } fn idl_serialize<S: Serializer>(&self, _serializer: S) -> Result<(), S::Error> { // We cannot implement serialize, since our type must be `Reserved` in order to accept anything. // Attempting to serialize this type is always an error and should be regarded as a compile time error. unimplemented!("Token is not serializable") } } impl<'de> Deserialize<'de> for Token { fn deserialize<D: serde::de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> { // Ya know it says `ignored`, but what if we just didn't ignore it. deserializer .deserialize_ignored_any(IDLValueVisitor) .map(Token) } } /// A marker type to match unconstrained callback arguments #[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)] pub struct ArgToken; impl CandidType for ArgToken { fn _ty() -> Type { TypeInner::Empty.into() } fn idl_serialize<S: Serializer>(&self, _serializer: S) -> Result<(), S::Error> { // We cannot implement serialize, since our type must be `Empty` in order to accept anything. // Attempting to serialize this type is always an error and should be regarded as a compile time error. unimplemented!("Token is not serializable") } } impl<'agent> HttpRequestCanister<'agent> { /// Create an instance of a `HttpRequestCanister` interface pointing to the specified Canister ID. pub fn create(agent: &'agent Agent, canister_id: Principal) -> Self { Self( Canister::builder() .with_agent(agent) .with_canister_id(canister_id) .build() .unwrap(), ) } /// Create a `HttpRequestCanister` interface from an existing canister object. pub fn from_canister(canister: Canister<'agent>) -> Self { Self(canister) } } impl<'agent> HttpRequestCanister<'agent> { /// Performs a HTTP request, receiving a HTTP response. 
pub fn http_request<'canister: 'agent>( &'canister self, method: impl AsRef<str>, url: impl AsRef<str>, headers: impl IntoIterator< Item = HeaderField<'agent>, IntoIter = impl 'agent + Send + Sync + Clone + ExactSizeIterator<Item = HeaderField<'agent>>, >, body: impl AsRef<[u8]>, certificate_version: Option<&u16>, ) -> impl 'agent + SyncCall<Value = (HttpResponse,)> { self.http_request_custom( method.as_ref(), url.as_ref(), headers.into_iter(), body.as_ref(), certificate_version, ) } /// Performs a HTTP request, receiving a HTTP response. /// `T` and `C` are the `token` and `callback` types for the `streaming_strategy`. pub fn http_request_custom<'canister: 'agent, H, T, C>( &'canister self, method: &str, url: &str, headers: H, body: &[u8], certificate_version: Option<&u16>, ) -> impl 'agent + SyncCall<Value = (HttpResponse<T, C>,)> where H: 'agent + Send + Sync + Clone + ExactSizeIterator<Item = HeaderField<'agent>>, T: 'agent + Send + Sync + CandidType + for<'de> Deserialize<'de>, C: 'agent + Send + Sync + CandidType + for<'de> Deserialize<'de>, { self.query("http_request") .with_arg(HttpRequest { method, url, headers: Headers(headers), body, certificate_version, }) .build() } /// Performs a HTTP request over an update call. Unlike query calls, update calls must pass consensus /// and therefore cannot be tampered with by a malicious node. pub fn http_request_update<'canister: 'agent>( &'canister self, method: impl AsRef<str>, url: impl AsRef<str>, headers: impl 'agent + Send + Sync + Clone + ExactSizeIterator<Item = HeaderField<'agent>>, body: impl AsRef<[u8]>, ) -> impl 'agent + AsyncCall<Value = (HttpResponse,)> { self.http_request_update_custom(method.as_ref(), url.as_ref(), headers, body.as_ref()) } /// Performs a HTTP request over an update call. Unlike query calls, update calls must pass consensus /// and therefore cannot be tampered with by a malicious node. /// `T` and `C` are the `token` and `callback` types for the `streaming_strategy`. 
pub fn http_request_update_custom<'canister: 'agent, H, T, C>( &'canister self, method: &str, url: &str, headers: H, body: &[u8], ) -> impl 'agent + AsyncCall<Value = (HttpResponse<T, C>,)> where H: 'agent + Send + Sync + Clone + ExactSizeIterator<Item = HeaderField<'agent>>, T: 'agent + Send + Sync + CandidType + for<'de> Deserialize<'de>, C: 'agent + Send + Sync + CandidType + for<'de> Deserialize<'de>, { self.update("http_request_update") .with_arg(HttpUpdateRequest { method, url, headers: Headers(headers), body, }) .build() } /// Retrieves the next chunk of a stream from a streaming callback, using the method from [`CallbackStrategy`]. pub fn http_request_stream_callback<'canister: 'agent>( &'canister self, method: impl AsRef<str>, token: Token, ) -> impl 'agent + SyncCall<Value = (StreamingCallbackHttpResponse,)> { self.query(method.as_ref()).with_value_arg(token.0).build() } /// Retrieves the next chunk of a stream from a streaming callback, using the method from [`CallbackStrategy`]. /// `T` is the `token` type. 
pub fn http_request_stream_callback_custom<'canister: 'agent, T>( &'canister self, method: impl AsRef<str>, token: T, ) -> impl 'agent + SyncCall<Value = (StreamingCallbackHttpResponse<T>,)> where T: 'agent + Send + Sync + CandidType + for<'de> Deserialize<'de>, { self.query(method.as_ref()).with_arg(token).build() } } #[cfg(test)] mod test { use crate::interfaces::http_request::HttpRequestStreamingCallbackAny; use super::{ CallbackStrategy, HttpRequestStreamingCallback, HttpResponse, StreamingCallbackHttpResponse, StreamingStrategy, Token, }; use candid::{ types::value::{IDLField, IDLValue}, CandidType, Decode, Deserialize, Encode, }; use serde::de::DeserializeOwned; mod pre_update_legacy { use candid::{define_function, CandidType, Deserialize, Nat}; use serde_bytes::ByteBuf; #[derive(CandidType, Deserialize)] pub struct Token { pub key: String, pub content_encoding: String, pub index: Nat, pub sha256: Option<ByteBuf>, } define_function!(pub CallbackFunc : () -> ()); #[derive(CandidType, Deserialize)] pub struct CallbackStrategy { pub callback: CallbackFunc, pub token: Token, } #[derive(CandidType, Clone, Deserialize)] pub struct HeaderField(pub String, pub String); #[derive(CandidType, Deserialize)] pub enum StreamingStrategy { Callback(CallbackStrategy), } #[derive(CandidType, Deserialize)] pub struct HttpResponse { pub status_code: u16, pub headers: Vec<HeaderField>, #[serde(with = "serde_bytes")] pub body: Vec<u8>, pub streaming_strategy: Option<StreamingStrategy>, } } #[test] fn deserialize_legacy_http_response() { let bytes: Vec<u8> = Encode!(&pre_update_legacy::HttpResponse { status_code: 100, headers: Vec::new(), body: Vec::new(), streaming_strategy: None, }) .unwrap(); let _response = Decode!(&bytes, HttpResponse).unwrap(); } #[test] fn deserialize_response_with_token() { use candid::{types::Label, Func, Principal}; fn decode<C: CandidType + DeserializeOwned>(bytes: &[u8]) { let response = Decode!(bytes, HttpResponse::<_, C>).unwrap(); 
assert_eq!(response.status_code, 100); let Some(StreamingStrategy::Callback(CallbackStrategy { token, .. })) = response.streaming_strategy else { panic!("streaming_strategy was missing"); }; let Token(IDLValue::Record(fields)) = token else { panic!("token type mismatched {token:?}"); }; assert!(fields.contains(&IDLField { id: Label::Named("key".into()), val: IDLValue::Text("foo".into()) })); assert!(fields.contains(&IDLField { id: Label::Named("content_encoding".into()), val: IDLValue::Text("bar".into()) })); assert!(fields.contains(&IDLField { id: Label::Named("index".into()), val: IDLValue::Nat(42u8.into()) })); assert!(fields.contains(&IDLField { id: Label::Named("sha256".into()), val: IDLValue::None })); } // Test if we can load legacy responses that use the `Func` workaround hack let bytes = Encode!(&HttpResponse { status_code: 100, headers: Vec::new(), body: Vec::new(), streaming_strategy: Some(StreamingStrategy::Callback(CallbackStrategy { callback: pre_update_legacy::CallbackFunc(Func { principal: Principal::from_text("2chl6-4hpzw-vqaaa-aaaaa-c").unwrap(), method: "callback".into() }), token: pre_update_legacy::Token { key: "foo".into(), content_encoding: "bar".into(), index: 42u8.into(), sha256: None, }, })), upgrade: None, }) .unwrap(); decode::<pre_update_legacy::CallbackFunc>(&bytes); decode::<HttpRequestStreamingCallbackAny>(&bytes); let bytes = Encode!(&HttpResponse { status_code: 100, headers: Vec::new(), body: Vec::new(), streaming_strategy: Some(StreamingStrategy::Callback(CallbackStrategy::< _, HttpRequestStreamingCallback, > { callback: Func { principal: Principal::from_text("2chl6-4hpzw-vqaaa-aaaaa-c").unwrap(), method: "callback".into() } .into(), token: pre_update_legacy::Token { key: "foo".into(), content_encoding: "bar".into(), index: 42u8.into(), sha256: None, }, })), upgrade: None, }) .unwrap(); decode::<HttpRequestStreamingCallback>(&bytes); decode::<HttpRequestStreamingCallbackAny>(&bytes); } #[test] fn 
deserialize_streaming_response_with_token() { use candid::types::Label; let bytes: Vec<u8> = Encode!(&StreamingCallbackHttpResponse { body: b"this is a body".as_ref().into(), token: Some(pre_update_legacy::Token { key: "foo".into(), content_encoding: "bar".into(), index: 42u8.into(), sha256: None, }), }) .unwrap(); let response = Decode!(&bytes, StreamingCallbackHttpResponse).unwrap(); assert_eq!(response.body, b"this is a body"); let Some(Token(IDLValue::Record(fields))) = response.token else { panic!("token type mismatched {:?}", response.token); }; assert!(fields.contains(&IDLField { id: Label::Named("key".into()), val: IDLValue::Text("foo".into()) })); assert!(fields.contains(&IDLField { id: Label::Named("content_encoding".into()), val: IDLValue::Text("bar".into()) })); assert!(fields.contains(&IDLField { id: Label::Named("index".into()), val: IDLValue::Nat(42u8.into()) })); assert!(fields.contains(&IDLField { id: Label::Named("sha256".into()), val: IDLValue::None })); } #[test] fn deserialize_streaming_response_without_token() { mod missing_token { use candid::{CandidType, Deserialize}; /// The next chunk of a streaming HTTP response. #[derive(Debug, Clone, CandidType, Deserialize)] pub struct StreamingCallbackHttpResponse { /// The body of the stream chunk. 
#[serde(with = "serde_bytes")] pub body: Vec<u8>, } } let bytes: Vec<u8> = Encode!(&missing_token::StreamingCallbackHttpResponse { body: b"this is a body".as_ref().into(), }) .unwrap(); let response = Decode!(&bytes, StreamingCallbackHttpResponse).unwrap(); assert_eq!(response.body, b"this is a body"); assert_eq!(response.token, None); let bytes: Vec<u8> = Encode!(&StreamingCallbackHttpResponse { body: b"this is a body".as_ref().into(), token: Option::<pre_update_legacy::Token>::None, }) .unwrap(); let response = Decode!(&bytes, StreamingCallbackHttpResponse).unwrap(); assert_eq!(response.body, b"this is a body"); assert_eq!(response.token, None); } #[test] fn deserialize_with_enum_token() { #[derive(Debug, Clone, CandidType, Deserialize)] pub enum EnumToken { Foo, Bar, Baz, } #[derive(Debug, Clone, CandidType, Deserialize)] pub struct EmbedToken { value: String, other_value: EnumToken, } let bytes: Vec<u8> = Encode!(&StreamingCallbackHttpResponse { body: b"this is a body".as_ref().into(), token: Some(EnumToken::Foo), }) .unwrap(); let response = Decode!(&bytes, StreamingCallbackHttpResponse).unwrap(); assert_eq!(response.body, b"this is a body"); assert!(response.token.is_some()); let bytes: Vec<u8> = Encode!(&StreamingCallbackHttpResponse { body: b"this is a body".as_ref().into(), token: Option::<EnumToken>::None, }) .unwrap(); let response = Decode!(&bytes, StreamingCallbackHttpResponse).unwrap(); assert_eq!(response.body, b"this is a body"); assert_eq!(response.token, None); let bytes: Vec<u8> = Encode!(&StreamingCallbackHttpResponse { body: b"this is a body".as_ref().into(), token: Some(EmbedToken { value: "token string".into(), other_value: EnumToken::Foo }), }) .unwrap(); let response = Decode!(&bytes, StreamingCallbackHttpResponse).unwrap(); assert_eq!(response.body, b"this is a body"); assert!(response.token.is_some()); let bytes: Vec<u8> = Encode!(&StreamingCallbackHttpResponse { body: b"this is a body".as_ref().into(), token: Option::<EmbedToken>::None, 
}) .unwrap(); let response = Decode!(&bytes, StreamingCallbackHttpResponse).unwrap(); assert_eq!(response.body, b"this is a body"); assert_eq!(response.token, None); } }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-utils/src/interfaces/management_canister.rs
ic-utils/src/interfaces/management_canister.rs
//! The canister interface for the IC management canister. See the [specification][spec] for full documentation of the interface. //! //! [spec]: https://internetcomputer.org/docs/current/references/ic-interface-spec#ic-management-canister use crate::{ call::{AsyncCall, SyncCall}, Canister, }; use ic_agent::{export::Principal, Agent}; use ic_management_canister_types::{ CanisterIdRecord, DeleteCanisterSnapshotArgs, LoadCanisterSnapshotArgs, ProvisionalTopUpCanisterArgs, ReadCanisterSnapshotDataArgs, ReadCanisterSnapshotMetadataArgs, TakeCanisterSnapshotArgs, UploadCanisterSnapshotDataArgs, UploadCanisterSnapshotMetadataArgs, UploadChunkArgs, }; // Re-export the types that are used be defined in this file. pub use ic_management_canister_types::{ CanisterLogRecord, CanisterStatusResult, CanisterStatusType, CanisterTimer, ChunkHash, DefiniteCanisterSettings, FetchCanisterLogsResult, LogVisibility, OnLowWasmMemoryHookStatus, QueryStats, ReadCanisterSnapshotDataResult, ReadCanisterSnapshotMetadataResult, Snapshot, SnapshotDataKind, SnapshotDataOffset, SnapshotMetadataGlobal, SnapshotSource, StoredChunksResult, UploadCanisterSnapshotMetadataResult, UploadChunkResult, }; use std::{convert::AsRef, ops::Deref}; use strum_macros::{AsRefStr, Display, EnumString}; pub mod attributes; pub mod builders; #[doc(inline)] pub use builders::{ CreateCanisterBuilder, InstallBuilder, InstallChunkedCodeBuilder, InstallCodeBuilder, UpdateCanisterBuilder, }; /// The IC management canister. #[derive(Debug, Clone)] pub struct ManagementCanister<'agent>(Canister<'agent>); impl<'agent> Deref for ManagementCanister<'agent> { type Target = Canister<'agent>; fn deref(&self) -> &Self::Target { &self.0 } } /// All the known methods of the management canister. #[derive(AsRefStr, Debug, EnumString, Display)] #[strum(serialize_all = "snake_case")] pub enum MgmtMethod { /// See [`ManagementCanister::create_canister`]. CreateCanister, /// See [`ManagementCanister::install_code`]. 
InstallCode, /// See [`ManagementCanister::start_canister`]. StartCanister, /// See [`ManagementCanister::stop_canister`]. StopCanister, /// See [`ManagementCanister::canister_status`]. CanisterStatus, /// See [`ManagementCanister::delete_canister`]. DeleteCanister, /// See [`ManagementCanister::deposit_cycles`]. DepositCycles, /// See [`ManagementCanister::raw_rand`]. RawRand, /// See [`CreateCanisterBuilder::as_provisional_create_with_amount`]. ProvisionalCreateCanisterWithCycles, /// See [`ManagementCanister::provisional_top_up_canister`]. ProvisionalTopUpCanister, /// See [`ManagementCanister::uninstall_code`]. UninstallCode, /// See [`ManagementCanister::update_settings`]. UpdateSettings, /// See [`ManagementCanister::upload_chunk`]. UploadChunk, /// See [`ManagementCanister::clear_chunk_store`]. ClearChunkStore, /// See [`ManagementCanister::stored_chunks`]. StoredChunks, /// See [`ManagementCanister::install_chunked_code`]. InstallChunkedCode, /// See [`ManagementCanister::fetch_canister_logs`]. FetchCanisterLogs, /// See [`ManagementCanister::take_canister_snapshot`]. TakeCanisterSnapshot, /// See [`ManagementCanister::load_canister_snapshot`]. LoadCanisterSnapshot, /// See [`ManagementCanister::list_canister_snapshots`]. ListCanisterSnapshots, /// See [`ManagementCanister::delete_canister_snapshot`]. DeleteCanisterSnapshot, /// See [`ManagementCanister::read_canister_snapshot_metadata`]. ReadCanisterSnapshotMetadata, /// See [`ManagementCanister::read_canister_snapshot_data`]. ReadCanisterSnapshotData, /// See [`ManagementCanister::upload_canister_snapshot_metadata`]. UploadCanisterSnapshotMetadata, /// See [`ManagementCanister::upload_canister_snapshot_data`]. UploadCanisterSnapshotData, /// There is no corresponding agent function as only canisters can call it. EcdsaPublicKey, /// There is no corresponding agent function as only canisters can call it. SignWithEcdsa, /// There is no corresponding agent function as only canisters can call it. 
Use [`BitcoinCanister`](super::BitcoinCanister) instead. BitcoinGetBalance, /// There is no corresponding agent function as only canisters can call it. Use [`BitcoinCanister`](super::BitcoinCanister) instead. BitcoinGetUtxos, /// There is no corresponding agent function as only canisters can call it. Use [`BitcoinCanister`](super::BitcoinCanister) instead. BitcoinSendTransaction, /// There is no corresponding agent function as only canisters can call it. Use [`BitcoinCanister`](super::BitcoinCanister) instead. BitcoinGetCurrentFeePercentiles, /// There is no corresponding agent function as only canisters can call it. Use [`BitcoinCanister`](super::BitcoinCanister) instead. BitcoinGetBlockHeaders, /// There is no corresponding agent function as only canisters can call it. NodeMetricsHistory, /// There is no corresponding agent function as only canisters can call it. CanisterInfo, } impl<'agent> ManagementCanister<'agent> { /// Create an instance of a `ManagementCanister` interface pointing to the specified Canister ID. pub fn create(agent: &'agent Agent) -> Self { Self( Canister::builder() .with_agent(agent) .with_canister_id(Principal::management_canister()) .build() .unwrap(), ) } /// Create a `ManagementCanister` interface from an existing canister object. 
pub fn from_canister(canister: Canister<'agent>) -> Self { Self(canister) } } #[doc(hidden)] #[deprecated(since = "0.42.0", note = "Please use CanisterStatusResult instead")] pub type StatusCallResult = CanisterStatusResult; #[doc(hidden)] #[deprecated(since = "0.42.0", note = "Please use CanisterStatusType instead")] pub type CanisterStatus = CanisterStatusType; #[doc(hidden)] #[deprecated(since = "0.42.0", note = "Please use FetchCanisterLogsResult instead")] pub type FetchCanisterLogsResponse = FetchCanisterLogsResult; #[doc(hidden)] #[deprecated(since = "0.42.0", note = "Please use StoredChunksResult instead")] pub type StoreChunksResult = StoredChunksResult; #[doc(hidden)] #[deprecated( since = "0.42.0", note = "Please use ReadCanisterSnapshotMetadataResult instead" )] pub type SnapshotMetadata = ReadCanisterSnapshotMetadataResult; #[doc(hidden)] #[deprecated( since = "0.42.0", note = "Please use ReadCanisterSnapshotDataResult instead" )] pub type SnapshotDataResult = ReadCanisterSnapshotDataResult; #[doc(hidden)] #[deprecated( since = "0.42.0", note = "Please use UploadCanisterSnapshotMetadataResult instead" )] pub type CanisterSnapshotId = UploadCanisterSnapshotMetadataResult; impl<'agent> ManagementCanister<'agent> { /// Get the status of a canister. pub fn canister_status( &self, canister_id: &Principal, ) -> impl 'agent + AsyncCall<Value = (CanisterStatusResult,)> { self.update(MgmtMethod::CanisterStatus.as_ref()) .with_arg(CanisterIdRecord { canister_id: *canister_id, }) .with_effective_canister_id(canister_id.to_owned()) .build() .map(|result: (CanisterStatusResult,)| (result.0,)) } /// Create a canister. pub fn create_canister<'canister>(&'canister self) -> CreateCanisterBuilder<'agent, 'canister> { CreateCanisterBuilder::builder(self) } /// This method deposits the cycles included in this call into the specified canister. /// Only the controller of the canister can deposit cycles. 
pub fn deposit_cycles(&self, canister_id: &Principal) -> impl 'agent + AsyncCall<Value = ()> { self.update(MgmtMethod::DepositCycles.as_ref()) .with_arg(CanisterIdRecord { canister_id: *canister_id, }) .with_effective_canister_id(canister_id.to_owned()) .build() } /// Deletes a canister. pub fn delete_canister(&self, canister_id: &Principal) -> impl 'agent + AsyncCall<Value = ()> { self.update(MgmtMethod::DeleteCanister.as_ref()) .with_arg(CanisterIdRecord { canister_id: *canister_id, }) .with_effective_canister_id(canister_id.to_owned()) .build() } /// Until developers can convert real ICP tokens to a top up an existing canister, /// the system provides the `provisional_top_up_canister` method. /// It adds amount cycles to the balance of canister identified by amount /// (implicitly capping it at `MAX_CANISTER_BALANCE`). pub fn provisional_top_up_canister( &self, canister_id: &Principal, top_up_args: &ProvisionalTopUpCanisterArgs, ) -> impl 'agent + AsyncCall<Value = ()> { self.update(MgmtMethod::ProvisionalTopUpCanister.as_ref()) .with_arg(top_up_args) .with_effective_canister_id(canister_id.to_owned()) .build() } /// This method takes no input and returns 32 pseudo-random bytes to the caller. /// The return value is unknown to any part of the IC at time of the submission of this call. /// A new return value is generated for each call to this method. pub fn raw_rand(&self) -> impl 'agent + AsyncCall<Value = (Vec<u8>,)> { self.update(MgmtMethod::RawRand.as_ref()) .build() .map(|result: (Vec<u8>,)| (result.0,)) } /// Starts a canister. pub fn start_canister(&self, canister_id: &Principal) -> impl 'agent + AsyncCall<Value = ()> { self.update(MgmtMethod::StartCanister.as_ref()) .with_arg(CanisterIdRecord { canister_id: *canister_id, }) .with_effective_canister_id(canister_id.to_owned()) .build() } /// Stop a canister. 
pub fn stop_canister(&self, canister_id: &Principal) -> impl 'agent + AsyncCall<Value = ()> { self.update(MgmtMethod::StopCanister.as_ref()) .with_arg(CanisterIdRecord { canister_id: *canister_id, }) .with_effective_canister_id(canister_id.to_owned()) .build() } /// This method removes a canister’s code and state, making the canister empty again. /// Only the controller of the canister can uninstall code. /// Uninstalling a canister’s code will reject all calls that the canister has not yet responded to, /// and drop the canister’s code and state. /// Outstanding responses to the canister will not be processed, even if they arrive after code has been installed again. /// The canister is now empty. In particular, any incoming or queued calls will be rejected. //// A canister after uninstalling retains its cycles balance, controller, status, and allocations. pub fn uninstall_code(&self, canister_id: &Principal) -> impl 'agent + AsyncCall<Value = ()> { self.update(MgmtMethod::UninstallCode.as_ref()) .with_arg(CanisterIdRecord { canister_id: *canister_id, }) .with_effective_canister_id(canister_id.to_owned()) .build() } /// Install a canister, with all the arguments necessary for creating the canister. pub fn install_code<'canister>( &'canister self, canister_id: &Principal, wasm: &'canister [u8], ) -> InstallCodeBuilder<'agent, 'canister> { InstallCodeBuilder::builder(self, canister_id, wasm) } /// Update one or more of a canisters settings (i.e its controller, compute allocation, or memory allocation.) pub fn update_settings<'canister>( &'canister self, canister_id: &Principal, ) -> UpdateCanisterBuilder<'agent, 'canister> { UpdateCanisterBuilder::builder(self, canister_id) } /// Upload a chunk of a WASM module to a canister's chunked WASM storage. 
pub fn upload_chunk( &self, canister_id: &Principal, upload_chunk_args: &UploadChunkArgs, ) -> impl 'agent + AsyncCall<Value = (UploadChunkResult,)> { self.update(MgmtMethod::UploadChunk.as_ref()) .with_arg(upload_chunk_args) .with_effective_canister_id(*canister_id) .build() } /// Clear a canister's chunked WASM storage. pub fn clear_chunk_store( &self, canister_id: &Principal, ) -> impl 'agent + AsyncCall<Value = ()> { self.update(MgmtMethod::ClearChunkStore.as_ref()) .with_arg(CanisterIdRecord { canister_id: *canister_id, }) .with_effective_canister_id(*canister_id) .build() } /// Get a list of the hashes of a canister's stored WASM chunks pub fn stored_chunks( &self, canister_id: &Principal, ) -> impl 'agent + AsyncCall<Value = (StoredChunksResult,)> { self.update(MgmtMethod::StoredChunks.as_ref()) .with_arg(CanisterIdRecord { canister_id: *canister_id, }) .with_effective_canister_id(*canister_id) .build() } /// Install a canister module previously uploaded in chunks via [`upload_chunk`](Self::upload_chunk). pub fn install_chunked_code<'canister>( &'canister self, canister_id: &Principal, wasm_module_hash: &[u8], ) -> InstallChunkedCodeBuilder<'agent, 'canister> { InstallChunkedCodeBuilder::builder(self, *canister_id, wasm_module_hash) } /// Install a canister module, automatically selecting one-shot installation or chunked installation depending on module size. /// /// # Warnings /// /// This will clear chunked code storage if chunked installation is used. Do not use with canisters that you are manually uploading chunked code to. pub fn install<'canister: 'builder, 'builder>( &'canister self, canister_id: &Principal, wasm: &'builder [u8], ) -> InstallBuilder<'agent, 'canister, 'builder> { InstallBuilder::builder(self, canister_id, wasm) } /// Fetch the logs of a canister. 
pub fn fetch_canister_logs( &self, canister_id: &Principal, ) -> impl 'agent + SyncCall<Value = (FetchCanisterLogsResult,)> { // `fetch_canister_logs` is only supported in non-replicated mode. self.query(MgmtMethod::FetchCanisterLogs.as_ref()) .with_arg(CanisterIdRecord { canister_id: *canister_id, }) .with_effective_canister_id(*canister_id) .build() } /// Creates a canister snapshot, optionally replacing an existing snapshot. /// /// <div class="warning">Canisters should be stopped before running this method!</div> pub fn take_canister_snapshot( &self, canister_id: &Principal, take_args: &TakeCanisterSnapshotArgs, ) -> impl 'agent + AsyncCall<Value = (Snapshot,)> { self.update(MgmtMethod::TakeCanisterSnapshot.as_ref()) .with_arg(take_args) .with_effective_canister_id(*canister_id) .build() } /// Loads a canister snapshot by ID, replacing the canister's state with its state at the time the snapshot was taken. /// /// <div class="warning">Canisters should be stopped before running this method!</div> pub fn load_canister_snapshot( &self, canister_id: &Principal, load_args: &LoadCanisterSnapshotArgs, ) -> impl 'agent + AsyncCall<Value = ()> { self.update(MgmtMethod::LoadCanisterSnapshot.as_ref()) .with_arg(load_args) .with_effective_canister_id(*canister_id) .build() } /// List a canister's recorded snapshots. pub fn list_canister_snapshots( &self, canister_id: &Principal, ) -> impl 'agent + AsyncCall<Value = (Vec<Snapshot>,)> { self.update(MgmtMethod::ListCanisterSnapshots.as_ref()) .with_arg(CanisterIdRecord { canister_id: *canister_id, }) .with_effective_canister_id(*canister_id) .build() } /// Deletes a recorded canister snapshot by ID. 
pub fn delete_canister_snapshot( &self, canister_id: &Principal, delete_args: &DeleteCanisterSnapshotArgs, ) -> impl 'agent + AsyncCall<Value = ()> { self.update(MgmtMethod::DeleteCanisterSnapshot.as_ref()) .with_arg(delete_args) .with_effective_canister_id(*canister_id) .build() } /// Reads the metadata of a recorded canister snapshot by canister ID and snapshot ID. pub fn read_canister_snapshot_metadata( &self, canister_id: &Principal, metadata_args: &ReadCanisterSnapshotMetadataArgs, ) -> impl 'agent + AsyncCall<Value = (ReadCanisterSnapshotMetadataResult,)> { self.update(MgmtMethod::ReadCanisterSnapshotMetadata.as_ref()) .with_arg(metadata_args) .with_effective_canister_id(*canister_id) .build() } /// Reads the data of a recorded canister snapshot by canister ID and snapshot ID. pub fn read_canister_snapshot_data( &self, canister_id: &Principal, data_args: &ReadCanisterSnapshotDataArgs, ) -> impl 'agent + AsyncCall<Value = (ReadCanisterSnapshotDataResult,)> { self.update(MgmtMethod::ReadCanisterSnapshotData.as_ref()) .with_arg(data_args) .with_effective_canister_id(*canister_id) .build() } /// Uploads the metadata of a canister snapshot by canister ID. pub fn upload_canister_snapshot_metadata( &self, canister_id: &Principal, metadata_args: &UploadCanisterSnapshotMetadataArgs, ) -> impl 'agent + AsyncCall<Value = (UploadCanisterSnapshotMetadataResult,)> { self.update(MgmtMethod::UploadCanisterSnapshotMetadata.as_ref()) .with_arg(metadata_args) .with_effective_canister_id(*canister_id) .build() } /// Uploads the data of a canister snapshot by canister ID and snapshot ID.. pub fn upload_canister_snapshot_data( &self, canister_id: &Principal, data_args: &UploadCanisterSnapshotDataArgs, ) -> impl 'agent + AsyncCall<Value = ()> { self.update(MgmtMethod::UploadCanisterSnapshotData.as_ref()) .with_arg(data_args) .with_effective_canister_id(*canister_id) .build() } }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-utils/src/interfaces/wallet.rs
ic-utils/src/interfaces/wallet.rs
//! The canister interface for the [cycles wallet] canister. //! //! [cycles wallet]: https://github.com/dfinity/cycles-wallet use std::{ future::{Future, IntoFuture}, ops::Deref, }; use crate::{ call::{AsyncCall, CallFuture, SyncCall}, canister::Argument, interfaces::management_canister::{ attributes::{ComputeAllocation, FreezingThreshold, MemoryAllocation}, builders::CanisterSettings, }, Canister, }; use async_trait::async_trait; use candid::{decode_args, utils::ArgumentDecoder, CandidType, Deserialize, Nat}; use ic_agent::{agent::CallResponse, export::Principal, Agent, AgentError}; use once_cell::sync::Lazy; use semver::{Version, VersionReq}; const REPLICA_ERROR_NO_SUCH_QUERY_METHOD: &str = "has no query method 'wallet_api_version'"; const IC_REF_ERROR_NO_SUCH_QUERY_METHOD: &str = "query method does not exist"; /// An interface for forwarding a canister method call through the wallet canister via `wallet_canister_call`. #[derive(Debug)] pub struct CallForwarder<'agent, 'canister, Out> where Out: for<'de> ArgumentDecoder<'de> + Send + Sync, { wallet: &'canister WalletCanister<'agent>, destination: Principal, method_name: String, amount: u128, u128: bool, arg: Argument, phantom_out: std::marker::PhantomData<Out>, } /// A canister's settings. Similar to the canister settings struct from [`management_canister`](super::management_canister), /// but the management canister may evolve to have more settings without the wallet canister evolving to recognize them. #[derive(Debug, Clone, CandidType, Deserialize)] pub struct CanisterSettingsV1 { /// The set of canister controllers. Controllers can update the canister via the management canister. pub controller: Option<Principal>, /// The allocation percentage (between 0 and 100 inclusive) for *guaranteed* compute capacity. pub compute_allocation: Option<Nat>, /// The allocation, in bytes (up to 256 TiB) that the canister is allowed to use for storage. 
pub memory_allocation: Option<Nat>, /// The IC will freeze a canister protectively if it will likely run out of cycles before this amount of time, in seconds (up to `u64::MAX`), has passed. pub freezing_threshold: Option<Nat>, } impl<'agent: 'canister, 'canister, Out> CallForwarder<'agent, 'canister, Out> where Out: for<'de> ArgumentDecoder<'de> + Send + Sync + 'agent, { /// Set the argument with candid argument. Can be called at most once. pub fn with_arg<Argument>(mut self, arg: Argument) -> Self where Argument: CandidType + Sync + Send, { self.arg.set_idl_arg(arg); self } /// Set the argument with multiple arguments as tuple. Can be called at most once. pub fn with_args(mut self, tuple: impl candid::utils::ArgumentEncoder) -> Self { if self.arg.0.is_some() { panic!("argument is being set more than once"); } self.arg = Argument::from_candid(tuple); self } /// Set the argument with raw argument bytes. Can be called at most once. pub fn with_arg_raw(mut self, arg: Vec<u8>) -> Self { self.arg.set_raw_arg(arg); self } /// Creates an [`AsyncCall`] implementation that, when called, will forward the specified canister call. 
pub fn build(self) -> Result<impl 'agent + AsyncCall<Value = Out>, AgentError> { #[derive(CandidType, Deserialize)] struct In<TCycles> { canister: Principal, method_name: String, #[serde(with = "serde_bytes")] args: Vec<u8>, cycles: TCycles, } Ok(if self.u128 { self.wallet.update("wallet_call128").with_arg(In { canister: self.destination, method_name: self.method_name, args: self.arg.serialize()?, cycles: self.amount, }) } else { self.wallet.update("wallet_call").with_arg(In { canister: self.destination, method_name: self.method_name, args: self.arg.serialize()?, cycles: u64::try_from(self.amount).map_err(|_| { AgentError::WalletUpgradeRequired( "The installed wallet does not support cycle counts >2^64-1".to_string(), ) })?, }) } .build() .and_then(|(result,): (Result<CallResult, String>,)| async move { let result = result.map_err(AgentError::WalletCallFailed)?; decode_args::<Out>(result.r#return.as_slice()) .map_err(|e| AgentError::CandidError(Box::new(e))) })) } /// Calls the forwarded canister call on the wallet canister. Equivalent to `.build().call()`. pub fn call(self) -> impl Future<Output = Result<CallResponse<Out>, AgentError>> + 'agent { let call = self.build(); async { call?.call().await } } /// Calls the forwarded canister call on the wallet canister, and waits for the result. Equivalent to `.build().call_and_wait()`. 
pub fn call_and_wait(self) -> impl Future<Output = Result<Out, AgentError>> + 'agent { let call = self.build(); async { call?.call_and_wait().await } } } #[cfg_attr(target_family = "wasm", async_trait(?Send))] #[cfg_attr(not(target_family = "wasm"), async_trait)] impl<'agent: 'canister, 'canister, Out> AsyncCall for CallForwarder<'agent, 'canister, Out> where Out: for<'de> ArgumentDecoder<'de> + Send + Sync + 'agent, { type Value = Out; async fn call(self) -> Result<CallResponse<Out>, AgentError> { self.call().await } async fn call_and_wait(self) -> Result<Out, AgentError> { self.call_and_wait().await } } impl<'agent: 'canister, 'canister, Out> IntoFuture for CallForwarder<'agent, 'canister, Out> where Out: for<'de> ArgumentDecoder<'de> + Send + Sync + 'agent, { type IntoFuture = CallFuture<'agent, Out>; type Output = Result<Out, AgentError>; fn into_future(self) -> Self::IntoFuture { Box::pin(self.call_and_wait()) } } /// A wallet canister interface, for the standard wallet provided by DFINITY. /// This interface implements most methods conveniently for the user. #[derive(Debug, Clone)] pub struct WalletCanister<'agent> { canister: Canister<'agent>, version: Version, } impl<'agent> Deref for WalletCanister<'agent> { type Target = Canister<'agent>; fn deref(&self) -> &Self::Target { &self.canister } } /// The possible kinds of events that can be stored in an [`Event`]. #[derive(CandidType, Debug, Deserialize)] pub enum EventKind<TCycles = u128> { /// Cycles were sent to a canister. CyclesSent { /// The canister the cycles were sent to. to: Principal, /// The number of cycles that were initially sent. amount: TCycles, /// The number of cycles that were refunded by the canister. refund: TCycles, }, /// Cycles were received from a canister. CyclesReceived { /// The canister that sent the cycles. from: Principal, /// The number of cycles received. amount: TCycles, /// The memo provided with the payment. 
memo: Option<String>, }, /// A known principal was added to the address book. AddressAdded { /// The principal that was added. id: Principal, /// The friendly name of the principal, if any. name: Option<String>, /// The significance of this principal to the wallet. role: Role, }, /// A principal was removed from the address book. AddressRemoved { /// The principal that was removed. id: Principal, }, /// A canister was created. CanisterCreated { /// The canister that was created. canister: Principal, /// The initial cycles balance that the canister was created with. cycles: TCycles, }, /// A call was forwarded to the canister. CanisterCalled { /// The canister that was called. canister: Principal, /// The name of the canister method that was called. method_name: String, /// The number of cycles that were supplied with the call. cycles: TCycles, }, } impl From<EventKind<u64>> for EventKind { fn from(kind: EventKind<u64>) -> Self { use EventKind::*; match kind { AddressAdded { id, name, role } => AddressAdded { id, name, role }, AddressRemoved { id } => AddressRemoved { id }, CanisterCalled { canister, cycles, method_name, } => CanisterCalled { canister, cycles: cycles.into(), method_name, }, CanisterCreated { canister, cycles } => CanisterCreated { canister, cycles: cycles.into(), }, CyclesReceived { amount, from, memo } => CyclesReceived { amount: amount.into(), from, memo, }, CyclesSent { amount, refund, to } => CyclesSent { amount: amount.into(), refund: refund.into(), to, }, } } } /// A transaction event tracked by the wallet's history feature. #[derive(CandidType, Debug, Deserialize)] pub struct Event<TCycles = u128> { /// An ID uniquely identifying this event. pub id: u32, /// The Unix timestamp that this event occurred at. pub timestamp: u64, /// The kind of event that occurred. 
pub kind: EventKind<TCycles>, } impl From<Event<u64>> for Event { fn from( Event { id, timestamp, kind, }: Event<u64>, ) -> Self { Self { id, timestamp, kind: kind.into(), } } } /// The significance of a principal in the wallet's address book. #[derive(CandidType, Debug, Deserialize)] pub enum Role { /// The principal has no particular significance, and is only there to be assigned a friendly name or be mentioned in the event log. Contact, /// The principal is a custodian of the wallet, and can therefore access the wallet, create canisters, and send and receive cycles. Custodian, /// The principal is a controller of the wallet, and can therefore access any wallet function or action. Controller, } /// The kind of principal that a particular principal is. #[derive(CandidType, Debug, Deserialize)] pub enum Kind { /// The kind of principal is unknown, such as the anonymous principal `2vxsx-fae`. Unknown, /// The principal belongs to an external user. User, /// The principal belongs to an IC canister. Canister, } /// An entry in the address book. #[derive(CandidType, Debug, Deserialize)] pub struct AddressEntry { /// The principal being identified. pub id: Principal, /// The friendly name for this principal, if one exists. pub name: Option<String>, /// The kind of principal it is. pub kind: Kind, /// The significance of this principal to the wallet canister. pub role: Role, } /// A canister that the wallet is responsible for managing. #[derive(CandidType, Debug, Deserialize)] pub struct ManagedCanisterInfo { /// The principal ID of the canister. pub id: Principal, /// The friendly name of the canister, if one has been set. pub name: Option<String>, /// The Unix timestamp that the canister was created at. pub created_at: u64, } /// The possible kinds of events that can be stored in a [`ManagedCanisterEvent`]. #[derive(CandidType, Debug, Deserialize)] pub enum ManagedCanisterEventKind<TCycles = u128> { /// Cycles were sent to the canister. 
CyclesSent { /// The number of cycles that were sent. amount: TCycles, /// The number of cycles that were refunded. refund: TCycles, }, /// A function call was forwarded to the canister. Called { /// The name of the function that was called. method_name: String, /// The number of cycles that were provided along with the call. cycles: TCycles, }, /// The canister was created. Created { /// The number of cycles the canister was created with. cycles: TCycles, }, } impl From<ManagedCanisterEventKind<u64>> for ManagedCanisterEventKind { fn from(event: ManagedCanisterEventKind<u64>) -> Self { use ManagedCanisterEventKind::*; match event { Called { cycles, method_name, } => Called { cycles: cycles.into(), method_name, }, Created { cycles } => Created { cycles: cycles.into(), }, CyclesSent { amount, refund } => CyclesSent { amount: amount.into(), refund: refund.into(), }, } } } /// A transaction event related to a [`ManagedCanisterInfo`]. #[derive(CandidType, Deserialize, Debug)] pub struct ManagedCanisterEvent<TCycles = u128> { /// The event ID. pub id: u32, /// The Unix timestamp the event occurred at. pub timestamp: u64, /// The kind of event that occurred. pub kind: ManagedCanisterEventKind<TCycles>, } impl From<ManagedCanisterEvent<u64>> for ManagedCanisterEvent { fn from( ManagedCanisterEvent { id, timestamp, kind, }: ManagedCanisterEvent<u64>, ) -> Self { Self { id, timestamp, kind: kind.into(), } } } /// The result of a balance request. #[derive(Debug, Copy, Clone, CandidType, Deserialize)] pub struct BalanceResult<TCycles = u128> { /// The balance of the wallet, in cycles. pub amount: TCycles, } /// The result of a canister creation request. #[derive(Debug, Copy, Clone, CandidType, Deserialize)] pub struct CreateResult { /// The principal ID of the newly created (empty) canister. pub canister_id: Principal, } /// The result of a call forwarding request. 
#[derive(Debug, Clone, CandidType, Deserialize)] pub struct CallResult { /// The encoded return value blob of the canister method. #[serde(with = "serde_bytes")] pub r#return: Vec<u8>, } impl<'agent> WalletCanister<'agent> { /// Create an instance of a `WalletCanister` interface pointing to the given Canister ID. Fails if it cannot learn the wallet's version. pub async fn create( agent: &'agent Agent, canister_id: Principal, ) -> Result<WalletCanister<'agent>, AgentError> { let canister = Canister::builder() .with_agent(agent) .with_canister_id(canister_id) .build() .unwrap(); Self::from_canister(canister).await } /// Create a `WalletCanister` interface from an existing canister object. Fails if it cannot learn the wallet's version. pub async fn from_canister( canister: Canister<'agent>, ) -> Result<WalletCanister<'agent>, AgentError> { static DEFAULT_VERSION: Lazy<Version> = Lazy::new(|| Version::parse("0.1.0").unwrap()); let version: Result<(String,), _> = canister.query("wallet_api_version").build().call().await; let version = match version { Err(AgentError::UncertifiedReject { reject: replica_error, .. }) if replica_error .reject_message .contains(REPLICA_ERROR_NO_SUCH_QUERY_METHOD) || replica_error .reject_message .contains(IC_REF_ERROR_NO_SUCH_QUERY_METHOD) => { DEFAULT_VERSION.clone() } version => Version::parse(&version?.0).unwrap_or_else(|_| DEFAULT_VERSION.clone()), }; Ok(Self { canister, version }) } /// Create a `WalletCanister` interface from an existing canister object and a known wallet version. /// /// This interface's methods may raise errors if the provided version is newer than the wallet's actual supported version. pub fn from_canister_with_version(canister: Canister<'agent>, version: Version) -> Self { Self { canister, version } } } impl<'agent> WalletCanister<'agent> { /// Re-fetch the API version string of the wallet. 
pub fn fetch_wallet_api_version(&self) -> impl 'agent + SyncCall<Value = (Option<String>,)> { self.query("wallet_api_version").build() } /// Get the (cached) API version of the wallet. pub fn wallet_api_version(&self) -> &Version { &self.version } /// Get the friendly name of the wallet (if one exists). pub fn name(&self) -> impl 'agent + SyncCall<Value = (Option<String>,)> { self.query("name").build() } /// Set the friendly name of the wallet. pub fn set_name(&self, name: String) -> impl 'agent + AsyncCall<Value = ()> { self.update("set_name").with_arg(name).build() } /// Get the current controller's principal ID. pub fn get_controllers(&self) -> impl 'agent + SyncCall<Value = (Vec<Principal>,)> { self.query("get_controllers").build() } /// Transfer controller to another principal ID. pub fn add_controller(&self, principal: Principal) -> impl 'agent + AsyncCall<Value = ()> { self.update("add_controller").with_arg(principal).build() } /// Remove a user as a wallet controller. pub fn remove_controller(&self, principal: Principal) -> impl 'agent + AsyncCall<Value = ()> { self.update("remove_controller").with_arg(principal).build() } /// Get the list of custodians. pub fn get_custodians(&self) -> impl 'agent + SyncCall<Value = (Vec<Principal>,)> { self.query("get_custodians").build() } /// Authorize a new custodian. pub fn authorize(&self, custodian: Principal) -> impl 'agent + AsyncCall<Value = ()> { self.update("authorize").with_arg(custodian).build() } /// Deauthorize a custodian. pub fn deauthorize(&self, custodian: Principal) -> impl 'agent + AsyncCall<Value = ()> { self.update("deauthorize").with_arg(custodian).build() } /// Get the balance with the 64-bit API. pub fn wallet_balance64(&self) -> impl 'agent + SyncCall<Value = (BalanceResult<u64>,)> { self.query("wallet_balance").build() } /// Get the balance with the 128-bit API. 
    pub fn wallet_balance128(&self) -> impl 'agent + SyncCall<Value = (BalanceResult,)> {
        self.query("wallet_balance128").build()
    }

    /// Get the balance.
    ///
    /// Dispatches to the 128-bit endpoint when the wallet supports it,
    /// otherwise widens the 64-bit result into a `BalanceResult`.
    pub async fn wallet_balance(&self) -> Result<BalanceResult, AgentError> {
        if self.version_supports_u128_cycles() {
            self.wallet_balance128().call().await.map(|(r,)| r)
        } else {
            self.wallet_balance64()
                .call()
                .await
                .map(|(r,)| BalanceResult {
                    amount: r.amount.into(),
                })
        }
    }

    /// Send cycles to another canister using the 64-bit API.
    pub fn wallet_send64(
        &self,
        destination: Principal,
        amount: u64,
    ) -> impl 'agent + AsyncCall<Value = (Result<(), String>,)> {
        // On-wire argument shape expected by the wallet's `wallet_send` method.
        #[derive(CandidType)]
        struct In {
            canister: Principal,
            amount: u64,
        }
        self.update("wallet_send")
            .with_arg(In {
                canister: destination,
                amount,
            })
            .build()
    }

    /// Send cycles to another canister using the 128-bit API.
    pub fn wallet_send128<'canister: 'agent>(
        &'canister self,
        destination: Principal,
        amount: u128,
    ) -> impl 'agent + AsyncCall<Value = (Result<(), String>,)> {
        #[derive(CandidType)]
        struct In {
            canister: Principal,
            amount: u128,
        }
        self.update("wallet_send128")
            .with_arg(In {
                canister: destination,
                amount,
            })
            .build()
    }

    /// Send cycles to another canister.
    ///
    /// Falls back to the 64-bit endpoint on old wallets; amounts that do not
    /// fit in a `u64` then produce `AgentError::WalletUpgradeRequired`.
    pub async fn wallet_send(
        &self,
        destination: Principal,
        amount: u128,
    ) -> Result<(), AgentError> {
        if self.version_supports_u128_cycles() {
            self.wallet_send128(destination, amount)
                .call_and_wait()
                .await?
        } else {
            let amount = u64::try_from(amount).map_err(|_| {
                AgentError::WalletUpgradeRequired(
                    "The installed wallet does not support cycle counts >2^64-1.".to_string(),
                )
            })?;
            self.wallet_send64(destination, amount)
                .call_and_wait()
                .await?
        }
        // Both branches return a one-tuple wrapping the wallet's Result<(), String>.
        .0
        .map_err(AgentError::WalletError)
    }

    /// A function for sending cycles to, so that a memo can be passed along with them.
    pub fn wallet_receive(&self, memo: Option<String>) -> impl 'agent + AsyncCall<Value = ((),)> {
        #[derive(CandidType)]
        struct In {
            memo: Option<String>,
        }
        // Note the nesting: the wire argument is `opt record { memo : opt text }`,
        // so a `Some` memo is wrapped twice (outer Option<In>, inner Option<String>).
        self.update("wallet_receive")
            .with_arg(memo.map(|memo| In { memo: Some(memo) }))
            .build()
    }

    /// Create a canister through the wallet, using the single-controller 64-bit API.
    pub fn wallet_create_canister64_v1(
        &self,
        cycles: u64,
        controller: Option<Principal>,
        compute_allocation: Option<ComputeAllocation>,
        memory_allocation: Option<MemoryAllocation>,
        freezing_threshold: Option<FreezingThreshold>,
    ) -> impl 'agent + AsyncCall<Value = (Result<CreateResult, String>,)> {
        #[derive(CandidType)]
        struct In {
            cycles: u64,
            settings: CanisterSettingsV1,
        }
        // Attribute newtypes are lowered to Nat for candid encoding.
        let settings = CanisterSettingsV1 {
            controller,
            compute_allocation: compute_allocation.map(u8::from).map(Nat::from),
            memory_allocation: memory_allocation.map(u64::from).map(Nat::from),
            freezing_threshold: freezing_threshold.map(u64::from).map(Nat::from),
        };
        self.update("wallet_create_canister")
            .with_arg(In { cycles, settings })
            .build()
            // Identity re-wrap; pins the decoded tuple type of the call.
            .map(|result: (Result<CreateResult, String>,)| (result.0,))
    }

    /// Create a canister through the wallet, using the multi-controller 64-bit API.
    pub fn wallet_create_canister64_v2(
        &self,
        cycles: u64,
        controllers: Option<Vec<Principal>>,
        compute_allocation: Option<ComputeAllocation>,
        memory_allocation: Option<MemoryAllocation>,
        freezing_threshold: Option<FreezingThreshold>,
    ) -> impl 'agent + AsyncCall<Value = (Result<CreateResult, String>,)> {
        #[derive(CandidType)]
        struct In {
            cycles: u64,
            settings: CanisterSettings,
        }
        // Settings the wallet API does not expose are always sent as None.
        let settings = CanisterSettings {
            controllers,
            compute_allocation: compute_allocation.map(u8::from).map(Nat::from),
            memory_allocation: memory_allocation.map(u64::from).map(Nat::from),
            freezing_threshold: freezing_threshold.map(u64::from).map(Nat::from),
            reserved_cycles_limit: None,
            wasm_memory_limit: None,
            wasm_memory_threshold: None,
            log_visibility: None,
            environment_variables: None,
        };
        // Same method name as v1 ("wallet_create_canister"); only the settings type differs.
        self.update("wallet_create_canister")
            .with_arg(In { cycles, settings })
            .build()
            .map(|result: (Result<CreateResult, String>,)| (result.0,))
    }

    /// Create a canister through the wallet, using the 128-bit API.
    pub fn wallet_create_canister128(
        &self,
        cycles: u128,
        controllers: Option<Vec<Principal>>,
        compute_allocation: Option<ComputeAllocation>,
        memory_allocation: Option<MemoryAllocation>,
        freezing_threshold: Option<FreezingThreshold>,
    ) -> impl 'agent + AsyncCall<Value = (Result<CreateResult, String>,)> {
        #[derive(CandidType)]
        struct In {
            cycles: u128,
            settings: CanisterSettings,
        }
        let settings = CanisterSettings {
            controllers,
            compute_allocation: compute_allocation.map(u8::from).map(Nat::from),
            memory_allocation: memory_allocation.map(u64::from).map(Nat::from),
            freezing_threshold: freezing_threshold.map(u64::from).map(Nat::from),
            reserved_cycles_limit: None,
            wasm_memory_limit: None,
            wasm_memory_threshold: None,
            log_visibility: None,
            environment_variables: None,
        };
        self.update("wallet_create_canister128")
            .with_arg(In { cycles, settings })
            .build()
            .map(|result: (Result<CreateResult, String>,)| (result.0,))
    }

    /// Create a canister through the wallet.
    ///
    /// This method does not have a `reserved_cycles_limit` parameter,
    /// as the wallet does not support the setting. If you need to create a canister
    /// with a `reserved_cycles_limit` set, use the management canister.
    ///
    /// This method does not have a `wasm_memory_limit` or `log_visibility` parameter,
    /// as the wallet does not support the setting. If you need to create a canister
    /// with a `wasm_memory_limit` or `log_visibility` set, use the management canister.
    //
    // Dispatch order: 128-bit API if supported, else 64-bit multi-controller,
    // else 64-bit single-controller. Each downgrade may fail with
    // `WalletUpgradeRequired` if the arguments cannot be represented.
    pub async fn wallet_create_canister(
        &self,
        cycles: u128,
        controllers: Option<Vec<Principal>>,
        compute_allocation: Option<ComputeAllocation>,
        memory_allocation: Option<MemoryAllocation>,
        freezing_threshold: Option<FreezingThreshold>,
    ) -> Result<CreateResult, AgentError> {
        if self.version_supports_u128_cycles() {
            self.wallet_create_canister128(
                cycles,
                controllers,
                compute_allocation,
                memory_allocation,
                freezing_threshold,
            )
            .call_and_wait()
            .await?
        } else {
            // 64-bit fallback: the cycle amount must fit in a u64.
            let cycles = u64::try_from(cycles).map_err(|_| {
                AgentError::WalletUpgradeRequired(
                    "The installed wallet does not support cycle counts >2^64-1.".to_string(),
                )
            })?;
            if self.version_supports_multiple_controllers() {
                self.wallet_create_canister64_v2(
                    cycles,
                    controllers,
                    compute_allocation,
                    memory_allocation,
                    freezing_threshold,
                )
                .call_and_wait()
                .await?
            } else {
                // v1 accepts at most one controller; reject multi-controller requests.
                let controller: Option<Principal> = match &controllers {
                    Some(c) if c.len() == 1 => {
                        let first: Option<&Principal> = c.first();
                        let first: Principal = *first.unwrap();
                        Ok(Some(first))
                    }
                    Some(_) => Err(AgentError::WalletUpgradeRequired(
                        "The installed wallet does not support multiple controllers.".to_string(),
                    )),
                    None => Ok(None),
                }?;
                self.wallet_create_canister64_v1(
                    cycles,
                    controller,
                    compute_allocation,
                    memory_allocation,
                    freezing_threshold,
                )
                .call_and_wait()
                .await?
            }
        }
        // All branches yield a one-tuple of the wallet's Result<CreateResult, String>.
        .0
        .map_err(AgentError::WalletError)
    }

    /// Create a wallet canister with the single-controller 64-bit API.
    pub fn wallet_create_wallet64_v1(
        &self,
        cycles: u64,
        controller: Option<Principal>,
        compute_allocation: Option<ComputeAllocation>,
        memory_allocation: Option<MemoryAllocation>,
        freezing_threshold: Option<FreezingThreshold>,
    ) -> impl 'agent + AsyncCall<Value = (Result<CreateResult, String>,)> {
        #[derive(CandidType)]
        struct In {
            cycles: u64,
            settings: CanisterSettingsV1,
        }
        // Attribute newtypes are lowered to Nat for candid encoding.
        let settings = CanisterSettingsV1 {
            controller,
            compute_allocation: compute_allocation.map(u8::from).map(Nat::from),
            memory_allocation: memory_allocation.map(u64::from).map(Nat::from),
            freezing_threshold: freezing_threshold.map(u64::from).map(Nat::from),
        };
        self.update("wallet_create_wallet")
            .with_arg(In { cycles, settings })
            .build()
            // Identity re-wrap; pins the decoded tuple type of the call.
            .map(|result: (Result<CreateResult, String>,)| (result.0,))
    }

    /// Create a wallet canister with the multi-controller 64-bit API.
    pub fn wallet_create_wallet64_v2(
        &self,
        cycles: u64,
        controllers: Option<Vec<Principal>>,
        compute_allocation: Option<ComputeAllocation>,
        memory_allocation: Option<MemoryAllocation>,
        freezing_threshold: Option<FreezingThreshold>,
    ) -> impl 'agent + AsyncCall<Value = (Result<CreateResult, String>,)> {
        #[derive(CandidType)]
        struct In {
            cycles: u64,
            settings: CanisterSettings,
        }
        // Settings the wallet API does not expose are always sent as None.
        let settings = CanisterSettings {
            controllers,
            compute_allocation: compute_allocation.map(u8::from).map(Nat::from),
            memory_allocation: memory_allocation.map(u64::from).map(Nat::from),
            freezing_threshold: freezing_threshold.map(u64::from).map(Nat::from),
            reserved_cycles_limit: None,
            wasm_memory_limit: None,
            wasm_memory_threshold: None,
            log_visibility: None,
            environment_variables: None,
        };
        // Same method name as v1 ("wallet_create_wallet"); only the settings type differs.
        self.update("wallet_create_wallet")
            .with_arg(In { cycles, settings })
            .build()
            .map(|result: (Result<CreateResult, String>,)| (result.0,))
    }

    /// Create a wallet canister with the 128-bit API.
pub fn wallet_create_wallet128( &self, cycles: u128, controllers: Option<Vec<Principal>>, compute_allocation: Option<ComputeAllocation>, memory_allocation: Option<MemoryAllocation>, freezing_threshold: Option<FreezingThreshold>, ) -> impl 'agent + AsyncCall<Value = (Result<CreateResult, String>,)> { #[derive(CandidType)] struct In { cycles: u128, settings: CanisterSettings, } let settings = CanisterSettings { controllers, compute_allocation: compute_allocation.map(u8::from).map(Nat::from), memory_allocation: memory_allocation.map(u64::from).map(Nat::from), freezing_threshold: freezing_threshold.map(u64::from).map(Nat::from), reserved_cycles_limit: None, wasm_memory_limit: None, wasm_memory_threshold: None, log_visibility: None, environment_variables: None, }; self.update("wallet_create_wallet128") .with_arg(In { cycles, settings }) .build() .map(|result: (Result<CreateResult, String>,)| (result.0,)) } /// Create a wallet canister. pub async fn wallet_create_wallet( &self, cycles: u128, controllers: Option<Vec<Principal>>, compute_allocation: Option<ComputeAllocation>, memory_allocation: Option<MemoryAllocation>, freezing_threshold: Option<FreezingThreshold>, ) -> Result<CreateResult, AgentError> { if self.version_supports_u128_cycles() { self.wallet_create_wallet128( cycles, controllers, compute_allocation, memory_allocation, freezing_threshold, ) .call_and_wait() .await? } else { let cycles = u64::try_from(cycles).map_err(|_| { AgentError::WalletUpgradeRequired( "The installed wallet does not support cycle counts >2^64-1.".to_string(), ) })?; if self.version_supports_multiple_controllers() { self.wallet_create_wallet64_v2( cycles, controllers, compute_allocation, memory_allocation, freezing_threshold,
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
true
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-utils/src/interfaces/management_canister/builders.rs
ic-utils/src/interfaces/management_canister/builders.rs
//! Builder interfaces for some method calls of the management canister. #[doc(inline)] pub use super::attributes::{ ComputeAllocation, FreezingThreshold, MemoryAllocation, ReservedCyclesLimit, WasmMemoryLimit, }; use super::{ChunkHash, LogVisibility, ManagementCanister}; use crate::call::CallFuture; use crate::{ call::AsyncCall, canister::Argument, interfaces::management_canister::MgmtMethod, Canister, }; use async_trait::async_trait; use candid::{utils::ArgumentEncoder, CandidType, Deserialize, Nat}; use futures_util::{ future::ready, stream::{self, FuturesUnordered}, FutureExt, Stream, StreamExt, TryStreamExt, }; use ic_agent::{agent::CallResponse, export::Principal, AgentError}; pub use ic_management_canister_types::{ CanisterInstallMode, CanisterSettings, EnvironmentVariable, InstallCodeArgs, UpgradeFlags, WasmMemoryPersistence, }; use sha2::{Digest, Sha256}; use std::{ collections::BTreeSet, convert::{From, TryInto}, future::IntoFuture, pin::Pin, }; /// A builder for a `create_canister` call. 
#[derive(Debug)]
pub struct CreateCanisterBuilder<'agent, 'canister: 'agent> {
    canister: &'canister Canister<'agent>,
    effective_canister_id: Principal,
    // Each setting stores the *result* of its conversion so that errors raised by a
    // `with_*` setter are deferred and surfaced when `build()` is called.
    controllers: Option<Result<Vec<Principal>, AgentError>>,
    compute_allocation: Option<Result<ComputeAllocation, AgentError>>,
    memory_allocation: Option<Result<MemoryAllocation, AgentError>>,
    freezing_threshold: Option<Result<FreezingThreshold, AgentError>>,
    reserved_cycles_limit: Option<Result<ReservedCyclesLimit, AgentError>>,
    wasm_memory_limit: Option<Result<WasmMemoryLimit, AgentError>>,
    // The threshold reuses the WasmMemoryLimit newtype (both are byte counts).
    wasm_memory_threshold: Option<Result<WasmMemoryLimit, AgentError>>,
    log_visibility: Option<Result<LogVisibility, AgentError>>,
    environment_variables: Option<Result<Vec<EnvironmentVariable>, AgentError>>,
    is_provisional_create: bool,
    amount: Option<u128>,
    specified_id: Option<Principal>,
}

impl<'agent, 'canister: 'agent> CreateCanisterBuilder<'agent, 'canister> {
    /// Create an `CreateCanister` builder, which is also an `AsyncCall` implementation.
    pub fn builder(canister: &'canister Canister<'agent>) -> Self {
        Self {
            canister,
            effective_canister_id: Principal::management_canister(),
            controllers: None,
            compute_allocation: None,
            memory_allocation: None,
            freezing_threshold: None,
            reserved_cycles_limit: None,
            wasm_memory_limit: None,
            wasm_memory_threshold: None,
            log_visibility: None,
            environment_variables: None,
            is_provisional_create: false,
            amount: None,
            specified_id: None,
        }
    }

    /// Until developers can convert real ICP tokens to provision a new canister with cycles,
    /// the system provides the `provisional_create_canister_with_cycles` method.
    /// It behaves as `create_canister`, but initializes the canister’s balance with amount fresh cycles
    /// (using `MAX_CANISTER_BALANCE` if amount = null, else capping the balance at `MAX_CANISTER_BALANCE`).
    /// Cycles added to this call via `ic0.call_cycles_add` are returned to the caller.
    /// This method is only available in local development instances, and will be removed in the future.
    #[allow(clippy::wrong_self_convention)]
    pub fn as_provisional_create_with_amount(self, amount: Option<u128>) -> Self {
        Self {
            is_provisional_create: true,
            amount,
            ..self
        }
    }

    /// Specify the canister id.
    ///
    /// The `effective_canister_id` will also be set with the same value so that ic-ref can determine
    /// the target subnet of this request. The replica implementation ignores it.
    pub fn as_provisional_create_with_specified_id(self, specified_id: Principal) -> Self {
        Self {
            is_provisional_create: true,
            specified_id: Some(specified_id),
            effective_canister_id: specified_id,
            ..self
        }
    }

    /// Pass in an effective canister id for the update call.
    pub fn with_effective_canister_id<C, E>(self, effective_canister_id: C) -> Self
    where
        E: std::fmt::Display,
        C: TryInto<Principal, Error = E>,
    {
        match effective_canister_id.try_into() {
            Ok(effective_canister_id) => Self {
                effective_canister_id,
                ..self
            },
            // NOTE(review): a failed conversion is silently ignored here (the previous
            // effective id is kept), unlike the other setters which defer the error to build().
            Err(_) => self,
        }
    }

    /// Pass in an optional controller for the canister. If this is [`None`],
    /// it will revert the controller to default.
    pub fn with_optional_controller<C, E>(self, controller: Option<C>) -> Self
    where
        E: std::fmt::Display,
        C: TryInto<Principal, Error = E>,
    {
        let controller_to_add: Option<Result<Principal, _>> = controller.map(|ca| {
            ca.try_into()
                .map_err(|e| AgentError::MessageError(format!("{e}")))
        });
        // Controllers accumulate across calls; an earlier conversion error is "sticky"
        // and takes precedence, and passing None clears the list entirely.
        let controllers: Option<Result<Vec<Principal>, _>> =
            match (controller_to_add, self.controllers) {
                (_, Some(Err(sticky))) => Some(Err(sticky)),
                (Some(Err(e)), _) => Some(Err(e)),
                (None, _) => None,
                (Some(Ok(controller)), Some(Ok(controllers))) => {
                    let mut controllers = controllers;
                    controllers.push(controller);
                    Some(Ok(controllers))
                }
                (Some(Ok(controller)), None) => Some(Ok(vec![controller])),
            };
        Self {
            controllers,
            ..self
        }
    }

    /// Pass in a designated controller for the canister.
    pub fn with_controller<C, E>(self, controller: C) -> Self
    where
        E: std::fmt::Display,
        C: TryInto<Principal, Error = E>,
    {
        self.with_optional_controller(Some(controller))
    }

    /// Pass in a compute allocation optional value for the canister. If this is [`None`],
    /// it will revert the compute allocation to default.
    pub fn with_optional_compute_allocation<C, E>(self, compute_allocation: Option<C>) -> Self
    where
        E: std::fmt::Display,
        C: TryInto<ComputeAllocation, Error = E>,
    {
        Self {
            // Conversion errors are stored and surfaced later by build().
            compute_allocation: compute_allocation.map(|ca| {
                ca.try_into()
                    .map_err(|e| AgentError::MessageError(format!("{e}")))
            }),
            ..self
        }
    }

    /// Pass in a compute allocation value for the canister.
    pub fn with_compute_allocation<C, E>(self, compute_allocation: C) -> Self
    where
        E: std::fmt::Display,
        C: TryInto<ComputeAllocation, Error = E>,
    {
        self.with_optional_compute_allocation(Some(compute_allocation))
    }

    /// Pass in a memory allocation optional value for the canister. If this is [`None`],
    /// it will revert the memory allocation to default.
    pub fn with_optional_memory_allocation<E, C>(self, memory_allocation: Option<C>) -> Self
    where
        E: std::fmt::Display,
        C: TryInto<MemoryAllocation, Error = E>,
    {
        Self {
            memory_allocation: memory_allocation.map(|ma| {
                ma.try_into()
                    .map_err(|e| AgentError::MessageError(format!("{e}")))
            }),
            ..self
        }
    }

    /// Pass in a memory allocation value for the canister.
    pub fn with_memory_allocation<C, E>(self, memory_allocation: C) -> Self
    where
        E: std::fmt::Display,
        C: TryInto<MemoryAllocation, Error = E>,
    {
        self.with_optional_memory_allocation(Some(memory_allocation))
    }

    /// Pass in a freezing threshold optional value for the canister. If this is [`None`],
    /// it will revert the freezing threshold to default.
    pub fn with_optional_freezing_threshold<E, C>(self, freezing_threshold: Option<C>) -> Self
    where
        E: std::fmt::Display,
        C: TryInto<FreezingThreshold, Error = E>,
    {
        Self {
            // Conversion errors are stored and surfaced later by build().
            freezing_threshold: freezing_threshold.map(|ma| {
                ma.try_into()
                    .map_err(|e| AgentError::MessageError(format!("{e}")))
            }),
            ..self
        }
    }

    /// Pass in a freezing threshold value for the canister.
    pub fn with_freezing_threshold<C, E>(self, freezing_threshold: C) -> Self
    where
        E: std::fmt::Display,
        C: TryInto<FreezingThreshold, Error = E>,
    {
        self.with_optional_freezing_threshold(Some(freezing_threshold))
    }

    /// Pass in a reserved cycles limit value for the canister.
    pub fn with_reserved_cycles_limit<C, E>(self, limit: C) -> Self
    where
        E: std::fmt::Display,
        C: TryInto<ReservedCyclesLimit, Error = E>,
    {
        self.with_optional_reserved_cycles_limit(Some(limit))
    }

    /// Pass in a reserved cycles limit optional value for the canister. If this is [`None`],
    /// it will create the canister with the default limit.
    pub fn with_optional_reserved_cycles_limit<E, C>(self, limit: Option<C>) -> Self
    where
        E: std::fmt::Display,
        C: TryInto<ReservedCyclesLimit, Error = E>,
    {
        Self {
            reserved_cycles_limit: limit.map(|limit| {
                limit
                    .try_into()
                    .map_err(|e| AgentError::MessageError(format!("{e}")))
            }),
            ..self
        }
    }

    /// Pass in a Wasm memory limit value for the canister.
    pub fn with_wasm_memory_limit<C, E>(self, wasm_memory_limit: C) -> Self
    where
        E: std::fmt::Display,
        C: TryInto<WasmMemoryLimit, Error = E>,
    {
        self.with_optional_wasm_memory_limit(Some(wasm_memory_limit))
    }

    /// Pass in a Wasm memory limit optional value for the canister. If this is [`None`],
    /// it will revert the Wasm memory limit to default.
    pub fn with_optional_wasm_memory_limit<E, C>(self, wasm_memory_limit: Option<C>) -> Self
    where
        E: std::fmt::Display,
        C: TryInto<WasmMemoryLimit, Error = E>,
    {
        Self {
            // Conversion errors are stored and surfaced later by build().
            wasm_memory_limit: wasm_memory_limit.map(|limit| {
                limit
                    .try_into()
                    .map_err(|e| AgentError::MessageError(format!("{e}")))
            }),
            ..self
        }
    }

    /// Pass in a Wasm memory threshold value for the canister.
    pub fn with_wasm_memory_threshold<C, E>(self, wasm_memory_threshold: C) -> Self
    where
        E: std::fmt::Display,
        C: TryInto<WasmMemoryLimit, Error = E>,
    {
        self.with_optional_wasm_memory_threshold(Some(wasm_memory_threshold))
    }

    /// Pass in a Wasm memory threshold optional value for the canister. If this is [`None`],
    /// it will revert the Wasm memory threshold to default.
    pub fn with_optional_wasm_memory_threshold<E, C>(self, wasm_memory_threshold: Option<C>) -> Self
    where
        E: std::fmt::Display,
        C: TryInto<WasmMemoryLimit, Error = E>,
    {
        Self {
            wasm_memory_threshold: wasm_memory_threshold.map(|limit| {
                limit
                    .try_into()
                    .map_err(|e| AgentError::MessageError(format!("{e}")))
            }),
            ..self
        }
    }

    /// Pass in a log visibility setting for the canister.
    pub fn with_log_visibility<C, E>(self, log_visibility: C) -> Self
    where
        E: std::fmt::Display,
        C: TryInto<LogVisibility, Error = E>,
    {
        self.with_optional_log_visibility(Some(log_visibility))
    }

    /// Pass in a log visibility optional setting for the canister. If this is [`None`],
    /// it will revert the log visibility to default.
    pub fn with_optional_log_visibility<E, C>(self, log_visibility: Option<C>) -> Self
    where
        E: std::fmt::Display,
        C: TryInto<LogVisibility, Error = E>,
    {
        Self {
            log_visibility: log_visibility.map(|visibility| {
                visibility
                    .try_into()
                    .map_err(|e| AgentError::MessageError(format!("{e}")))
            }),
            ..self
        }
    }

    /// Pass in an environment variables setting for the canister.
    pub fn with_environment_variables<E, C>(self, environment_variables: C) -> Self
    where
        E: std::fmt::Display,
        C: TryInto<Vec<EnvironmentVariable>, Error = E>,
    {
        self.with_optional_environment_variables(Some(environment_variables))
    }

    /// Pass in a environment variables optional setting for the canister. If this is [`None`],
    /// it will revert the environment variables to default.
    pub fn with_optional_environment_variables<E, C>(self, environment_variables: Option<C>) -> Self
    where
        E: std::fmt::Display,
        C: TryInto<Vec<EnvironmentVariable>, Error = E>,
    {
        Self {
            environment_variables: environment_variables.map(|vars| {
                vars.try_into()
                    .map_err(|e| AgentError::MessageError(format!("{e}")))
            }),
            ..self
        }
    }

    /// Create an [`AsyncCall`] implementation that, when called, will create a
    /// canister.
    pub fn build(self) -> Result<impl 'agent + AsyncCall<Value = (Principal,)>, AgentError> {
        // Unwrap each stored Option<Result<...>>: the first deferred setter error
        // aborts the build; values are lowered to the candid-friendly Nat where needed.
        let controllers = match self.controllers {
            Some(Err(x)) => return Err(AgentError::MessageError(format!("{x}"))),
            Some(Ok(x)) => Some(x),
            None => None,
        };
        let compute_allocation = match self.compute_allocation {
            Some(Err(x)) => return Err(AgentError::MessageError(format!("{x}"))),
            Some(Ok(x)) => Some(Nat::from(u8::from(x))),
            None => None,
        };
        let memory_allocation = match self.memory_allocation {
            Some(Err(x)) => return Err(AgentError::MessageError(format!("{x}"))),
            Some(Ok(x)) => Some(Nat::from(u64::from(x))),
            None => None,
        };
        let freezing_threshold = match self.freezing_threshold {
            Some(Err(x)) => return Err(AgentError::MessageError(format!("{x}"))),
            Some(Ok(x)) => Some(Nat::from(u64::from(x))),
            None => None,
        };
        let reserved_cycles_limit = match self.reserved_cycles_limit {
            Some(Err(x)) => return Err(AgentError::MessageError(format!("{x}"))),
            Some(Ok(x)) => Some(Nat::from(u128::from(x))),
            None => None,
        };
        let wasm_memory_limit = match self.wasm_memory_limit {
            Some(Err(x)) => return Err(AgentError::MessageError(format!("{x}"))),
            Some(Ok(x)) => Some(Nat::from(u64::from(x))),
            None => None,
        };
        let wasm_memory_threshold = match self.wasm_memory_threshold {
            Some(Err(x)) => return Err(AgentError::MessageError(format!("{x}"))),
            Some(Ok(x)) => Some(Nat::from(u64::from(x))),
            None => None,
        };
        let log_visibility = match self.log_visibility {
            Some(Err(x)) => return Err(AgentError::MessageError(format!("{x}"))),
            Some(Ok(x)) => Some(x),
            None => None,
        };
        let environment_variables = match self.environment_variables {
            Some(Err(x)) => return Err(AgentError::MessageError(format!("{x}"))),
            Some(Ok(x)) => Some(x),
            None => None,
        };
        // Reply shape of both create endpoints: a record holding the new canister id.
        #[derive(Deserialize, CandidType)]
        struct Out {
            canister_id: Principal,
        }
        let async_builder = if self.is_provisional_create {
            #[derive(CandidType)]
            struct In {
                amount: Option<Nat>,
                settings: CanisterSettings,
                specified_id: Option<Principal>,
            }
            let in_arg = In {
                amount: self.amount.map(Nat::from),
                settings: CanisterSettings {
                    controllers,
                    compute_allocation,
                    memory_allocation,
                    freezing_threshold,
                    reserved_cycles_limit,
                    wasm_memory_limit,
                    wasm_memory_threshold,
                    log_visibility,
                    environment_variables,
                },
                specified_id: self.specified_id,
            };
            self.canister
                .update(MgmtMethod::ProvisionalCreateCanisterWithCycles.as_ref())
                .with_arg(in_arg)
                .with_effective_canister_id(self.effective_canister_id)
        } else {
            self.canister
                .update(MgmtMethod::CreateCanister.as_ref())
                .with_arg(CanisterSettings {
                    controllers,
                    compute_allocation,
                    memory_allocation,
                    freezing_threshold,
                    reserved_cycles_limit,
                    wasm_memory_limit,
                    wasm_memory_threshold,
                    log_visibility,
                    environment_variables,
                })
                .with_effective_canister_id(self.effective_canister_id)
        };
        Ok(async_builder
            .build()
            // Unwrap the reply record so callers receive a plain Principal.
            .map(|result: (Out,)| (result.0.canister_id,)))
    }

    /// Make a call. This is equivalent to the [`AsyncCall::call`].
    pub async fn call(self) -> Result<CallResponse<(Principal,)>, AgentError> {
        self.build()?.call().await
    }

    /// Make a call. This is equivalent to the [`AsyncCall::call_and_wait`].
    pub async fn call_and_wait(self) -> Result<(Principal,), AgentError> {
        self.build()?.call_and_wait().await
    }
}

// `?Send` is required on wasm targets where futures are not Send.
#[cfg_attr(target_family = "wasm", async_trait(?Send))]
#[cfg_attr(not(target_family = "wasm"), async_trait)]
impl<'agent, 'canister: 'agent> AsyncCall for CreateCanisterBuilder<'agent, 'canister> {
    type Value = (Principal,);

    async fn call(self) -> Result<CallResponse<(Principal,)>, AgentError> {
        self.build()?.call().await
    }

    async fn call_and_wait(self) -> Result<(Principal,), AgentError> {
        self.build()?.call_and_wait().await
    }
}

// Lets the builder be `.await`ed directly, equivalent to call_and_wait().
impl<'agent, 'canister: 'agent> IntoFuture for CreateCanisterBuilder<'agent, 'canister> {
    type IntoFuture = CallFuture<'agent, (Principal,)>;
    type Output = Result<(Principal,), AgentError>;
    fn into_future(self) -> Self::IntoFuture {
        AsyncCall::call_and_wait(self)
    }
}

#[doc(hidden)]
#[deprecated(since = "0.42.0", note = "Please use UpgradeFlags instead")]
pub type CanisterUpgradeOptions = UpgradeFlags;

#[doc(hidden)]
#[deprecated(since = "0.42.0", note = "Please use CanisterInstallMode instead")]
pub type InstallMode = CanisterInstallMode;

#[doc(hidden)]
#[deprecated(since = "0.42.0", note = "Please use InstallCodeArgs instead")]
pub type CanisterInstall = InstallCodeArgs;

/// A builder for an `install_code` call.
#[derive(Debug)]
pub struct InstallCodeBuilder<'agent, 'canister: 'agent> {
    canister: &'canister Canister<'agent>,
    canister_id: Principal,
    wasm: &'canister [u8],
    arg: Argument,
    mode: Option<CanisterInstallMode>,
}

impl<'agent, 'canister: 'agent> InstallCodeBuilder<'agent, 'canister> {
    /// Create an `InstallCode` builder, which is also an `AsyncCall` implementation.
    pub fn builder(
        canister: &'canister Canister<'agent>,
        canister_id: &Principal,
        wasm: &'canister [u8],
    ) -> Self {
        Self {
            canister,
            canister_id: *canister_id,
            wasm,
            arg: Default::default(),
            mode: None,
        }
    }

    /// Set the argument to the installation, which will be passed to the init
    /// method of the canister. Can be called at most once.
    pub fn with_arg<Argument: CandidType>(
        mut self,
        arg: Argument,
    ) -> InstallCodeBuilder<'agent, 'canister> {
        self.arg.set_idl_arg(arg);
        self
    }

    /// Set the argument with multiple arguments as tuple to the installation,
    /// which will be passed to the init method of the canister. Can be called at most once.
    pub fn with_args(mut self, tuple: impl ArgumentEncoder) -> Self {
        // Guard against mixing with with_arg/with_raw_arg: the argument may only be set once.
        assert!(self.arg.0.is_none(), "argument is being set more than once");
        self.arg = Argument::from_candid(tuple);
        self
    }

    /// Set the argument passed in to the canister with raw bytes. Can be called at most once.
    pub fn with_raw_arg(mut self, arg: Vec<u8>) -> InstallCodeBuilder<'agent, 'canister> {
        self.arg.set_raw_arg(arg);
        self
    }

    /// Pass in the [`CanisterInstallMode`].
    pub fn with_mode(self, mode: CanisterInstallMode) -> Self {
        Self {
            mode: Some(mode),
            ..self
        }
    }

    /// Create an [`AsyncCall`] implementation that, when called, will install the
    /// canister.
    pub fn build(self) -> Result<impl 'agent + AsyncCall<Value = ()>, AgentError> {
        Ok(self
            .canister
            .update(MgmtMethod::InstallCode.as_ref())
            .with_arg(InstallCodeArgs {
                // Default to a fresh install when no mode was chosen.
                mode: self.mode.unwrap_or(CanisterInstallMode::Install),
                canister_id: self.canister_id,
                wasm_module: self.wasm.to_owned(),
                arg: self.arg.serialize()?,
                sender_canister_version: None,
            })
            .with_effective_canister_id(self.canister_id)
            .build())
    }

    /// Make a call. This is equivalent to the [`AsyncCall::call`].
    pub async fn call(self) -> Result<CallResponse<()>, AgentError> {
        self.build()?.call().await
    }

    /// Make a call. This is equivalent to the [`AsyncCall::call_and_wait`].
    pub async fn call_and_wait(self) -> Result<(), AgentError> {
        self.build()?.call_and_wait().await
    }
}

// `?Send` is required on wasm targets where futures are not Send.
#[cfg_attr(target_family = "wasm", async_trait(?Send))]
#[cfg_attr(not(target_family = "wasm"), async_trait)]
impl<'agent, 'canister: 'agent> AsyncCall for InstallCodeBuilder<'agent, 'canister> {
    type Value = ();

    async fn call(self) -> Result<CallResponse<()>, AgentError> {
        self.build()?.call().await
    }

    async fn call_and_wait(self) -> Result<(), AgentError> {
        self.build()?.call_and_wait().await
    }
}

// Lets the builder be `.await`ed directly, equivalent to call_and_wait().
impl<'agent, 'canister: 'agent> IntoFuture for InstallCodeBuilder<'agent, 'canister> {
    type IntoFuture = CallFuture<'agent, ()>;
    type Output = Result<(), AgentError>;
    fn into_future(self) -> Self::IntoFuture {
        AsyncCall::call_and_wait(self)
    }
}

/// A builder for an `install_chunked_code` call.
#[derive(Debug)]
pub struct InstallChunkedCodeBuilder<'agent, 'canister> {
    canister: &'canister Canister<'agent>,
    target_canister: Principal,
    store_canister: Option<Principal>,
    chunk_hashes_list: Vec<ChunkHash>,
    wasm_module_hash: Vec<u8>,
    arg: Argument,
    mode: CanisterInstallMode,
}

impl<'agent: 'canister, 'canister> InstallChunkedCodeBuilder<'agent, 'canister> {
    /// Create an `InstallChunkedCodeBuilder`.
    pub fn builder(
        canister: &'canister Canister<'agent>,
        target_canister: Principal,
        wasm_module_hash: &[u8],
    ) -> Self {
        Self {
            canister,
            target_canister,
            wasm_module_hash: wasm_module_hash.to_vec(),
            store_canister: None,
            chunk_hashes_list: vec![],
            arg: Argument::new(),
            mode: CanisterInstallMode::Install,
        }
    }

    /// Set the chunks to install. These must previously have been set with [`ManagementCanister::upload_chunk`].
    pub fn with_chunk_hashes(mut self, chunk_hashes: Vec<ChunkHash>) -> Self {
        self.chunk_hashes_list = chunk_hashes;
        self
    }

    /// Set the canister to pull uploaded chunks from. By default this is the same as the target canister.
    pub fn with_store_canister(mut self, store_canister: Principal) -> Self {
        self.store_canister = Some(store_canister);
        self
    }

    /// Set the argument to the installation, which will be passed to the init
    /// method of the canister. Can be called at most once.
    pub fn with_arg(mut self, argument: impl CandidType) -> Self {
        self.arg.set_idl_arg(argument);
        self
    }

    /// Set the argument with multiple arguments as tuple to the installation,
    /// which will be passed to the init method of the canister. Can be called at most once.
    pub fn with_args(mut self, argument: impl ArgumentEncoder) -> Self {
        // Guard against mixing with with_arg/with_raw_arg: the argument may only be set once.
        assert!(self.arg.0.is_none(), "argument is being set more than once");
        self.arg = Argument::from_candid(argument);
        self
    }

    /// Set the argument passed in to the canister with raw bytes. Can be called at most once.
    pub fn with_raw_arg(mut self, argument: Vec<u8>) -> Self {
        self.arg.set_raw_arg(argument);
        self
    }

    /// Set the [`CanisterInstallMode`].
    pub fn with_install_mode(mut self, mode: CanisterInstallMode) -> Self {
        self.mode = mode;
        self
    }

    /// Create an [`AsyncCall`] implementation that, when called, will install the canister.
    pub fn build(self) -> Result<impl 'agent + AsyncCall<Value = ()>, AgentError> {
        // On-wire argument shape of the management canister's install_chunked_code method.
        #[derive(CandidType)]
        struct In {
            mode: CanisterInstallMode,
            target_canister: Principal,
            store_canister: Option<Principal>,
            chunk_hashes_list: Vec<ChunkHash>,
            wasm_module_hash: Vec<u8>,
            arg: Vec<u8>,
            sender_canister_version: Option<u64>,
        }
        // Destructure first: `arg.serialize()` consumes the Argument while
        // `self.canister` is still needed to start the update call.
        let Self {
            mode,
            target_canister,
            store_canister,
            chunk_hashes_list,
            wasm_module_hash,
            arg,
            ..
        } = self;
        Ok(self
            .canister
            .update(MgmtMethod::InstallChunkedCode.as_ref())
            .with_arg(In {
                mode,
                target_canister,
                store_canister,
                chunk_hashes_list,
                wasm_module_hash,
                arg: arg.serialize()?,
                sender_canister_version: None,
            })
            .with_effective_canister_id(target_canister)
            .build())
    }

    /// Make the call. This is equivalent to [`AsyncCall::call`].
    pub async fn call(self) -> Result<CallResponse<()>, AgentError> {
        self.build()?.call().await
    }

    /// Make the call.
This is equivalent to [`AsyncCall::call_and_wait`]. pub async fn call_and_wait(self) -> Result<(), AgentError> { self.build()?.call_and_wait().await } } #[cfg_attr(target_family = "wasm", async_trait(?Send))] #[cfg_attr(not(target_family = "wasm"), async_trait)] impl<'agent, 'canister: 'agent> AsyncCall for InstallChunkedCodeBuilder<'agent, 'canister> { type Value = (); async fn call(self) -> Result<CallResponse<()>, AgentError> { self.call().await } async fn call_and_wait(self) -> Result<(), AgentError> { self.call_and_wait().await } } impl<'agent, 'canister: 'agent> IntoFuture for InstallChunkedCodeBuilder<'agent, 'canister> { type IntoFuture = CallFuture<'agent, ()>; type Output = Result<(), AgentError>; fn into_future(self) -> Self::IntoFuture { AsyncCall::call_and_wait(self) } } /// A builder for a [`ManagementCanister::install`] call. This automatically selects one-shot installation or chunked installation depending on module size. /// /// # Warnings /// /// This will clear chunked code storage if chunked installation is used. Do not use with canisters that you are manually uploading chunked code to. #[derive(Debug)] pub struct InstallBuilder<'agent, 'canister, 'builder> { canister: &'canister ManagementCanister<'agent>, canister_id: Principal, // more precise lifetimes are used here at risk of annoying the user // because `wasm` may be memory-mapped which is tricky to lifetime wasm: &'builder [u8], arg: Argument, mode: CanisterInstallMode, } impl<'agent: 'canister, 'canister: 'builder, 'builder> InstallBuilder<'agent, 'canister, 'builder> { // Messages are a maximum of 2MiB. Thus basic installation should cap the wasm and arg size at 1.85MiB, since // the current API is definitely not going to produce 150KiB of framing data for it. const CHUNK_CUTOFF: usize = (1.85 * 1024. * 1024.) as usize; /// Create a canister installation builder. 
pub fn builder( canister: &'canister ManagementCanister<'agent>, canister_id: &Principal, wasm: &'builder [u8], ) -> Self { Self { canister, canister_id: *canister_id, wasm, arg: Default::default(), mode: CanisterInstallMode::Install, } } /// Set the argument to the installation, which will be passed to the init /// method of the canister. Can be called at most once. pub fn with_arg<Argument: CandidType>(mut self, arg: Argument) -> Self { self.arg.set_idl_arg(arg); self } /// Set the argument with multiple arguments as tuple to the installation, /// which will be passed to the init method of the canister. Can be called at most once. pub fn with_args(mut self, tuple: impl ArgumentEncoder) -> Self { assert!(self.arg.0.is_none(), "argument is being set more than once"); self.arg = Argument::from_candid(tuple); self } /// Set the argument passed in to the canister with raw bytes. Can be called at most once. pub fn with_raw_arg(mut self, arg: Vec<u8>) -> Self { self.arg.set_raw_arg(arg); self } /// Pass in the [`CanisterInstallMode`]. pub fn with_mode(self, mode: CanisterInstallMode) -> Self { Self { mode, ..self } } /// Invoke the installation process. This may result in many calls which may take several seconds; /// use [`call_and_wait_with_progress`](Self::call_and_wait_with_progress) if you want progress reporting. pub async fn call_and_wait(self) -> Result<(), AgentError> { self.call_and_wait_with_progress() .await .try_for_each(|_| ready(Ok(()))) .await } /// Invoke the installation process. The returned stream must be iterated to completion; it is used to track progress, /// as installation may take arbitrarily long, and is intended to be passed to functions like `indicatif::ProgressBar::wrap_stream`. /// There are exactly [`size_hint().0`](Stream::size_hint) steps. 
pub async fn call_and_wait_with_progress( self, ) -> impl Stream<Item = Result<(), AgentError>> + 'builder { let stream_res = /* try { */ async move { let arg = self.arg.serialize()?; let stream: BoxStream<'_, _> = if self.wasm.len() + arg.len() < Self::CHUNK_CUTOFF { Box::pin( async move { self.canister .install_code(&self.canister_id, self.wasm) .with_raw_arg(arg) .with_mode(self.mode) .call_and_wait() .await } .into_stream(), ) } else { let (existing_chunks,) = self.canister.stored_chunks(&self.canister_id).call_and_wait().await?; let existing_chunks = existing_chunks.into_iter().map(|c| c.hash).collect::<BTreeSet<_>>(); let all_chunks = self.wasm.chunks(1024 * 1024).map(|x| (Sha256::digest(x).to_vec(), x)).collect::<Vec<_>>(); let mut to_upload_chunks = vec![]; for (hash, chunk) in &all_chunks { if !existing_chunks.contains(hash) { to_upload_chunks.push(*chunk); } } let upload_chunks_stream = FuturesUnordered::new(); for chunk in to_upload_chunks { upload_chunks_stream.push(async move { let (_res,) = self .canister .upload_chunk(&self.canister_id, &ic_management_canister_types::UploadChunkArgs { canister_id: self.canister_id, chunk: chunk.to_vec(), }) .call_and_wait() .await?; Ok(()) }); }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
true
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-utils/src/interfaces/management_canister/attributes.rs
ic-utils/src/interfaces/management_canister/attributes.rs
//! Checked wrappers around certain numeric values used in management calls. use thiserror::Error; /// An error encountered when attempting to construct a [`ComputeAllocation`]. #[derive(Error, Debug)] pub enum ComputeAllocationError { /// The provided value was not a percentage in the range [0, 100]. #[error("Must be a percent between 0 and 100.")] MustBeAPercentage(), } /// A compute allocation for a canister, represented as a percentage between 0 and 100 inclusive. /// /// This represents the percentage of a canister's maximum compute capacity that the IC should commit to guaranteeing for the canister. /// If 0, computation is provided on a best-effort basis. #[derive(Copy, Clone, Debug)] pub struct ComputeAllocation(u8); impl std::convert::From<ComputeAllocation> for u8 { fn from(compute_allocation: ComputeAllocation) -> Self { compute_allocation.0 } } macro_rules! try_from_compute_alloc_decl { ( $t: ty ) => { impl std::convert::TryFrom<$t> for ComputeAllocation { type Error = ComputeAllocationError; fn try_from(value: $t) -> Result<Self, Self::Error> { if (value as i64) < 0 || (value as i64) > 100 { Err(ComputeAllocationError::MustBeAPercentage()) } else { Ok(Self(value as u8)) } } } }; } try_from_compute_alloc_decl!(u8); try_from_compute_alloc_decl!(u16); try_from_compute_alloc_decl!(u32); try_from_compute_alloc_decl!(u64); try_from_compute_alloc_decl!(i8); try_from_compute_alloc_decl!(i16); try_from_compute_alloc_decl!(i32); try_from_compute_alloc_decl!(i64); /// An error encountered when attempting to construct a [`MemoryAllocation`]. #[derive(Error, Debug)] pub enum MemoryAllocationError { /// The provided value was not in the range [0, 2^48] (i.e. 256 TiB). #[error("Memory allocation must be between 0 and 2^48 (i.e 256TiB), inclusively. Got {0}.")] InvalidMemorySize(u64), } /// A memory allocation for a canister. Can be anywhere from 0 to 2^48 (i.e. 256 TiB) inclusive. 
///
/// This represents the size, in bytes, that the IC guarantees to the canister and limits the canister to.
/// If a canister attempts to exceed this value (and the value is nonzero), the attempt will fail. If 0,
/// memory allocation is provided on a best-effort basis.
#[derive(Copy, Clone, Debug)]
pub struct MemoryAllocation(u64);

impl std::convert::From<MemoryAllocation> for u64 {
    fn from(allocation: MemoryAllocation) -> Self {
        allocation.0
    }
}

// Implements `TryFrom<$t> for MemoryAllocation`, rejecting anything outside 0..=2^48.
macro_rules! try_from_memory_alloc_decl {
    ( $t: ty ) => {
        impl std::convert::TryFrom<$t> for MemoryAllocation {
            type Error = MemoryAllocationError;

            fn try_from(value: $t) -> Result<Self, Self::Error> {
                // One i64-wide range check covers every source type; u64 inputs
                // above i64::MAX wrap negative and are rejected as out of range.
                if (0i64..=(1i64 << 48)).contains(&(value as i64)) {
                    Ok(Self(value as u64))
                } else {
                    Err(MemoryAllocationError::InvalidMemorySize(value as u64))
                }
            }
        }
    };
}

try_from_memory_alloc_decl!(u8);
try_from_memory_alloc_decl!(u16);
try_from_memory_alloc_decl!(u32);
try_from_memory_alloc_decl!(u64);
try_from_memory_alloc_decl!(i8);
try_from_memory_alloc_decl!(i16);
try_from_memory_alloc_decl!(i32);
try_from_memory_alloc_decl!(i64);

/// An error encountered when attempting to construct a [`FreezingThreshold`].
#[derive(Error, Debug)]
pub enum FreezingThresholdError {
    /// The provided value was not in the range [0, 2^64-1].
    #[error("Freezing threshold must be between 0 and 2^64-1, inclusively. Got {0}.")]
    InvalidFreezingThreshold(u64),
}

/// A freezing threshold for a canister. Can be anywhere from 0 to 2^64-1 inclusive.
///
/// This represents the time, in seconds, of 'runway' the IC tries to guarantee the canister.
/// If the canister's persistent costs, like storage, will likely lead it to run out of cycles within this amount of time,
/// then the IC will 'freeze' the canister. Attempts to call its methods will be rejected unconditionally.
/// The canister also cannot make any calls that push its cycle count into freezing threshold range.
#[derive(Copy, Clone, Debug)] pub struct FreezingThreshold(u64); impl std::convert::From<FreezingThreshold> for u64 { fn from(freezing_threshold: FreezingThreshold) -> Self { freezing_threshold.0 } } macro_rules! try_from_freezing_threshold_decl { ( $t: ty ) => { impl std::convert::TryFrom<$t> for FreezingThreshold { type Error = FreezingThresholdError; fn try_from(value: $t) -> Result<Self, Self::Error> { if (value as i128) < 0 || (value as i128) > (2_i128.pow(64) - 1i128) { Err(FreezingThresholdError::InvalidFreezingThreshold( value as u64, )) } else { Ok(Self(value as u64)) } } } }; } try_from_freezing_threshold_decl!(u8); try_from_freezing_threshold_decl!(u16); try_from_freezing_threshold_decl!(u32); try_from_freezing_threshold_decl!(u64); try_from_freezing_threshold_decl!(i8); try_from_freezing_threshold_decl!(i16); try_from_freezing_threshold_decl!(i32); try_from_freezing_threshold_decl!(i64); try_from_freezing_threshold_decl!(i128); try_from_freezing_threshold_decl!(u128); /// An error encountered when attempting to construct a [`ReservedCyclesLimit`]. #[derive(Error, Debug)] pub enum ReservedCyclesLimitError { /// The provided value was not in the range [0, 2^128-1]. #[error("ReservedCyclesLimit must be between 0 and 2^128-1, inclusively. Got {0}.")] InvalidReservedCyclesLimit(i128), } /// A reserved cycles limit for a canister. Can be anywhere from 0 to 2^128-1 inclusive. /// /// This represents the upper limit of `reserved_cycles` for the canister. /// /// Reserved cycles are cycles that the system sets aside for future use by the canister. /// If a subnet's storage exceeds 450 GiB, then every time a canister allocates new storage bytes, /// the system sets aside some amount of cycles from the main balance of the canister. /// These reserved cycles will be used to cover future payments for the newly allocated bytes. /// The reserved cycles are not transferable and the amount of reserved cycles depends on how full the subnet is. 
/// /// A reserved cycles limit of 0 disables the reservation mechanism for the canister. /// If so disabled, the canister will trap when it tries to allocate storage, if the subnet's usage exceeds 450 GiB. #[derive(Copy, Clone, Debug)] pub struct ReservedCyclesLimit(u128); impl std::convert::From<ReservedCyclesLimit> for u128 { fn from(reserved_cycles_limit: ReservedCyclesLimit) -> Self { reserved_cycles_limit.0 } } #[allow(unused_comparisons)] macro_rules! try_from_reserved_cycles_limit_decl { ( $t: ty ) => { impl std::convert::TryFrom<$t> for ReservedCyclesLimit { type Error = ReservedCyclesLimitError; fn try_from(value: $t) -> Result<Self, Self::Error> { #[allow(unused_comparisons)] if value < 0 { Err(ReservedCyclesLimitError::InvalidReservedCyclesLimit( value as i128, )) } else { Ok(Self(value as u128)) } } } }; } try_from_reserved_cycles_limit_decl!(u8); try_from_reserved_cycles_limit_decl!(u16); try_from_reserved_cycles_limit_decl!(u32); try_from_reserved_cycles_limit_decl!(u64); try_from_reserved_cycles_limit_decl!(i8); try_from_reserved_cycles_limit_decl!(i16); try_from_reserved_cycles_limit_decl!(i32); try_from_reserved_cycles_limit_decl!(i64); try_from_reserved_cycles_limit_decl!(i128); try_from_reserved_cycles_limit_decl!(u128); /// An error encountered when attempting to construct a [`WasmMemoryLimit`]. #[derive(Error, Debug)] pub enum WasmMemoryLimitError { /// The provided value was not in the range [0, 2^48] (i.e. 256 TiB). #[error("Wasm memory limit must be between 0 and 2^48 (i.e 256TiB), inclusively. Got {0}.")] InvalidMemoryLimit(i64), } /// A soft limit on the Wasm memory usage of the canister. Update calls, /// timers, heartbeats, install, and post-upgrade fail if the Wasm memory /// usage exceeds this limit. The main purpose of this field is to protect /// against the case when the canister reaches the hard 4GiB limit. /// Must be a number between 0 and 2^48^ (i.e 256TB), inclusively. 
#[derive(Copy, Clone, Debug)] pub struct WasmMemoryLimit(u64); impl std::convert::From<WasmMemoryLimit> for u64 { fn from(wasm_memory_limit: WasmMemoryLimit) -> Self { wasm_memory_limit.0 } } macro_rules! try_from_wasm_memory_limit_decl { ( $t: ty ) => { impl std::convert::TryFrom<$t> for WasmMemoryLimit { type Error = WasmMemoryLimitError; fn try_from(value: $t) -> Result<Self, Self::Error> { if (value as i64) < 0 || (value as i64) > (1i64 << 48) { Err(Self::Error::InvalidMemoryLimit(value as i64)) } else { Ok(Self(value as u64)) } } } }; } try_from_wasm_memory_limit_decl!(u8); try_from_wasm_memory_limit_decl!(u16); try_from_wasm_memory_limit_decl!(u32); try_from_wasm_memory_limit_decl!(u64); try_from_wasm_memory_limit_decl!(i8); try_from_wasm_memory_limit_decl!(i16); try_from_wasm_memory_limit_decl!(i32); try_from_wasm_memory_limit_decl!(i64); #[test] #[allow(clippy::useless_conversion)] fn can_convert_compute_allocation() { use std::convert::{TryFrom, TryInto}; // This is more of a compiler test than an actual test. let _ca_u8: ComputeAllocation = 1u8.try_into().unwrap(); let _ca_u16: ComputeAllocation = 1u16.try_into().unwrap(); let _ca_u32: ComputeAllocation = 1u32.try_into().unwrap(); let _ca_u64: ComputeAllocation = 1u64.try_into().unwrap(); let _ca_i8: ComputeAllocation = 1i8.try_into().unwrap(); let _ca_i16: ComputeAllocation = 1i16.try_into().unwrap(); let _ca_i32: ComputeAllocation = 1i32.try_into().unwrap(); let _ca_i64: ComputeAllocation = 1i64.try_into().unwrap(); let ca = ComputeAllocation(100); let _ca_ca: ComputeAllocation = ComputeAllocation::try_from(ca).unwrap(); } #[test] #[allow(clippy::useless_conversion)] fn can_convert_memory_allocation() { use std::convert::{TryFrom, TryInto}; // This is more of a compiler test than an actual test. 
let _ma_u8: MemoryAllocation = 1u8.try_into().unwrap(); let _ma_u16: MemoryAllocation = 1u16.try_into().unwrap(); let _ma_u32: MemoryAllocation = 1u32.try_into().unwrap(); let _ma_u64: MemoryAllocation = 1u64.try_into().unwrap(); let _ma_i8: MemoryAllocation = 1i8.try_into().unwrap(); let _ma_i16: MemoryAllocation = 1i16.try_into().unwrap(); let _ma_i32: MemoryAllocation = 1i32.try_into().unwrap(); let _ma_i64: MemoryAllocation = 1i64.try_into().unwrap(); let ma = MemoryAllocation(100); let _ma_ma: MemoryAllocation = MemoryAllocation::try_from(ma).unwrap(); } #[test] #[allow(clippy::useless_conversion)] fn can_convert_freezing_threshold() { use std::convert::{TryFrom, TryInto}; // This is more of a compiler test than an actual test. let _ft_u8: FreezingThreshold = 1u8.try_into().unwrap(); let _ft_u16: FreezingThreshold = 1u16.try_into().unwrap(); let _ft_u32: FreezingThreshold = 1u32.try_into().unwrap(); let _ft_u64: FreezingThreshold = 1u64.try_into().unwrap(); let _ft_i8: FreezingThreshold = 1i8.try_into().unwrap(); let _ft_i16: FreezingThreshold = 1i16.try_into().unwrap(); let _ft_i32: FreezingThreshold = 1i32.try_into().unwrap(); let _ft_i64: FreezingThreshold = 1i64.try_into().unwrap(); let _ft_u128: FreezingThreshold = 1i128.try_into().unwrap(); let _ft_i128: FreezingThreshold = 1u128.try_into().unwrap(); let ft = FreezingThreshold(100); let _ft_ft: FreezingThreshold = FreezingThreshold::try_from(ft).unwrap(); } #[test] #[allow(clippy::useless_conversion)] fn can_convert_reserved_cycles_limit() { use std::convert::{TryFrom, TryInto}; // This is more of a compiler test than an actual test. 
let _ft_u8: ReservedCyclesLimit = 1u8.try_into().unwrap(); let _ft_u16: ReservedCyclesLimit = 1u16.try_into().unwrap(); let _ft_u32: ReservedCyclesLimit = 1u32.try_into().unwrap(); let _ft_u64: ReservedCyclesLimit = 1u64.try_into().unwrap(); let _ft_i8: ReservedCyclesLimit = 1i8.try_into().unwrap(); let _ft_i16: ReservedCyclesLimit = 1i16.try_into().unwrap(); let _ft_i32: ReservedCyclesLimit = 1i32.try_into().unwrap(); let _ft_i64: ReservedCyclesLimit = 1i64.try_into().unwrap(); let _ft_u128: ReservedCyclesLimit = 1i128.try_into().unwrap(); let _ft_i128: ReservedCyclesLimit = 1u128.try_into().unwrap(); assert!(matches!( ReservedCyclesLimit::try_from(-4).unwrap_err(), ReservedCyclesLimitError::InvalidReservedCyclesLimit(-4) )); assert_eq!( ReservedCyclesLimit::try_from(2u128.pow(127) + 6).unwrap().0, 170_141_183_460_469_231_731_687_303_715_884_105_734_u128 ); let ft = ReservedCyclesLimit(100); let _ft_ft: ReservedCyclesLimit = ReservedCyclesLimit::try_from(ft).unwrap(); } #[test] #[allow(clippy::useless_conversion)] fn can_convert_wasm_memory_limit() { use std::convert::{TryFrom, TryInto}; // This is more of a compiler test than an actual test. let _ma_u8: WasmMemoryLimit = 1u8.try_into().unwrap(); let _ma_u16: WasmMemoryLimit = 1u16.try_into().unwrap(); let _ma_u32: WasmMemoryLimit = 1u32.try_into().unwrap(); let _ma_u64: WasmMemoryLimit = 1u64.try_into().unwrap(); let _ma_i8: WasmMemoryLimit = 1i8.try_into().unwrap(); let _ma_i16: WasmMemoryLimit = 1i16.try_into().unwrap(); let _ma_i32: WasmMemoryLimit = 1i32.try_into().unwrap(); let _ma_i64: WasmMemoryLimit = 1i64.try_into().unwrap(); let ma = WasmMemoryLimit(100); let _ma_ma: WasmMemoryLimit = WasmMemoryLimit::try_from(ma).unwrap(); assert!(matches!( WasmMemoryLimit::try_from(-4).unwrap_err(), WasmMemoryLimitError::InvalidMemoryLimit(-4) )); assert!(matches!( WasmMemoryLimit::try_from(562_949_953_421_312_u64).unwrap_err(), WasmMemoryLimitError::InvalidMemoryLimit(562_949_953_421_312) )); }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/icx-cert/src/pprint.rs
icx-cert/src/pprint.rs
use anyhow::{anyhow, Context, Result};
use base64::prelude::*;
use ic_certification::{HashTree, LookupResult};
use reqwest::header;
use serde::{de::DeserializeOwned, Deserialize};
use sha2::Digest;
use time::{format_description::well_known::Rfc3339, OffsetDateTime};

/// Structured contents of the IC-Certificate header.
struct StructuredCertHeader<'a> {
    certificate: &'a str,
    tree: &'a str,
}

/// A fully parsed replica certificate.
#[derive(Deserialize)]
struct ReplicaCertificate {
    tree: HashTree,
    signature: serde_bytes::ByteBuf,
}

/// Parses the value of IC-Certificate header.
fn parse_structured_cert_header(value: &str) -> Result<StructuredCertHeader<'_>> {
    // Extracts the payload of `field_name`, which appears in `value` as
    // `<prefix><payload>:`.
    fn extract_field<'a>(value: &'a str, field_name: &'a str, prefix: &'a str) -> Result<&'a str> {
        let start = value.find(prefix).ok_or_else(|| {
            anyhow!(
                "Certificate header doesn't have '{}' field: {}",
                field_name,
                value,
            )
        })? + prefix.len();
        let len = value[start..].find(':').ok_or_else(|| {
            // Bug fix: report the field name here (the message says "field"),
            // not the search prefix — matches the "doesn't have" error above.
            anyhow!(
                "malformed '{}' field: no ending colon found: {}",
                field_name,
                value
            )
        })?;
        Ok(&value[start..(start + len)])
    }
    Ok(StructuredCertHeader {
        certificate: extract_field(value, "certificate", "certificate=:")?,
        tree: extract_field(value, "tree", "tree=:")?,
    })
}

/// Decodes base64-encoded CBOR value.
fn parse_base64_cbor<T: DeserializeOwned>(s: &str) -> Result<T> {
    let bytes = BASE64_STANDARD.decode(s).with_context(|| {
        format!(
            "failed to parse {}: invalid base64 {}",
            std::any::type_name::<T>(),
            s
        )
    })?;
    serde_cbor::from_slice(&bytes[..]).with_context(|| {
        format!(
            "failed to parse {}: malformed CBOR",
            std::any::type_name::<T>()
        )
    })
}

/// Downloads the asset with the specified URL and pretty-print certificate contents.
pub fn pprint(url: String, accept_encodings: Option<Vec<String>>) -> Result<()> { let response = { let client = reqwest::blocking::Client::builder(); let client = if let Some(accept_encodings) = accept_encodings { let mut headers = header::HeaderMap::new(); let accept_encodings: String = accept_encodings.join(", "); headers.insert( "Accept-Encoding", header::HeaderValue::from_str(&accept_encodings).unwrap(), ); client.default_headers(headers) } else { client }; client .user_agent("icx-cert") .build()? .get(url) .send() .with_context(|| "failed to fetch the document")? }; let status = response.status().as_u16(); let certificate_header = response .headers() .get("IC-Certificate") .ok_or_else(|| anyhow!("IC-Certificate header not found: {:?}", response.headers()))? .to_owned(); let content_encoding = response .headers() .get("Content-Encoding") .map(|x| x.to_owned()); let data = response .bytes() .with_context(|| "failed to get response body")?; let certificate_str = certificate_header.to_str().with_context(|| { format!("failed to convert certificate header {certificate_header:?} to string") })?; let structured_header = parse_structured_cert_header(certificate_str)?; let tree: HashTree = parse_base64_cbor(structured_header.tree)?; let cert: ReplicaCertificate = parse_base64_cbor(structured_header.certificate)?; println!("STATUS: {status}"); println!("ROOT HASH: {}", hex::encode(cert.tree.digest())); if let Some(content_encoding) = content_encoding { println!("CONTENT-ENCODING: {}", content_encoding.to_str().unwrap()); } println!( "DATA HASH: {}", hex::encode(sha2::Sha256::digest(data.as_ref())) ); println!("TREE HASH: {}", hex::encode(tree.digest())); println!("SIGNATURE: {}", hex::encode(cert.signature.as_ref())); if let LookupResult::Found(mut date_bytes) = cert.tree.lookup_path(&["time"]) { let timestamp_nanos = leb128::read::unsigned(&mut date_bytes) .with_context(|| "failed to decode certificate time as LEB128")?; let dt = 
OffsetDateTime::from_unix_timestamp_nanos(timestamp_nanos.into()) .context("timestamp out of range")?; println!("CERTIFICATE TIME: {}", dt.format(&Rfc3339)?); } println!("CERTIFICATE TREE: {:#?}", cert.tree); println!("TREE: {tree:#?}"); Ok(()) } #[test] fn test_parse_structured_header() { let header = parse_structured_cert_header("certificate=:abcdef:, tree=:010203:").unwrap(); assert_eq!(header.certificate, "abcdef"); assert_eq!(header.tree, "010203"); }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/icx-cert/src/main.rs
icx-cert/src/main.rs
use anyhow::Result;
use clap::{crate_authors, crate_version, Parser};

mod pprint;

/// Top-level CLI for `icx-cert`, parsed with clap's derive API.
#[derive(Parser)]
#[command(
    version = crate_version!(),
    author = crate_authors!(),
)]
enum Command {
    /// Fetches the specified URL and pretty-prints the certificate.
    #[clap(name = "print")]
    PPrint {
        // The URL to fetch; passed through to `pprint::pprint` unchanged.
        url: String,
        /// Specifies one or more encodings to accept.
        #[arg(long)]
        accept_encoding: Option<Vec<String>>,
    },
}

fn main() -> Result<()> {
    // Dispatch on the parsed subcommand; `print` is currently the only one.
    match Command::parse() {
        Command::PPrint {
            url,
            accept_encoding,
        } => pprint::pprint(url, accept_encoding),
    }
}

#[cfg(test)]
mod tests {
    use super::Command;
    use clap::CommandFactory;

    #[test]
    fn valid_command() {
        // clap self-check: panics if the CLI definition is internally inconsistent.
        Command::command().debug_assert();
    }
}
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-identity-hsm/src/lib.rs
ic-identity-hsm/src/lib.rs
//! A crate to manage identities related to HSM (Hardware Security Module), //! allowing users to sign Internet Computer messages with their hardware key. //! Also supports `SoftHSM`. //! //! # Example //! //! ```rust,no_run //! use ic_agent::agent::Agent; //! use ic_identity_hsm::HardwareIdentity; //! # fn main() -> Result<(), Box<dyn std::error::Error>> { //! # let replica_url = ""; //! # let lib_path = ""; //! # let slot_index = 0; //! # let key_id = ""; //! let agent = Agent::builder() //! .with_url(replica_url) //! .with_identity(HardwareIdentity::new(lib_path, slot_index, key_id, || Ok("hunter2".to_string()))?) //! .build(); //! # Ok(()) //! # } #![deny( missing_docs, missing_debug_implementations, rustdoc::broken_intra_doc_links, rustdoc::private_intra_doc_links )] pub(crate) mod hsm; pub use hsm::{HardwareIdentity, HardwareIdentityError};
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
dfinity/agent-rs
https://github.com/dfinity/agent-rs/blob/6fef5bfa96d2ed63b84afd173cc049e38cb5a210/ic-identity-hsm/src/hsm.rs
ic-identity-hsm/src/hsm.rs
use ic_agent::{ agent::EnvelopeContent, export::Principal, identity::Delegation, Identity, Signature, }; use pkcs11::{ types::{ CKA_CLASS, CKA_EC_PARAMS, CKA_EC_POINT, CKA_ID, CKA_KEY_TYPE, CKF_LOGIN_REQUIRED, CKF_SERIAL_SESSION, CKK_ECDSA, CKM_ECDSA, CKO_PRIVATE_KEY, CKO_PUBLIC_KEY, CKU_USER, CK_ATTRIBUTE, CK_ATTRIBUTE_TYPE, CK_KEY_TYPE, CK_MECHANISM, CK_OBJECT_CLASS, CK_OBJECT_HANDLE, CK_SESSION_HANDLE, CK_SLOT_ID, }, Ctx, }; use sha2::{ digest::{generic_array::GenericArray, OutputSizeUser}, Digest, Sha256, }; use simple_asn1::{ from_der, oid, to_der, ASN1Block::{BitString, ObjectIdentifier, OctetString, Sequence}, ASN1DecodeErr, ASN1EncodeErr, }; use std::{path::Path, ptr}; use thiserror::Error; type KeyIdVec = Vec<u8>; type KeyId = [u8]; type DerPublicKeyVec = Vec<u8>; /// Type alias for a sha256 result (ie. a u256). type Sha256Hash = GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>; // We expect the parameters to be curve secp256r1. This is the base127 encoded form: const EXPECTED_EC_PARAMS: &[u8; 10] = b"\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07"; /// An error happened related to a `HardwareIdentity`. #[derive(Error, Debug)] pub enum HardwareIdentityError { /// A PKCS11 error occurred. #[error(transparent)] PKCS11(#[from] pkcs11::errors::Error), // ASN1DecodeError does not implement the Error trait and so we cannot use #[from] /// An error occurred when decoding ASN1. #[error("ASN decode error {0}")] ASN1Decode(ASN1DecodeErr), /// An error occurred when encoding ASN1. #[error(transparent)] ASN1Encode(#[from] ASN1EncodeErr), /// An error occurred when decoding a key ID. #[error(transparent)] KeyIdDecode(#[from] hex::FromHexError), /// The key was not found. #[error("Key not found")] KeyNotFound, /// An unexpected key type was found. #[error("Unexpected key type {0}")] UnexpectedKeyType(CK_KEY_TYPE), /// An `EcPoint` block was expected to be an `OctetString`, but was not. 
#[error("Expected EcPoint to be an OctetString")] ExpectedEcPointOctetString, /// An `EcPoint` block was unexpectedly empty. #[error("EcPoint is empty")] EcPointEmpty, /// The attribute with the specified type was not found. #[error("Attribute with type={0} not found")] AttributeNotFound(CK_ATTRIBUTE_TYPE), /// The `EcParams` given were not the ones the crate expected. #[error("Invalid EcParams. Expected prime256v1 {:02x?}, actual is {:02x?}", .expected, .actual)] InvalidEcParams { /// The expected value of the `EcParams`. expected: Vec<u8>, /// The actual value of the `EcParams`. actual: Vec<u8>, }, /// The PIN login function returned an error, but PIN login was required. #[error("User PIN is required: {0}")] UserPinRequired(String), /// A slot index was provided that does not exist. #[error("No such slot index ({0}")] NoSuchSlotIndex(usize), } /// An identity based on an HSM #[derive(Debug)] pub struct HardwareIdentity { key_id: KeyIdVec, ctx: Ctx, session_handle: CK_SESSION_HANDLE, logged_in: bool, public_key: DerPublicKeyVec, } impl HardwareIdentity { /// Create an identity using a specific key on an HSM. /// The filename will be something like /usr/local/lib/opensc-pkcs11.s /// The `key_id` must refer to a ECDSA key with parameters prime256v1 (secp256r1) /// The key must already have been created. 
You can create one with pkcs11-tool: /// ```sh /// pkcs11-tool -k --slot $SLOT -d $KEY_ID --key-type EC:prime256v1 --pin $PIN /// ``` pub fn new<P, PinFn>( pkcs11_lib_path: P, slot_index: usize, key_id: &str, pin_fn: PinFn, ) -> Result<HardwareIdentity, HardwareIdentityError> where P: AsRef<Path>, PinFn: FnOnce() -> Result<String, String>, { let ctx = Ctx::new_and_initialize(pkcs11_lib_path)?; let slot_id = get_slot_id(&ctx, slot_index)?; let session_handle = open_session(&ctx, slot_id)?; let logged_in = login_if_required(&ctx, session_handle, pin_fn, slot_id)?; let key_id = str_to_key_id(key_id)?; let public_key = get_der_encoded_public_key(&ctx, session_handle, &key_id)?; Ok(HardwareIdentity { key_id, ctx, session_handle, logged_in, public_key, }) } } impl Identity for HardwareIdentity { fn sender(&self) -> Result<Principal, String> { Ok(Principal::self_authenticating(&self.public_key)) } fn public_key(&self) -> Option<Vec<u8>> { Some(self.public_key.clone()) } fn sign(&self, content: &EnvelopeContent) -> Result<Signature, String> { self.sign_arbitrary(&content.to_request_id().signable()) } fn sign_delegation(&self, content: &Delegation) -> Result<Signature, String> { self.sign_arbitrary(&content.signable()) } fn sign_arbitrary(&self, content: &[u8]) -> Result<Signature, String> { let hash = Sha256::digest(content); let signature = self.sign_hash(&hash)?; Ok(Signature { public_key: self.public_key(), signature: Some(signature), delegations: None, }) } } fn get_slot_id(ctx: &Ctx, slot_index: usize) -> Result<CK_SLOT_ID, HardwareIdentityError> { ctx.get_slot_list(true)? .get(slot_index) .ok_or(HardwareIdentityError::NoSuchSlotIndex(slot_index)) .copied() } // We open a session for the duration of the lifetime of the HardwareIdentity. 
fn open_session( ctx: &Ctx, slot_id: CK_SLOT_ID, ) -> Result<CK_SESSION_HANDLE, HardwareIdentityError> { let flags = CKF_SERIAL_SESSION; let application = None; let notify = None; let session_handle = ctx.open_session(slot_id, flags, application, notify)?; Ok(session_handle) } // We might need to log in. This requires the PIN. fn login_if_required<PinFn>( ctx: &Ctx, session_handle: CK_SESSION_HANDLE, pin_fn: PinFn, slot_id: CK_SLOT_ID, ) -> Result<bool, HardwareIdentityError> where PinFn: FnOnce() -> Result<String, String>, { let token_info = ctx.get_token_info(slot_id)?; let login_required = token_info.flags & CKF_LOGIN_REQUIRED != 0; if login_required { let pin = pin_fn().map_err(HardwareIdentityError::UserPinRequired)?; ctx.login(session_handle, CKU_USER, Some(&pin))?; } Ok(login_required) } // Return the DER-encoded public key in the expected format. // We also validate that it's an ECDSA key on the correct curve. fn get_der_encoded_public_key( ctx: &Ctx, session_handle: CK_SESSION_HANDLE, key_id: &KeyId, ) -> Result<DerPublicKeyVec, HardwareIdentityError> { let object_handle = get_public_key_handle(ctx, session_handle, key_id)?; validate_key_type(ctx, session_handle, object_handle)?; validate_ec_params(ctx, session_handle, object_handle)?; let ec_point = get_ec_point(ctx, session_handle, object_handle)?; let oid_ecdsa = oid!(1, 2, 840, 10045, 2, 1); let oid_curve_secp256r1 = oid!(1, 2, 840, 10045, 3, 1, 7); let ec_param = Sequence( 0, vec![ ObjectIdentifier(0, oid_ecdsa), ObjectIdentifier(0, oid_curve_secp256r1), ], ); let ec_point = BitString(0, ec_point.len() * 8, ec_point); let public_key = Sequence(0, vec![ec_param, ec_point]); let der = to_der(&public_key)?; Ok(der) } // Ensure that the key type is ECDSA. fn validate_key_type( ctx: &Ctx, session_handle: CK_SESSION_HANDLE, object_handle: CK_OBJECT_HANDLE, ) -> Result<(), HardwareIdentityError> { // The call to ctx.get_attribute_value() will mutate kt! 
// with_ck_ulong` stores &kt as a mutable pointer by casting it to CK_VOID_PTR, which is: // pub type CK_VOID_PTR = *mut CK_VOID; // `let mut kt...` here emits a warning, unfortunately. let kt: CK_KEY_TYPE = 0; let mut attribute_types = vec![CK_ATTRIBUTE::new(CKA_KEY_TYPE).with_ck_ulong(&kt)]; ctx.get_attribute_value(session_handle, object_handle, &mut attribute_types)?; if kt == CKK_ECDSA { Ok(()) } else { Err(HardwareIdentityError::UnexpectedKeyType(kt)) } } // We just want to make sure that we are using the expected EC curve prime256v1 (secp256r1), // since the HSMs also support things like secp384r1 and secp512r1. fn validate_ec_params( ctx: &Ctx, session_handle: CK_SESSION_HANDLE, object_handle: CK_OBJECT_HANDLE, ) -> Result<(), HardwareIdentityError> { let ec_params = get_ec_params(ctx, session_handle, object_handle)?; if ec_params == EXPECTED_EC_PARAMS { Ok(()) } else { Err(HardwareIdentityError::InvalidEcParams { expected: EXPECTED_EC_PARAMS.to_vec(), actual: ec_params, }) } } // Obtain the EcPoint, which is an (x,y) coordinate. Each coordinate is 32 bytes. // These are preceded by an 04 byte meaning "uncompressed point." // The returned vector will therefore have len=65. fn get_ec_point( ctx: &Ctx, session_handle: CK_SESSION_HANDLE, object_handle: CK_OBJECT_HANDLE, ) -> Result<Vec<u8>, HardwareIdentityError> { let der_encoded_ec_point = get_variable_length_attribute(ctx, session_handle, object_handle, CKA_EC_POINT)?; let blocks = from_der(der_encoded_ec_point.as_slice()).map_err(HardwareIdentityError::ASN1Decode)?; let block = blocks.first().ok_or(HardwareIdentityError::EcPointEmpty)?; if let OctetString(_size, data) = block { Ok(data.clone()) } else { Err(HardwareIdentityError::ExpectedEcPointOctetString) } } // In order to read a variable-length attribute, we need to first read its length. 
fn get_attribute_length( ctx: &Ctx, session_handle: CK_SESSION_HANDLE, object_handle: CK_OBJECT_HANDLE, attribute_type: CK_ATTRIBUTE_TYPE, ) -> Result<usize, HardwareIdentityError> { let mut attributes = vec![CK_ATTRIBUTE::new(attribute_type)]; ctx.get_attribute_value(session_handle, object_handle, &mut attributes)?; let first = attributes .first() .ok_or(HardwareIdentityError::AttributeNotFound(attribute_type))?; Ok(first.ulValueLen as usize) } // Get a variable-length attribute, by first reading its length and then the value. fn get_variable_length_attribute( ctx: &Ctx, session_handle: CK_SESSION_HANDLE, object_handle: CK_OBJECT_HANDLE, attribute_type: CK_ATTRIBUTE_TYPE, ) -> Result<Vec<u8>, HardwareIdentityError> { let length = get_attribute_length(ctx, session_handle, object_handle, attribute_type)?; let value = vec![0; length]; let mut attrs = vec![CK_ATTRIBUTE::new(attribute_type).with_bytes(value.as_slice())]; ctx.get_attribute_value(session_handle, object_handle, &mut attrs)?; Ok(value) } fn get_ec_params( ctx: &Ctx, session_handle: CK_SESSION_HANDLE, object_handle: CK_OBJECT_HANDLE, ) -> Result<Vec<u8>, HardwareIdentityError> { get_variable_length_attribute(ctx, session_handle, object_handle, CKA_EC_PARAMS) } fn get_public_key_handle( ctx: &Ctx, session_handle: CK_SESSION_HANDLE, key_id: &KeyId, ) -> Result<CK_OBJECT_HANDLE, HardwareIdentityError> { get_object_handle_for_key(ctx, session_handle, key_id, CKO_PUBLIC_KEY) } fn get_private_key_handle( ctx: &Ctx, session_handle: CK_SESSION_HANDLE, key_id: &KeyId, ) -> Result<CK_OBJECT_HANDLE, HardwareIdentityError> { get_object_handle_for_key(ctx, session_handle, key_id, CKO_PRIVATE_KEY) } // Find a public or private key. 
fn get_object_handle_for_key( ctx: &Ctx, session_handle: CK_SESSION_HANDLE, key_id: &KeyId, object_class: CK_OBJECT_CLASS, ) -> Result<CK_OBJECT_HANDLE, HardwareIdentityError> { let attributes = [ CK_ATTRIBUTE::new(CKA_ID).with_bytes(key_id), CK_ATTRIBUTE::new(CKA_CLASS).with_ck_ulong(&object_class), ]; ctx.find_objects_init(session_handle, &attributes)?; let object_handles = ctx.find_objects(session_handle, 1)?; let object_handle = *object_handles .first() .ok_or(HardwareIdentityError::KeyNotFound)?; ctx.find_objects_final(session_handle)?; Ok(object_handle) } // A key id is a sequence of pairs of hex digits, case-insensitive. fn str_to_key_id(s: &str) -> Result<KeyIdVec, HardwareIdentityError> { let bytes = hex::decode(s)?; Ok(bytes) } impl HardwareIdentity { fn sign_hash(&self, hash: &Sha256Hash) -> Result<Vec<u8>, String> { let private_key_handle = get_private_key_handle(&self.ctx, self.session_handle, &self.key_id) .map_err(|e| format!("Failed to get private key handle: {e}"))?; let mechanism = CK_MECHANISM { mechanism: CKM_ECDSA, pParameter: ptr::null_mut(), ulParameterLen: 0, }; self.ctx .sign_init(self.session_handle, &mechanism, private_key_handle) .map_err(|e| format!("Failed to initialize signature: {e}"))?; self.ctx .sign(self.session_handle, hash) .map_err(|e| format!("Failed to generate signature: {e}")) } } impl Drop for HardwareIdentity { fn drop(&mut self) { if self.logged_in { // necessary? probably not self.ctx.logout(self.session_handle).unwrap(); } self.ctx.close_session(self.session_handle).unwrap(); } } #[cfg(test)] mod tests { use crate::hsm::str_to_key_id; #[test] fn key_id_conversion() { let key_id_v = str_to_key_id("a53f61e3").unwrap(); assert_eq!(key_id_v, vec![0xa5, 0x3f, 0x61, 0xe3]); } }
rust
Apache-2.0
6fef5bfa96d2ed63b84afd173cc049e38cb5a210
2026-01-04T20:16:51.650214Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep/from.rs
ocamlrep/from.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. //! Helpers for implementing `FromOcamlRep::from_ocamlrep` or //! `FromOcamlRepIn::from_ocamlrep_in`. use bumpalo::Bump; use crate::Block; use crate::FromError; use crate::FromOcamlRep; use crate::FromOcamlRepIn; use crate::Value; pub fn expect_int(value: Value<'_>) -> Result<isize, FromError> { match value.as_int() { Some(value) => Ok(value), None => Err(FromError::ExpectedInt(value.to_bits())), } } pub fn expect_nullary_variant(value: Value<'_>, max: usize) -> Result<isize, FromError> { let value = expect_int(value)?; let max_as_isize: isize = max.try_into().unwrap(); if 0 <= value && value <= max_as_isize { Ok(value) } else { Err(FromError::NullaryVariantTagOutOfRange { max, actual: value }) } } pub fn expect_block(value: Value<'_>) -> Result<Block<'_>, FromError> { match value.as_block() { Some(block) => Ok(block), None => Err(FromError::ExpectedBlock(value.as_int().unwrap())), } } pub fn expect_block_size(block: Block<'_>, size: usize) -> Result<(), FromError> { if block.size() != size { return Err(FromError::WrongBlockSize { expected: size, actual: block.size(), }); } Ok(()) } pub fn expect_block_tag(block: Block<'_>, tag: u8) -> Result<(), FromError> { if block.tag() != tag { return Err(FromError::ExpectedBlockTag { expected: tag, actual: block.tag(), }); } Ok(()) } pub fn expect_block_with_size_and_tag( value: Value<'_>, size: usize, tag: u8, ) -> Result<Block<'_>, FromError> { let block = expect_block(value)?; expect_block_size(block, size)?; expect_block_tag(block, tag)?; Ok(block) } pub fn expect_tuple(value: Value<'_>, size: usize) -> Result<Block<'_>, FromError> { let block = expect_block(value)?; expect_block_size(block, size)?; if block.tag() != 0 { return Err(FromError::ExpectedZeroTag(block.tag())); } Ok(block) } pub fn field<T: FromOcamlRep>(block: Block<'_>, field: 
usize) -> Result<T, FromError> { T::from_ocamlrep(block[field]).map_err(|e| FromError::ErrorInField(field, Box::new(e))) } pub fn field_in<'a, T: FromOcamlRepIn<'a>>( block: Block<'_>, field: usize, alloc: &'a Bump, ) -> Result<T, FromError> { T::from_ocamlrep_in(block[field], alloc) .map_err(|e| FromError::ErrorInField(field, Box::new(e))) }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep/lib.rs
ocamlrep/lib.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. /*! OcamlRep is a framework for building and interpreting the in-memory representation of OCaml values. This is useful for converting Rust values to OCaml values and vice-versa, and for building and storing OCaml values off of the OCaml runtime's garbage-collected heap. OcamlRep provides a generic interface abstracting over the allocation of OCaml values, allowing custom allocators to choose where values are allocated (including directly onto the OCaml heap). # Example: build an OCaml value ############################################### This crate provides an arena-allocator which manages the memory in which converted OCaml values are stored. When the arena is dropped, the values are freed. ```rust // Import the Allocator trait for access to its `add` method, which builds an // OCaml-value representation of the argument (using `OcamlRep::to_ocamlrep`) // and returns that value. use ocamlrep::{Allocator, Arena, Value}; // The `ocamlrep` crate provides implementations of `OcamlRep` for builtin types // like `Option`, `String`, integers, and tuples. This allows them to be // converted to OCaml values using the Allocator trait (which Arena implements). let tuple = (Some(42), String::from("a")); // Allocate a chunk of Rust-managed backing storage for our OCaml value. let arena = Arena::new(); // This value borrows the Arena, to ensure that we cannot continue to use it // after the Arena has been freed. OcamlRep values do not borrow the Rust values // they were converted from--the string "a" is copied into the Arena. let ocamlrep_value: Value<'_> = arena.add(&tuple); // We must now convert the value to a usize which can be handed over to the // OCaml runtime. We must take care when doing this to ensure that our OCaml // program doesn't use the value after the Arena is freed. 
let ocaml_value: usize = ocamlrep_value.to_bits(); ``` # Example: return an OCaml value to the OCaml runtime ######################### The value `ocaml_value` from the previous example is suitable to be handed to the OCaml runtime. We might do so with an extern declaration like this: ```ocaml external get_tuple : unit -> int option * string = "get_tuple" let use_rust_tuple () = match get_tuple () with | (Some i, s) -> Printf.printf "%d, %s\n" i s | (None, s) -> Printf.printf "None, %s\n" s ``` We could provide this symbol from the Rust side like this: ```rust #[unsafe(no_mangle)] pub extern "C" fn get_tuple(_unit: usize) -> usize { use ocamlrep::{Allocator, Arena}; let arena = Box::leak(Box::new(Arena::new())); let ocaml_tuple = arena.add(&(Some(42), String::from("a"))); // This is safe because we leaked the Arena--no matter what we do with this // value on the OCaml side, we'll never use-after-free. ocaml_tuple.to_bits() } ``` But this leaks memory. For small amounts of memory, or a short-lived process, this might be fine. If not, we might choose another strategy... 
# Example: lend an OCaml value to the OCaml runtime ########################### Instead, we could register an OCaml callback which uses the value: ```ocaml let use_tuple (tuple : int option * string) : unit = match tuple with | (Some i, s) -> Printf.printf "%d, %s\n" i s | (None, s) -> Printf.printf "None, %s\n" s external make_and_use_tuple : unit -> unit = "make_and_use_tuple" let () = Callback.register "use_tuple" use_tuple; make_and_use_tuple () ``` And call into OCaml from Rust (using the `ocaml` crate) to hand over the value: ```rust #[unsafe(no_mangle)] pub extern "C" fn make_and_use_tuple(_unit: usize) -> usize { use ocamlrep::{Allocator, Arena}; let arena = Arena::new(); let tuple_ocamlrep = arena.add(&(Some(42), String::from("a"))); let use_tuple = ocaml::named_value("use_tuple") .expect("use_tuple must be registered using Callback.register"); // This is safe because we are passing the value to `use_tuple`, which // doesn't store the value and returns before we free the Arena. let ocaml_tuple: usize = tuple_ocamlrep.to_bits(); use_tuple .call(ocaml::Value::new(ocaml_tuple)) .expect("use_tuple must be a function"); // Free the arena and return unit. ocaml::core::mlvalues::UNIT } ``` # Example: pass an OCaml value to the OCaml runtime ########################### The `ocamlrep_ocamlpool` crate provides an `Allocator` which builds values on the OCaml runtime's garbage-collected heap. We can replace the memory-leaking implementation from the [second example ("return an OCaml value to the OCaml runtime")](#example-return-an-ocaml-value-to-the-ocaml-runtime) with one that allows the OCaml value to be garbage-collected when no longer used: ```rust #[unsafe(no_mangle)] pub extern "C" fn get_tuple(_unit: usize) -> usize { ocamlrep_ocamlpool::to_ocaml(&(Some(42), String::from("a"))) } ``` The `ocamlrep_ocamlpool` crate provides a convenience macro for this use case: ```rust use ocamlrep_ocamlpool::ocaml_ffi; ocaml_ffi! 
{ // This expands to code similar to the above definition of get_tuple. fn get_tuple() -> (Option<i32>, String) { (Some(42), String::from("a")) } } ``` # Example: pass an OCaml value to Rust ######################################## An `Allocator` converts Rust values which implement the `OcamlRep` trait into OCaml values using the method `OcamlRep::to_ocamlrep`. The `OcamlRep` trait also provides a method `OcamlRep::from_ocamlrep` (and an FFI helper named `OcamlRep::from_ocaml`) for conversion in the other direction. If we call into Rust like this: ```ocaml external use_tuple : int option * string -> unit = "use_tuple" let () = use_tuple (Some 42, "a") ``` We could convert the tuple to a Rust value like this: ```rust #[unsafe(no_mangle)] pub extern "C" fn use_tuple(ocaml_tuple: usize) -> usize { // Import the OcamlRep trait to use its associated function `from_ocaml`. use ocamlrep::OcamlRep; // Safety: `ocaml_tuple` is a valid OCaml value allocated by the OCaml // runtime, all objects reachable from that value are also valid OCaml // values, and those objects cannot be concurrently modified while // `from_ocaml` runs. This is true because that graph of objects is owned by // the OCaml runtime, we didn't expose any of their pointers in any other // FFI functions, and the OCaml runtime is both single-threaded and // currently interrupted by an FFI call into this function. let tuple_result = unsafe { <(Option<i32>, String)>::from_ocaml(ocaml_tuple) }; let tuple = tuple_result .expect("Expected a value of type `int option * string`, \ but got some other type or invalid UTF-8"); println!("{:?}", tuple); ocaml::core::mlvalues::UNIT } ``` The `ocaml_ffi!` macro in the `ocamlrep_ocamlpool` crate supports this use case: ```rust use ocamlrep_ocamlpool::ocaml_ffi; ocaml_ffi! { // This expands to code similar to the above definition of use_tuple. 
fn use_tuple(tuple: (Option<i32>, String)) { println!("{:?}", tuple) } } ``` Note that the value returned by `from_ocaml` is owned--it is effectively a deep clone of the OCaml value. For instance, the OCaml string "a" is copied into a newly allocated Rust String--the Rust side does not need to worry about the OCaml value being garbage collected. Take care when using `from_ocaml`, `from_ocamlrep`, or `ocaml_ffi!` with types containing `String`s. OCaml strings are not guaranteed to be UTF-8, so `from_ocaml` may return an `Err` because a string was invalid UTF-8 rather than because the OCaml code did not pass a value of the expected type (which should be forbidden by the OCaml compiler, provided that the `external` declaration is annotated with the correct type). If representing invalid UTF-8 is a requirement, use `Vec<u8>` instead (an implementation of `OcamlRep` which converts `Vec<u8>` to an OCaml `string` is provided). # Example: implementing OcamlRep ############################################## Writing a manual implementation of OcamlRep requires one to choose some type with which their value should be represented in OCaml and write `to_ocamlrep` and `from_ocamlrep` conversion functions which build and interpret the in-memory representation of that type. 
This crate provides implementations of OcamlRep for several std types, and chooses these OCaml types to represent them with: | Rust type | OCaml type | |------------------------|-----------------------------------------| | `()` | `unit` | | `bool` | `bool` | | `usize`/`isize` | `int` | | [`Option`][OptionRs] | [`option`][OptionMl] | | [`Result`][ResultRs] | [`result`][ResultMl] | | [`Vec`][Vec] | [`list`][List] | | [`String`][StringRs] | [`string`][StringMl] (when valid UTF-8) | | [`Vec<u8>`][Vec] | [`string`][StringMl] | | [`PathBuf`][PathBuf] | [`string`][StringMl] | | [`BTreeMap`][BTreeMap] | [`Map`][Map] | | [`BTreeSet`][BTreeSet] | [`Set`][Set] | [OptionRs]: https://doc.rust-lang.org/beta/std/option/enum.Option.html [OptionMl]: https://caml.inria.fr/pub/docs/manual-ocaml/libref/Option.html [ResultRs]: https://doc.rust-lang.org/std/result/enum.Result.html [ResultMl]: https://caml.inria.fr/pub/docs/manual-ocaml/libref/Result.html [Vec]: https://doc.rust-lang.org/std/vec/struct.Vec.html [List]: https://caml.inria.fr/pub/docs/manual-ocaml/libref/List.html [StringRs]: https://doc.rust-lang.org/std/string/struct.String.html [StringMl]: https://caml.inria.fr/pub/docs/manual-ocaml/libref/String.html [PathBuf]: https://doc.rust-lang.org/std/path/struct.PathBuf.html [BTreeMap]: https://doc.rust-lang.org/std/collections/struct.BTreeMap.html [Map]: https://caml.inria.fr/pub/docs/manual-ocaml/libref/Map.html [BTreeSet]: https://doc.rust-lang.org/std/collections/struct.BTreeSet.html [Set]: https://caml.inria.fr/pub/docs/manual-ocaml/libref/Set.html See the [`impls`](../src/ocamlrep/impls.rs.html) submodule for examples of conversions for std types, and [Real World OCaml](https://dev.realworldocaml.org/runtime-memory-layout.html) and the [Interfacing C with OCaml](https://ocaml.org/manual/intfc.html) section of the OCaml manual for description of the OCaml representation of values. 
Since manually implementing OcamlRep is cumbersome and error-prone, the `ocamlrep_derive` crate provides a procedural macro for deriving the OcamlRep trait. When derived for custom types like this: ```ocaml use ocamlrep_derive::OcamlRep; #[derive(OcamlRep)] struct Foo { a: isize, b: Vec<bool>, } #[derive(OcamlRep)] enum Fruit { Apple, Orange(isize), Pear { num: isize }, Kiwi, } ``` It produces implementations of OcamlRep which construct values belonging to these roughly-equivalent OCaml types: ```ocaml type foo = { a: int; b: bool list; } type fruit = | Apple | Orange of int | Pear of { num: int } | Kiwi ``` The `oxidize` program at hphp/hack/src/hh_oxidize will take OCaml source files like the one above and generate Rust source files which define "roughly-equivalent" types (like the Rust source above). These types will derive an implementation of `OcamlRep` which will produce OCaml values belonging to the corresponding type in the OCaml source file. */ mod arena; mod block; mod cache; mod error; mod impls; mod value; pub mod from; pub mod ptr; pub mod rc; pub use arena::Arena; pub use block::ABSTRACT_TAG; pub use block::Block; pub use block::BlockBuilder; pub use block::BlockBytes; pub use block::CLOSURE_TAG; pub use block::CONT_TAG; pub use block::CUSTOM_TAG; pub use block::Color; pub use block::DOUBLE_ARRAY_TAG; pub use block::DOUBLE_TAG; pub use block::FORCING_TAG; pub use block::FORWARD_TAG; pub use block::Header; pub use block::INFIX_TAG; pub use block::LAZY_TAG; pub use block::NO_SCAN_TAG; pub use block::OBJECT_TAG; pub use block::STRING_TAG; pub use bumpalo::Bump; pub use cache::MemoizationCache; pub use error::FromError; pub use impls::OCamlInt; pub use impls::bytes_from_ocamlrep; pub use impls::bytes_to_ocamlrep; pub use impls::sorted_iter_to_ocaml_map; pub use impls::sorted_iter_to_ocaml_set; pub use impls::str_from_ocamlrep; pub use impls::str_to_ocamlrep; pub use impls::vec_from_ocaml_map; pub use impls::vec_from_ocaml_map_in; pub use 
impls::vec_from_ocaml_set; pub use impls::vec_from_ocaml_set_in; pub use ocamlrep_derive::FromOcamlRep; pub use ocamlrep_derive::FromOcamlRepIn; pub use ocamlrep_derive::ToOcamlRep; pub use value::Value; // 'mlvalues.h' pub const DOUBLE_WOSIZE: usize = std::mem::size_of::<f64>() / std::mem::size_of::<usize>(); // 'gc.h' pub const CAML_WHITE: usize = 0 << 8; pub const CAML_GRAY: usize = 1 << 8; pub const CAML_BLUE: usize = 2 << 8; pub const CAML_BLACK: usize = 3 << 8; /// A data structure that can be converted to an OCaml value. /// /// Types which implement both `ToOcamlRep` and `FromOcamlRep` (or /// `FromOcamlRepIn`) should provide compatible implementations thereof. /// In other words, it is expected that for any value with type `T`, /// `T::from_ocamlrep(value.to_ocamlrep(alloc)) == Ok(value)`. pub trait ToOcamlRep { /// Allocate an OCaml representation of `self` using the given Allocator. /// /// Implementors of this method must not mutate or drop any values after /// passing them to `Allocator::add` (or invoking `to_ocamlrep` on them), /// else `Allocator::memoized` may return incorrect results (this can /// generally only be done using internal-mutability types like `RefCell`, /// `Mutex`, or atomics, or by using `unsafe`). fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a>; } /// An interface for allocating OCaml values in some allocator-defined memory region. pub trait Allocator: Sized { /// Return a token which uniquely identifies this Allocator. The token must /// be unique (among all instances of all implementors of the Allocator /// trait), and an instance of an implementor of Allocator must return the /// same token throughout its entire lifetime. fn generation(&self) -> usize; /// Allocate a block with enough space for `size` fields, write its header, /// and return it. fn block_with_size_and_tag(&self, size: usize, tag: u8) -> BlockBuilder<'_>; /// Write the given value to the `index`th field of `block`. 
/// /// # Panics /// /// Panics if `index` is out of bounds for `block` (i.e., greater than or /// equal to the block's size). fn set_field<'a>(&self, block: &mut BlockBuilder<'a>, index: usize, value: Value<'a>); /// # Safety /// /// Must be used only with values allocated by this `Allocator`. The caller /// may assume the returned pointer is valid only until some other /// `Allocator` method is called on `self` (since an allocation may /// invalidate the pointed-to memory). /// /// Intended to be used only in implementations of `Allocator::set_field` /// and in conversion-to-OCaml functions requiring access to the raw memory /// of a block (e.g., `bytes_to_ocamlrep`). unsafe fn block_ptr_mut<'a>(&self, block: &mut BlockBuilder<'a>) -> *mut Value<'a>; #[inline(always)] fn block_with_size(&self, size: usize) -> BlockBuilder<'_> { self.block_with_size_and_tag(size, 0u8) } /// Convert the given data structure to an OCaml value. Structural sharing /// (via references or `Rc`) will not be preserved unless `add` is invoked /// within an outer invocation of `add_root`. /// /// To preserve structural sharing without using `add_root` (and the /// overhead of maintaining a cache that comes with it), consider using /// `ocamlrep::rc::RcOc` instead of `Rc`. #[inline(always)] fn add<'a, T: ToOcamlRep + ?Sized>(&'a self, value: &'a T) -> Value<'a> { value.to_ocamlrep(self) } /// Convert the given `Copy` data structure to an OCaml value. #[inline(always)] fn add_copy<'a, T: ToOcamlRep + Copy + 'static>(&'a self, value: T) -> Value<'a> { let value_ref = &value; // SAFETY: add/to_ocamlrep cannot reference `value` in the `Value` // they return, and the `Copy + 'static` bounds ensure that we're not // working with a reference or Rc which might be invalidated. 
self.add(unsafe { std::mem::transmute::<&'_ T, &'a T>(value_ref) }) } /// Given the address and size of some value, and a function to convert the /// value to OCaml (e.g., a closure `|alloc| (*slice).to_ocamlrep(alloc)`), /// either execute the function and return its result, or return a cached /// result for that address. /// /// If `memoized` is invoked without an outer invocation of `add_root`, it /// must never return a cached result. If `memoized` is invoked within an /// outer invocation of `add_root`, it must return a result computed within /// that invocation of `add_root`. fn memoized<'a>( &'a self, ptr: usize, size_in_bytes: usize, f: impl FnOnce(&'a Self) -> Value<'a>, ) -> Value<'a>; /// Convert the given data structure to an OCaml value. Structural sharing /// (via references or `Rc`) will be preserved. /// /// Note that sharing is preserved using a memoization cache keyed off of /// address and size only. If the given value contains multiple references /// or slices pointing to equal-sized views of the same data, but with /// different OCaml representations, e.g.: /// /// ``` /// let x: &(u32, u32) = &(0u32, 1u32); /// let y: &u64 = unsafe { std::mem::transmute(x) }; /// let value = (x, y); /// alloc.add_root(&value) // PROBLEM! /// ``` /// /// Then the converted OCaml value will not have the intended type (i.e., /// the one which would be produced by `Allocator::add`). In this example, /// the allocator will memoize the result of converting `x` (an OCaml /// tuple), and use it for both `x` and `y` in `value` (since they point to /// equal-sized types at the same address). /// /// # Panics /// /// `add_root` is not re-entrant, and panics upon attempts to do so. fn add_root<'a, T: ToOcamlRep + ?Sized>(&'a self, value: &'a T) -> Value<'a>; /// Allocate a block with tag `STRING_TAG` and enough space for a string of /// `len` bytes. Write its header and return a `BlockBytes` wrapping the /// buffer and the block. 
fn byte_string_with_len(&self, len: usize) -> BlockBytes<'_> { let word_size = std::mem::size_of::<*const u8>(); let words = (len + 1/*null-ending*/).div_ceil(word_size); let length = words * word_size; let mut block = self.block_with_size_and_tag(words, STRING_TAG); unsafe { let block = self.block_ptr_mut(&mut block); *block.add(words - 1) = Value::from_bits(0); let block_bytes = block as *mut u8; *block_bytes.add(length - 1) = (length - len - 1) as u8; BlockBytes::new(std::slice::from_raw_parts_mut(block_bytes, len)) } } } /// A type which can be reconstructed from an OCaml value. /// /// Types which implement both `ToOcamlRep` and `FromOcamlRep` should provide /// compatible implementations thereof. In other words, it is expected that for /// any value, `T::from_ocamlrep(value.to_ocamlrep(alloc)) == Ok(value)`. pub trait FromOcamlRep: Sized { /// Convert the given ocamlrep Value to a value of type `Self`, if possible. fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError>; /// Convert the given OCaml value to a value of type `Self`, if possible. /// /// # Safety /// /// The given value must be a valid OCaml value. All values reachable from /// the given value must be valid OCaml values. None of these values may be /// naked pointers. None of these values may be modified while `from_ocaml` /// is running. unsafe fn from_ocaml(value: usize) -> Result<Self, FromError> { unsafe { Self::from_ocamlrep(Value::from_bits(value)) } } } /// A type which can be reconstructed from an OCaml value. /// /// Types which implement both `ToOcamlRep` and `FromOcamlRepIn` should provide /// compatible implementations thereof. In other words, it is expected that for /// any value, `T::from_ocamlrep_in(value.to_ocamlrep(alloc), bump) == Ok(value)`. pub trait FromOcamlRepIn<'a>: Sized { /// Convert the given ocamlrep Value to a value of type `Self`, allocated in /// the given arena. fn from_ocamlrep_in(value: Value<'_>, arena: &'a Bump) -> Result<Self, FromError>; }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep/value.rs
ocamlrep/value.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use std::borrow::Cow; use std::collections::HashMap; use std::fmt; use std::fmt::Debug; use std::marker::PhantomData; use crate::Allocator; use crate::block; use crate::block::Block; use crate::block::Header; #[inline(always)] pub const fn is_ocaml_int(value: usize) -> bool { value & 1 == 1 } #[inline(always)] pub const fn isize_to_ocaml_int(value: isize) -> usize { ((value as usize) << 1) | 1 } #[inline(always)] pub const fn ocaml_int_to_isize(value: usize) -> isize { (value as isize) >> 1 } /// A value, as represented by OCaml. Valid, immutable, and immovable for /// lifetime `'a`. /// /// Either a tagged integer value or a pointer to a [`Block`](struct.Block.html) /// containing fields or binary data. #[repr(transparent)] #[derive(Clone, Copy, Hash, PartialEq, Eq)] pub struct Value<'a>(pub(crate) usize, PhantomData<&'a ()>); impl<'a> Value<'a> { #[inline(always)] pub const fn is_int(self) -> bool { is_ocaml_int(self.0) } #[inline(always)] pub const fn is_block(self) -> bool { !self.is_int() } #[inline(always)] pub const fn int(value: isize) -> Value<'static> { Value(isize_to_ocaml_int(value), PhantomData) } #[inline(always)] pub fn as_int(self) -> Option<isize> { if self.is_int() { Some(ocaml_int_to_isize(self.0)) } else { None } } #[inline(always)] pub fn as_block(self) -> Option<Block<'a>> { if self.is_int() { return None; } let block = unsafe { let ptr = self.0 as *const Value<'_>; let header = ptr.offset(-1); let size = Header::from_bits((*header).to_bits()).size() + 1; std::slice::from_raw_parts(header, size) }; Some(Block(block)) } #[inline(always)] pub fn as_float(self) -> Option<f64> { let block = self.as_block()?; if block.tag() != block::DOUBLE_TAG { return None; } Some(f64::from_bits(block[0].0 as u64)) } #[inline(always)] pub fn as_double_array(self) -> Option<&'a [f64]> 
{ let block = self.as_block()?; if block.tag() != block::DOUBLE_ARRAY_TAG { return None; } Some(unsafe { std::slice::from_raw_parts(block.0.as_ptr().add(1) as *const f64, block.size()) }) } #[inline(always)] pub fn as_byte_string(self) -> Option<&'a [u8]> { let block = self.as_block()?; if block.tag() != block::STRING_TAG { return None; } let slice = unsafe { let size = block.size() * std::mem::size_of::<Value<'_>>(); let ptr = self.0 as *mut u8; let last_byte = ptr.offset(size as isize - 1); let padding = *last_byte; let size = size - padding as usize - 1; std::slice::from_raw_parts(ptr, size) }; Some(slice) } #[inline(always)] pub fn as_str(self) -> Option<Cow<'a, str>> { let slice = self.as_byte_string()?; Some(String::from_utf8_lossy(slice)) } #[inline(always)] pub fn field(self, index: usize) -> Option<Value<'a>> { self.field_ref(index).copied() } #[inline(always)] pub fn field_ref(self, index: usize) -> Option<&'a Value<'a>> { self.as_block()?.as_values()?.get(index) } /// Given a pointer to the first field of a [`Block`](struct.Block.html), /// create a pointer `Value` referencing that `Block`. /// /// # Safety /// /// This method is unsafe because it requires that the pointed-to Value is /// the first field of a block, which must be preceded by a valid Header /// correctly describing the block's size and tag (i.e., value.offset(-1) /// should point to that Header). To be used only with pointers returned by /// Arena allocation methods (e.g., /// [`Allocator::block_with_size_and_tag`](trait.Allocator.html#tymethod.block_with_size_and_tag). #[inline(always)] pub unsafe fn from_ptr(value: *const Value<'a>) -> Value<'a> { Value(value as usize, PhantomData) } /// # Safety /// /// The lifetime 'a returned is arbitrarily chosen, and does not necessarily /// reflect the actual lifetime of the data. 
If the given value is a /// pointer, it is up to the caller to ensure that for the duration of this /// lifetime, the block this pointer points to, and every value reachable /// from it, does not get written to. #[inline(always)] pub const unsafe fn from_bits(value: usize) -> Value<'a> { Value(value, PhantomData) } /// Convert this value to a usize, which can be handed to the OCaml runtime /// to be used as an OCaml value. Take care that the returned value does /// not outlive the arena. #[inline(always)] pub const fn to_bits(self) -> usize { self.0 } /// Helper for `Value::clone_with_allocator`. pub(crate) fn clone_with<'b>( self, alloc: &'b impl Allocator, seen: &mut HashMap<usize, Value<'b>>, ) -> Value<'b> { match self.as_block() { None => Value(self.0, PhantomData), Some(block) => { if let Some(&copied_value) = seen.get(&self.0) { return copied_value; } let copied_value = block.clone_with(alloc, seen); seen.insert(self.0, copied_value); copied_value } } } /// Recursively clone this `Value` using the given `Allocator`. Structural /// sharing is preserved (i.e., values which are physically equal before the /// clone will be physically equal after the clone). pub fn clone_with_allocator(self, alloc: &impl Allocator) -> Value<'_> { self.clone_with(alloc, &mut HashMap::new()) } } impl Debug for Value<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.as_block() { None => write!(f, "{}", self.as_int().unwrap()), Some(block) => write!(f, "{block:?}"), } } }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep/arena.rs
ocamlrep/arena.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.

use std::cell::RefCell;
use std::cmp::max;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;

use crate::Allocator;
use crate::BlockBuilder;
use crate::MemoizationCache;
use crate::ToOcamlRep;
use crate::Value;
use crate::block::Header;

/// One contiguous segment of arena storage. Allocation is a bump of `index`
/// into `data`; exhausted chunks are kept alive via the `prev` linked list so
/// previously handed-out slices remain valid for the arena's lifetime.
struct Chunk {
    data: Box<[Value<'static>]>,
    // Bump pointer: next free slot in `data`.
    index: usize,
    /// Pointer to the prev arena segment.
    prev: Option<Box<Chunk>>,
}

impl Chunk {
    fn with_capacity(capacity: usize) -> Self {
        Self {
            index: 0,
            data: vec![Value::int(0); capacity].into_boxed_slice(),
            prev: None,
        }
    }

    fn capacity(&self) -> usize {
        self.data.len()
    }

    /// True if `requested_size` words fit in the remaining free space.
    fn can_fit(&self, requested_size: usize) -> bool {
        self.index + requested_size <= self.data.len()
    }

    /// Bump-allocate `requested_size` words. Caller must check `can_fit`
    /// first; otherwise the slice index below panics.
    #[inline]
    pub fn alloc(&mut self, requested_size: usize) -> &mut [Value<'static>] {
        let previous_index = self.index;
        self.index += requested_size;
        &mut self.data[previous_index..self.index]
    }
}

// The generation number is used solely to identify which arena a cached value
// belongs to in `RcOc`.
//
// We use usize::max_value() / 2 here to avoid colliding with ocamlpool and
// SlabAllocator generation numbers (ocamlpool starts at 0, and SlabAllocator
// starts at usize::max_value() / 4). This generation trick isn't sound with the
// use of multiple generation counters, but this mitigation should make it
// extremely difficult to mix up values allocated with ocamlpool, Arena, and
// SlabAllocator in practice (one would have to serialize the same value with
// multiple Allocators, and only after increasing the generation of one by an
// absurd amount).
//
// If we add more allocators, we might want to rethink this strategy.
static NEXT_GENERATION: AtomicUsize = AtomicUsize::new(usize::MAX / 2);

/// An [`Allocator`](trait.Allocator.html) which builds values in Rust-managed
/// memory. The memory is freed when the Arena is dropped.
pub struct Arena {
    // Unique per-arena id handed out from NEXT_GENERATION; see comment above.
    generation: usize,
    current_chunk: RefCell<Chunk>,
    cache: MemoizationCache,
}

impl Default for Arena {
    /// Create a new Arena with 4KB of capacity preallocated.
    fn default() -> Self {
        Arena::new()
    }
}

impl Arena {
    /// Create a new Arena with 4KB of capacity preallocated.
    pub fn new() -> Self {
        Self::with_capacity(1024 * 4)
    }

    /// Create a new Arena with `capacity_in_bytes` preallocated.
    pub fn with_capacity(capacity_in_bytes: usize) -> Self {
        let generation = NEXT_GENERATION.fetch_add(1, Ordering::SeqCst);
        // Minimum of 2 words: enough for one header plus one field.
        let capacity_in_words = max(2, capacity_in_bytes / std::mem::size_of::<Value<'_>>());
        Self {
            generation,
            current_chunk: RefCell::new(Chunk::with_capacity(capacity_in_words)),
            cache: MemoizationCache::new(),
        }
    }

    /// Allocate `requested_size` words from the current chunk, growing the
    /// chunk list if necessary. Retired chunks are chained via `prev` so
    /// their contents stay alive until the Arena is dropped.
    #[inline]
    #[allow(clippy::mut_from_ref)]
    fn alloc<'a>(&'a self, requested_size: usize) -> &'a mut [Value<'a>] {
        if !self.current_chunk.borrow().can_fit(requested_size) {
            let prev_chunk_capacity = self.current_chunk.borrow().capacity();
            // New chunk is at least double the request and never smaller than
            // the previous chunk, so capacity grows geometrically.
            let prev_chunk = self.current_chunk.replace(Chunk::with_capacity(max(
                requested_size * 2,
                prev_chunk_capacity,
            )));
            self.current_chunk.borrow_mut().prev = Some(Box::new(prev_chunk));
        }
        let mut chunk = self.current_chunk.borrow_mut();
        let slice = chunk.alloc(requested_size);
        // Transmute the 'static lifetime to 'a, to allow Values which point to
        // blocks allocated using this Arena to be stored in other such blocks.
        // The lifetime ensures that callers cannot allow such Values to outlive
        // the arena (and therefore outlive the block they point to). This
        // transmute violates the 'static lifetime in Chunk, so it is critical
        // for safety that we never expose a view of those Values to code
        // outside this module (using the type `Value<'static>`).
        // Also transmute the unnamed lifetime referring to the mutable borrow
        // of `chunk` to 'a. This allows callers to hold multiple mutable blocks
        // at once. This is safe because the blocks handed out by Chunk::alloc
        // are non-overlapping, so there is no aliasing.
        unsafe { std::mem::transmute::<&'_ mut [Value<'static>], &'a mut [Value<'a>]>(slice) }
    }

    /// Convert `value` to its OCaml representation in this arena (without
    /// the memoization cache; see `add_root` for the cached variant).
    #[inline(always)]
    pub fn add<'a, T: ToOcamlRep + ?Sized>(&'a self, value: &'a T) -> Value<'a> {
        value.to_ocamlrep(self)
    }

    /// Convert `value` with the memoization cache active, preserving sharing.
    #[inline(always)]
    pub fn add_root<'a, T: ToOcamlRep + ?Sized>(&'a self, value: &'a T) -> Value<'a> {
        Allocator::add_root(self, value)
    }
}

impl Allocator for Arena {
    #[inline(always)]
    fn generation(&self) -> usize {
        self.generation
    }

    fn block_with_size_and_tag(&self, size: usize, tag: u8) -> BlockBuilder<'_> {
        // Allocate one extra word for the header in front of the fields.
        let block = self.alloc(size + 1);
        let header = Header::new(size, tag);
        // Safety: We need to make sure that the Header written to index 0 of
        // this slice is never observed as a Value. We guarantee that by not
        // exposing raw Chunk memory--only allocated Values.
        block[0] = unsafe { Value::from_bits(header.to_bits()) };
        let slice = &mut block[1..];
        BlockBuilder::new(slice)
    }

    #[inline(always)]
    fn set_field<'a>(&self, block: &mut BlockBuilder<'a>, index: usize, value: Value<'a>) {
        unsafe { *self.block_ptr_mut(block).add(index) = value }
    }

    unsafe fn block_ptr_mut<'a>(&self, block: &mut BlockBuilder<'a>) -> *mut Value<'a> {
        // For this allocator, BlockBuilder addresses are real pointers.
        block.address() as *mut _
    }

    fn memoized<'a>(
        &'a self,
        ptr: usize,
        size: usize,
        f: impl FnOnce(&'a Self) -> Value<'a>,
    ) -> Value<'a> {
        let bits = self.cache.memoized(ptr, size, || f(self).to_bits());
        // SAFETY: The only memoized values in the cache are those computed in
        // the closure on the previous line. Since f returns Value<'a>, any
        // cached bits must represent a valid Value<'a>,
        unsafe { Value::from_bits(bits) }
    }

    fn add_root<'a, T: ToOcamlRep + ?Sized>(&'a self, value: &'a T) -> Value<'a> {
        self.cache.with_cache(|| value.to_ocamlrep(self))
    }
}

#[cfg(test)]
mod tests {
    use std::io::Write;
    use std::time::Instant;

    use super::*;

    #[test]
    fn test_alloc_byte_string_with_len() {
        let arena = Arena::with_capacity(1000);
        let msg: &[u8] = b"Hello world!";
        let mut w = arena.byte_string_with_len(msg.len());
        let bytes_written = w.write(msg).ok().unwrap();
        assert!(bytes_written == msg.len());
        let value: Value<'_> = w.build();
        assert_eq!(value.as_byte_string().unwrap(), msg);
    }

    #[test]
    fn test_alloc_block_of_three_fields() {
        let arena = Arena::with_capacity(1000);
        let mut block = arena.block_with_size(3);
        arena.set_field(&mut block, 0, Value::int(1));
        arena.set_field(&mut block, 1, Value::int(2));
        arena.set_field(&mut block, 2, Value::int(3));
        let block = block.build().as_block().unwrap();

        assert_eq!(block.size(), 3);
        assert_eq!(block[0].as_int().unwrap(), 1);
        assert_eq!(block[1].as_int().unwrap(), 2);
        assert_eq!(block[2].as_int().unwrap(), 3);
    }

    #[test]
    fn test_large_allocs() {
        // Each allocation exceeds the initial capacity, forcing chunk growth.
        let arena = Arena::with_capacity(1000);
        let alloc_block = |size| arena.block_with_size(size).build().as_block().unwrap();

        let max = alloc_block(1000);
        assert_eq!(max.size(), 1000);

        let two_thousand = alloc_block(2000);
        assert_eq!(two_thousand.size(), 2000);

        let four_thousand = alloc_block(4000);
        assert_eq!(four_thousand.size(), 4000);
    }

    // Informal benchmark comparing Box-based allocation with the arena;
    // prints timings rather than asserting anything.
    #[test]
    fn perf_test() {
        let arena = Arena::with_capacity(10_000);
        let alloc_block = |size| arena.block_with_size(size).build().as_block().unwrap();

        println!("Benchmarks for allocating [1] 200,000 times");
        let now = Instant::now();
        for _ in 0..200_000 {
            vec![0; 1].into_boxed_slice();
        }
        println!("Alloc: {:?}", now.elapsed());

        let now = Instant::now();
        for _ in 0..200_000 {
            alloc_block(1);
        }
        println!("Arena: {:?}", now.elapsed());

        println!("Benchmarks for allocating [5] 200,000 times");
        let now = Instant::now();
        for _ in 0..200_000 {
            vec![0; 5].into_boxed_slice();
        }
        println!("Alloc: {:?}", now.elapsed());

        let now = Instant::now();
        for _ in 0..200_000 {
            alloc_block(5);
        }
        println!("Arena: {:?}", now.elapsed());

        println!("Benchmarks for allocating [10] 200,000 times");
        let now = Instant::now();
        for _ in 0..200_000 {
            vec![0; 10].into_boxed_slice();
        }
        println!("Alloc: {:?}", now.elapsed());

        let now = Instant::now();
        for _ in 0..200_000 {
            alloc_block(10);
        }
        println!("Arena: {:?}", now.elapsed());
    }
}
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep/block.rs
ocamlrep/block.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.

use std::collections::HashMap;
use std::fmt;
use std::fmt::Debug;
use std::ops::Index;

use crate::Allocator;
use crate::Value;

/// Blocks with tags greater than or equal to NO_SCAN_TAG contain binary data,
/// and are not scanned by the garbage collector.
///
/// Likewise, we must avoid interpreting the fields of blocks with
/// such tags as Values.
pub const NO_SCAN_TAG: u8 = 251;
pub const FORWARD_TAG: u8 = 250;
pub const INFIX_TAG: u8 = 249;
pub const OBJECT_TAG: u8 = 248;
pub const CLOSURE_TAG: u8 = 247;
pub const LAZY_TAG: u8 = 246;
pub const CONT_TAG: u8 = 245;
pub const FORCING_TAG: u8 = 244;
// NOTE(review): ABSTRACT_TAG shares the value 251 with NO_SCAN_TAG above —
// presumably mirroring the OCaml runtime's tag numbering (mlvalues.h);
// confirm against the runtime headers before changing either constant.
pub const ABSTRACT_TAG: u8 = 251;
pub const STRING_TAG: u8 = 252;
pub const DOUBLE_TAG: u8 = 253;
pub const DOUBLE_ARRAY_TAG: u8 = 254;
pub const CUSTOM_TAG: u8 = 255;

/// A recently-allocated, not-yet-finalized Block.
#[repr(transparent)]
pub struct BlockBuilder<'a> {
    fields: &'a mut [Value<'a>],
}

impl<'a> BlockBuilder<'a> {
    /// `address` may be a pointer or an offset (the `Allocator` which invokes
    /// `BlockBuilder::new` determines the meaning of `BlockBuilder` addresses).
    /// `size` must be greater than 0 and denotes the number of fields in the
    /// block.
    ///
    /// # Panics
    ///
    /// Panics if `fields.is_empty()`.
    #[inline(always)]
    pub fn new(fields: &'a mut [Value<'a>]) -> Self {
        if fields.is_empty() {
            panic!()
        }
        Self { fields }
    }

    /// The address of the field slice passed to `BlockBuilder::new`.
    #[inline(always)]
    pub fn address(&self) -> usize {
        self.fields.as_ptr() as _
    }

    /// The number of fields in this block.
    #[inline(always)]
    pub fn size(&self) -> usize {
        self.fields.len()
    }

    // TODO(jakebailey): This needs to be marked unsafe. The caller must
    // initialize all of the fields.
    #[inline(always)]
    pub fn build(self) -> Value<'a> {
        unsafe { Value::from_ptr(self.fields.as_ptr()) }
    }
}

/// The contents of an OCaml block, consisting of a header and one or more
/// fields of type [`Value`](struct.Value.html).
#[repr(transparent)]
#[derive(Clone, Copy)]
pub struct Block<'arena>(pub(crate) &'arena [Value<'arena>]);

impl<'a> Block<'a> {
    /// The header word stored at slot 0 of the underlying slice.
    #[inline(always)]
    pub fn header(self) -> Header {
        Header(self.0[0].0)
    }

    /// Number of fields, as recorded in the header.
    #[inline(always)]
    pub fn size(self) -> usize {
        self.header().size()
    }

    #[inline(always)]
    pub fn tag(self) -> u8 {
        self.header().tag()
    }

    /// A pointer `Value` referencing this block (points at the first field,
    /// one word past the header, per OCaml convention).
    #[inline(always)]
    pub fn as_value(self) -> Value<'a> {
        unsafe { Value::from_ptr(&self.0[1]) }
    }

    /// The fields of this block as `Value`s, or `None` if the tag marks the
    /// contents as binary data that must not be read as `Value`s.
    #[inline(always)]
    pub fn as_values(self) -> Option<&'a [Value<'a>]> {
        if self.tag() >= NO_SCAN_TAG {
            return None;
        }
        Some(&self.0[1..])
    }

    /// The fields reinterpreted as raw machine words (header excluded).
    #[inline(always)]
    pub fn as_int_slice(self) -> &'a [usize] {
        let slice = &self.0[1..];
        unsafe { std::slice::from_raw_parts(slice.as_ptr().cast(), slice.len()) }
    }

    /// Helper for `Value::clone_with_allocator`.
    pub(crate) fn clone_with<'b, A: Allocator>(
        self,
        alloc: &'b A,
        seen: &mut HashMap<usize, Value<'b>>,
    ) -> Value<'b> {
        let mut block = alloc.block_with_size_and_tag(self.size(), self.tag());
        match self.as_values() {
            Some(fields) => {
                // Scannable block: deep-copy each field (preserving sharing
                // via `seen`).
                for (i, field) in fields.iter().enumerate() {
                    let field = field.clone_with(alloc, seen);
                    alloc.set_field(&mut block, i, field)
                }
            }
            None => {
                // Binary-data block: bitwise copy of the field words.
                // Safety: Both pointers must be valid, aligned, and
                // non-overlapping. Both pointers are the heads of blocks which
                // came from some Allocator. Allocators are required to allocate
                // blocks with usize-aligned pointers, and those blocks are
                // required to be valid for reads and writes for the number of
                // usize-sized fields reported in the size in their header.
                // Allocators are also required to allocate non-overlapping
                // blocks.
                unsafe {
                    std::ptr::copy_nonoverlapping(
                        self.0.as_ptr().offset(1) as *const usize,
                        alloc.block_ptr_mut(&mut block) as *mut usize,
                        self.size(),
                    )
                }
            }
        }
        block.build()
    }
}

impl<'a> Index<usize> for Block<'a> {
    type Output = Value<'a>;

    // Index 0 is the first *field*; the +1 skips the header word.
    #[inline(always)]
    fn index(&self, index: usize) -> &Self::Output {
        &self.0[index + 1]
    }
}

impl Debug for Block<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.tag() == STRING_TAG {
            write!(f, "{:?}", self.as_value().as_str().unwrap())
        } else if self.tag() == DOUBLE_TAG {
            write!(f, "{:?}", self.as_value().as_float().unwrap())
        } else {
            write!(f, "{}{:?}", self.tag(), self.as_values().unwrap())
        }
    }
}

// values from ocaml 'gc.h'
#[repr(usize)]
#[derive(Clone, Copy)]
pub enum Color {
    White = crate::CAML_WHITE,
    Gray = crate::CAML_GRAY,
    Blue = crate::CAML_BLUE,
    Black = crate::CAML_BLACK,
}

/// An OCaml block header word: size in the high bits (>> 10), GC color in
/// the middle, tag in the low 8 bits — matching the layout `with_color`
/// and the accessors below assume.
#[repr(transparent)]
#[derive(Clone, Copy)]
pub struct Header(usize);

impl Header {
    #[inline(always)]
    pub const fn new(size: usize, tag: u8) -> Self {
        Self::with_color(size, tag, Color::White)
    }

    #[inline(always)]
    pub const fn with_color(size: usize, tag: u8, color: Color) -> Self {
        let bits = size << 10 | (color as usize) | (tag as usize);
        Header(bits)
    }

    #[inline(always)]
    pub const fn size(self) -> usize {
        self.0 >> 10
    }

    #[inline(always)]
    pub const fn tag(self) -> u8 {
        // Truncation to u8 extracts the low-byte tag field.
        self.0 as u8
    }

    #[inline(always)]
    pub const fn color(self) -> Color {
        // NOTE(review): assumes CAML_BLACK is the full color bitmask (so that
        // masking with it isolates the color bits) — confirm against gc.h.
        match self.0 & Color::Black as usize {
            crate::CAML_WHITE => Color::White,
            crate::CAML_GRAY => Color::Gray,
            crate::CAML_BLUE => Color::Blue,
            crate::CAML_BLACK => Color::Black,
            _ => unreachable!(),
        }
    }

    #[inline(always)]
    pub const fn from_bits(bits: usize) -> Self {
        Header(bits)
    }

    #[inline(always)]
    pub const fn to_bits(self) -> usize {
        self.0
    }
}

impl Debug for Header {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Header")
            .field("size", &self.size())
            .field("tag", &self.tag())
            .finish()
    }
}

/// Write access to the byte contents of a string block: the slice of bytes
/// still to be written, plus the saved start-of-block pointer for `build`.
pub struct BlockBytes<'a>(&'a mut [u8], *const u8);

impl<'a> BlockBytes<'a> {
    // SAFETY: `bytes` must be the data segment of a valid block with
    // `STRING_TAG`, excluding the trailing padding bytes; see
    // `Allocator::byte_string_with_len`
    pub(crate) unsafe fn new(bytes: &'a mut [u8]) -> Self {
        BlockBytes(bytes, bytes.as_ptr())
    }
}

impl std::io::Write for BlockBytes<'_> {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        // If the number of bytes to be written exceeds the buffer size, error,
        // don't do short writes!
        self.0.write_all(buf)?;
        Ok(buf.len())
    }

    fn flush(&mut self) -> std::io::Result<()> {
        self.0.flush()
    }
}

impl<'a> BlockBytes<'a> {
    /// Finish writing and return the string block as a `Value`.
    ///
    /// # Panics
    ///
    /// Panics unless every byte of the block was written (the remaining
    /// slice must be empty).
    pub fn build(self) -> Value<'a> {
        let BlockBytes(slice, block) = self;
        if !slice.is_empty() {
            panic!("ocamlrep: BlockBytes invariant violation: not all bytes written to.");
        }
        unsafe { Value::from_bits(block as usize) }
    }
}
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep/error.rs
ocamlrep/error.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.

use std::error::Error;
use std::fmt;
use std::num::TryFromIntError;
use std::str::Utf8Error;

/// Returned by
/// [`OcamlRep::from_ocamlrep`](trait.OcamlRep.html#tymethod.from_ocamlrep) when
/// the given [`Value`](struct.Value.html) cannot be converted to a Rust value
/// of the expected type.
#[derive(Debug, PartialEq)]
pub enum FromError {
    /// A string block did not contain valid UTF-8.
    BadUtf8(Utf8Error),
    /// Block tag exceeded the maximum allowed for the target type.
    BlockTagOutOfRange { max: u8, actual: u8 },
    /// Conversion of field `0` failed with the boxed inner error.
    ErrorInField(usize, Box<FromError>),
    /// Expected a block pointer but got this immediate integer.
    ExpectedBlock(isize),
    ExpectedBlockTag { expected: u8, actual: u8 },
    ExpectedBool(isize),
    ExpectedChar(isize),
    /// Expected an immediate integer but got a block pointer (stored as the
    /// raw pointer bits).
    ExpectedInt(usize),
    /// Integer did not fit in OCaml's (n-1)-bit boxed-int-free range.
    Expected63BitInt(isize),
    ExpectedUnit(isize),
    ExpectedZeroTag(u8),
    IntOutOfRange(TryFromIntError),
    NullaryVariantTagOutOfRange { max: usize, actual: isize },
    WrongBlockSize { expected: usize, actual: usize },
    /// Custom block's operations-struct address did not match the expected one.
    UnexpectedCustomOps { expected: usize, actual: usize },
}

impl std::convert::From<TryFromIntError> for FromError {
    fn from(error: TryFromIntError) -> Self {
        FromError::IntOutOfRange(error)
    }
}

impl std::convert::From<Utf8Error> for FromError {
    fn from(error: Utf8Error) -> Self {
        FromError::BadUtf8(error)
    }
}

impl fmt::Display for FromError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use FromError::*;
        match self {
            BadUtf8(_) => write!(f, "Invalid UTF-8"),
            BlockTagOutOfRange { max, actual } => {
                write!(f, "Expected tag value <= {max}, but got {actual}")
            }
            ErrorInField(idx, _) => write!(f, "Failed to convert field {idx}"),
            ExpectedBlock(x) => write!(f, "Expected block, but got integer value {x}"),
            ExpectedBlockTag { expected, actual } => {
                write!(f, "Expected block with tag {expected}, but got {actual}")
            }
            ExpectedBool(x) => write!(f, "Expected bool, but got {x}"),
            ExpectedChar(x) => write!(f, "Expected char, but got {x}"),
            // BUG FIX: this previously used `{x:p}`. Under match ergonomics
            // `x` is `&usize`, so `fmt::Pointer` printed the address of the
            // enum field reference itself — not the block-pointer value the
            // message claims to show. Format the stored bits in hex instead.
            ExpectedInt(x) => {
                write!(f, "Expected integer value, but got block pointer {x:#x}")
            }
            Expected63BitInt(x) => write!(
                f,
                "Expected integer value between -2^(n-2) and 2^(n-2)-1, where n is the number of bits in isize, but got {x}",
            ),
            ExpectedUnit(x) => write!(f, "Expected (), but got {x}"),
            ExpectedZeroTag(x) => write!(
                f,
                "Expected block with tag 0 (tuple, record, cons cell, etc), but got tag value {x}",
            ),
            IntOutOfRange(_) => write!(f, "Integer value out of range"),
            NullaryVariantTagOutOfRange { max, actual } => write!(
                f,
                "Expected nullary variant tag, where 0 <= tag <= {max}, but got {actual}",
            ),
            WrongBlockSize { expected, actual } => write!(
                f,
                "Expected block of size {expected}, but got size {actual}",
            ),
            UnexpectedCustomOps { expected, actual } => write!(
                f,
                "Expected custom operations struct address 0x{expected:x}, but got address 0x{actual:x}",
            ),
        }
    }
}

impl Error for FromError {
    /// Only the variants that wrap another error report a source; all
    /// leaf variants return `None`.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        use FromError::*;
        match self {
            BadUtf8(err) => Some(err),
            ErrorInField(_, err) => Some(err),
            IntOutOfRange(err) => Some(err),
            BlockTagOutOfRange { .. }
            | ExpectedBlock(..)
            | ExpectedBlockTag { .. }
            | ExpectedBool(..)
            | ExpectedChar(..)
            | ExpectedInt(..)
            | Expected63BitInt(..)
            | ExpectedUnit(..)
            | ExpectedZeroTag(..)
            | NullaryVariantTagOutOfRange { .. }
            | WrongBlockSize { .. }
            | UnexpectedCustomOps { .. } => None,
        }
    }
}
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep/rc.rs
ocamlrep/rc.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.

//! Provides `RcOc`, a single-threaded reference-counting pointer. `RcOc` stands
//! for "reference counted with Ocaml-value cache".

use std::cell::Cell;
use std::cmp::Ordering;
use std::fmt;
use std::hash::Hash;
use std::hash::Hasher;
use std::ops::Deref;
use std::rc::Rc;

use bumpalo::Bump;
use serde::Deserialize;
use serde::Deserializer;
use serde::Serialize;
use serde::Serializer;

use crate::Allocator;
use crate::FromError;
use crate::FromOcamlRep;
use crate::ToOcamlRep;
use crate::Value;

// Cache sentinel: the OCaml unit value, used when no conversion is cached.
const UNIT: usize = crate::value::isize_to_ocaml_int(0);
// Generation value that can never match a real allocator generation, so a
// freshly-created (or cleared) cache entry is always a miss.
const INVALID_GENERATION: usize = usize::MAX;

/// The shared payload of an `RcOc`: the value plus a one-slot cache of its
/// converted OCaml representation (`forward_pointer`), tagged with the
/// allocator generation the cached bits belong to.
struct OcamlValueCache<T> {
    forward_pointer: Cell<usize>,
    generation: Cell<usize>,
    value: T,
}

impl<T> OcamlValueCache<T> {
    #[inline(always)]
    pub fn new(value: T) -> Self {
        OcamlValueCache {
            forward_pointer: Cell::new(UNIT),
            generation: Cell::new(INVALID_GENERATION),
            value,
        }
    }

    /// The cached conversion, but only if it was produced by the allocator
    /// with this `generation`; a cache from another allocator is a miss.
    #[inline(always)]
    pub fn get_in_generation(&self, generation: usize) -> Option<usize> {
        if generation == self.generation.get() {
            Some(self.forward_pointer.get())
        } else {
            None
        }
    }

    #[inline(always)]
    pub fn set(&self, value: usize, generation: usize) {
        assert!(generation != INVALID_GENERATION);
        self.forward_pointer.set(value);
        self.generation.set(generation);
    }

    /// Reset to the never-matching state (see `INVALID_GENERATION`).
    #[inline(always)]
    fn clear(&self) {
        self.forward_pointer.set(UNIT);
        self.generation.set(INVALID_GENERATION);
    }
}

impl<T: Clone> Clone for OcamlValueCache<T> {
    // Note: clones the value only — the cached conversion is NOT carried
    // over (OcamlValueCache::new starts with an empty cache).
    #[inline(always)]
    fn clone(&self) -> Self {
        OcamlValueCache::new(self.value.clone())
    }
}

// Equality compares only the wrapped value; the cache fields are ignored.
#[allow(clippy::partialeq_ne_impl)]
impl<T: PartialEq> PartialEq for OcamlValueCache<T> {
    #[allow(clippy::unconditional_recursion)]
    #[inline(always)]
    fn eq(&self, other: &OcamlValueCache<T>) -> bool {
        self.value.eq(&other.value)
    }

    #[allow(clippy::unconditional_recursion)]
    #[inline(always)]
    fn ne(&self, other: &OcamlValueCache<T>) -> bool {
        self.value.ne(&other.value)
    }
}

impl<T: Eq> Eq for OcamlValueCache<T> {}

/// A single-threaded reference-counting pointer type, which, as a performance
/// optimization, can cache the result of converting the pointed-to value to an
/// OCaml value.
///
/// `RcOc` stands for "reference counted with Ocaml-value cache".
///
/// Internally uses `std::rc::Rc`, so restrictions on `Rc` also apply to `RcOc`.
/// It is encouraged to follow `Rc` conventions (such as preferring the use of
/// `RcOc::clone(x)` rather than `x.clone()`) when using `RcOc`.
pub struct RcOc<T> {
    ptr: Rc<OcamlValueCache<T>>,
}

impl<T> RcOc<T> {
    #[inline(always)]
    pub fn new(value: T) -> Self {
        Self {
            ptr: Rc::new(OcamlValueCache::new(value)),
        }
    }

    /// The cached OCaml conversion, if one exists for this `generation`.
    #[inline(always)]
    pub fn get_cached_value_in_generation(&self, generation: usize) -> Option<usize> {
        (*self.ptr).get_in_generation(generation)
    }

    #[inline(always)]
    pub fn set_cached_value(&self, value: usize, generation: usize) {
        (*self.ptr).set(value, generation)
    }

    #[inline(always)]
    pub fn clear_cache(&self) {
        (*self.ptr).clear();
    }

    #[inline(always)]
    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
        Rc::get_mut(&mut this.ptr).map(|cache| {
            // We are about to give permission to mutate the value, so
            // invalidate the cache.
            cache.clear();
            &mut cache.value
        })
    }

    #[inline(always)]
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        Rc::ptr_eq(&this.ptr, &other.ptr)
    }

    /// Returns the inner value, if the `Rc` has exactly one strong reference.
    ///
    /// Otherwise, an `Err` is returned with the same `Rc` that was passed in.
    ///
    /// This will succeed even if there are outstanding weak references.
    #[inline(always)]
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        match Rc::try_unwrap(this.ptr) {
            Ok(cache) => Ok(cache.value),
            Err(ptr) => Err(Self { ptr }),
        }
    }
}

impl<T: Clone> RcOc<T> {
    #[inline(always)]
    pub fn make_mut(this: &mut Self) -> &mut T {
        // If the refcount is 1, Rc::make_mut will give permission to mutate the
        // value (rather than cloning it), so invalidate the cache.
        // (In the refcount > 1 case, Rc::make_mut clones the payload, and
        // OcamlValueCache's Clone impl already starts with an empty cache.)
        if Rc::strong_count(&this.ptr) == 1 {
            this.clear_cache();
        }
        &mut Rc::make_mut(&mut this.ptr).value
    }
}

impl<T> AsRef<T> for RcOc<T> {
    #[inline(always)]
    fn as_ref(&self) -> &T {
        &self.ptr.as_ref().value
    }
}

impl<T> Clone for RcOc<T> {
    // Cheap clone: bumps the refcount, shares the same cache slot.
    #[inline(always)]
    fn clone(&self) -> Self {
        Self {
            ptr: Rc::clone(&self.ptr),
        }
    }
}

impl<T> Deref for RcOc<T> {
    type Target = T;

    #[inline(always)]
    fn deref(&self) -> &T {
        &self.ptr.deref().value
    }
}

// Comparison/hashing traits below all delegate to the wrapped value, so an
// RcOc<T> behaves like a T in collections and comparisons.
#[allow(clippy::partialeq_ne_impl)]
impl<T: PartialEq> PartialEq for RcOc<T> {
    #[inline(always)]
    fn eq(&self, other: &RcOc<T>) -> bool {
        self.ptr.eq(&other.ptr)
    }

    #[inline(always)]
    fn ne(&self, other: &RcOc<T>) -> bool {
        self.ptr.ne(&other.ptr)
    }
}

impl<T: Eq> Eq for RcOc<T> {}

impl<T: PartialOrd> PartialOrd for RcOc<T> {
    #[inline(always)]
    fn partial_cmp(&self, other: &RcOc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    #[inline(always)]
    fn lt(&self, other: &RcOc<T>) -> bool {
        **self < **other
    }

    #[inline(always)]
    fn le(&self, other: &RcOc<T>) -> bool {
        **self <= **other
    }

    #[inline(always)]
    fn gt(&self, other: &RcOc<T>) -> bool {
        **self > **other
    }

    #[inline(always)]
    fn ge(&self, other: &RcOc<T>) -> bool {
        **self >= **other
    }
}

impl<T: Ord> Ord for RcOc<T> {
    #[inline]
    fn cmp(&self, other: &RcOc<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}

impl<T: Hash> Hash for RcOc<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state);
    }
}

impl<T: fmt::Display> fmt::Display for RcOc<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<T: fmt::Debug> fmt::Debug for RcOc<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<T> fmt::Pointer for RcOc<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&(&**self as *const T), f)
    }
}

impl<T: ToOcamlRep> ToOcamlRep for RcOc<T> {
    /// Convert via the cache: reuse the cached bits when they were produced
    /// by an allocator of the same generation; otherwise convert and cache.
    fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> {
        let generation = alloc.generation();
        match self.get_cached_value_in_generation(generation) {
            Some(value) => unsafe { Value::from_bits(value) },
            None => {
                let value = alloc.add(self.as_ref());
                self.set_cached_value(value.to_bits(), generation);
                value
            }
        }
    }
}

impl<T: FromOcamlRep> FromOcamlRep for RcOc<T> {
    fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> {
        // NB: We don't get any sharing this way.
        Ok(RcOc::new(T::from_ocamlrep(value)?))
    }
}

impl<'a, T: FromOcamlRep> crate::FromOcamlRepIn<'a> for RcOc<T> {
    fn from_ocamlrep_in(value: Value<'_>, _alloc: &'a Bump) -> Result<Self, FromError> {
        Self::from_ocamlrep(value)
    }
}

impl<T: Serialize> Serialize for RcOc<T> {
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        (**self).serialize(serializer)
    }
}

impl<'de, T: Deserialize<'de>> Deserialize<'de> for RcOc<T> {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // NB: We don't get any sharing this way.
        // FWIW, looks like serde doesn't preserve sharing for Rc either.
        // https://github.com/serde-rs/serde/blob/a00aee14950baca7de2e334b895e203b013712da/serde/src/de/impls.rs#L1806-L1808
        Deserialize::deserialize(deserializer).map(RcOc::new)
    }
}
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep/impls.rs
ocamlrep/impls.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use std::borrow::Borrow; use std::borrow::Cow; use std::cell::Cell; use std::cell::RefCell; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::ffi::OsStr; use std::ffi::OsString; use std::hash::BuildHasher; use std::hash::Hash; use std::mem::size_of; use std::path::Path; use std::path::PathBuf; use std::rc::Rc; use std::sync::Arc; use bstr::BStr; use bstr::BString; use bumpalo::Bump; use indexmap::IndexMap; use indexmap::IndexSet; use serde::Deserialize; use serde::Serialize; use crate::Allocator; use crate::FromError; use crate::FromOcamlRep; use crate::FromOcamlRepIn; use crate::ToOcamlRep; use crate::Value; use crate::block; use crate::from; macro_rules! trivial_from_in_impl { ($ty:ty) => { impl<'a> FromOcamlRepIn<'a> for $ty { fn from_ocamlrep_in(value: Value<'_>, _alloc: &'a Bump) -> Result<Self, FromError> { Self::from_ocamlrep(value) } } }; } impl ToOcamlRep for () { fn to_ocamlrep<'a, A: Allocator>(&'a self, _alloc: &'a A) -> Value<'a> { Value::int(0) } } impl FromOcamlRep for () { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { match from::expect_int(value)? { 0 => Ok(()), x => Err(FromError::ExpectedUnit(x)), } } } trivial_from_in_impl!(()); /// Represents an integer in the range [-2^(n-2); 2^(n-2)[, /// which can be safely converted to OCaml int without changing /// the represented int value. #[derive(PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize, Debug)] pub struct OCamlInt(isize); impl OCamlInt { const NBITS: usize = std::mem::size_of::<isize>() * 8; /// Creates an OCamlInt from an isize. Error if the /// isize is outside the safe range [-2^(n-2); 2^(n-2)[. pub fn try_new(i: isize) -> Result<Self, FromError> { // Check that the two most significant bits are the same. 
let msbs_mask: usize = 0b11 << (Self::NBITS - 2); let msbs: usize = (i as usize) & msbs_mask; if msbs == 0 || msbs == msbs_mask { Ok(Self(i)) } else { Err(FromError::Expected63BitInt(i)) } } /// Creates an OCamlInt from an isize. /// This always succeeds, and might flip the MSB (the sign bit) /// to bring the integer value in the safe range [-2^(n-2); 2^(n-2)[. /// This is useful to preserve ordering during conversion: /// For example, you should probably never convert a BTreeSet<isize>, /// but convert a BTreeSet<OCamlInt> instead. pub fn new_erase_msb(i: isize) -> Self { // This could in theory be done with `(i << 1) >> 1`, // But that relies on obscure rules for how << and >> operate on isize. // So we go the explicit way let n = Self::NBITS; let msb2 = i & (1 << (n - 2)); let with_reset_msb = i & !(1 << (n - 1)); let res = with_reset_msb | (msb2 << 1); Self(res) } } impl ToOcamlRep for OCamlInt { fn to_ocamlrep<'a, A: Allocator>(&'a self, _alloc: &'a A) -> Value<'a> { Value::int(self.0) } } impl FromOcamlRep for OCamlInt { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { from::expect_int(value).map(Self) } } impl ToOcamlRep for isize { fn to_ocamlrep<'a, A: Allocator>(&'a self, _alloc: &'a A) -> Value<'a> { Value::int(*self) } } impl FromOcamlRep for isize { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { from::expect_int(value) } } trivial_from_in_impl!(isize); impl ToOcamlRep for usize { fn to_ocamlrep<'a, A: Allocator>(&'a self, _alloc: &'a A) -> Value<'a> { Value::int((*self).try_into().unwrap()) } } impl FromOcamlRep for usize { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { Ok(from::expect_int(value)?.try_into()?) 
} } trivial_from_in_impl!(usize); impl ToOcamlRep for i64 { fn to_ocamlrep<'a, A: Allocator>(&'a self, _alloc: &'a A) -> Value<'a> { Value::int((*self).try_into().unwrap()) } } impl FromOcamlRep for i64 { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { Ok(from::expect_int(value)?.try_into()?) } } trivial_from_in_impl!(i64); impl ToOcamlRep for u64 { fn to_ocamlrep<'a, A: Allocator>(&'a self, _alloc: &'a A) -> Value<'a> { Value::int((*self).try_into().unwrap()) } } impl FromOcamlRep for u64 { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { Ok(from::expect_int(value)?.try_into()?) } } trivial_from_in_impl!(u64); impl ToOcamlRep for i32 { fn to_ocamlrep<'a, A: Allocator>(&'a self, _alloc: &'a A) -> Value<'a> { Value::int((*self).try_into().unwrap()) } } impl FromOcamlRep for i32 { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { Ok(from::expect_int(value)?.try_into()?) } } trivial_from_in_impl!(i32); impl ToOcamlRep for u32 { fn to_ocamlrep<'a, A: Allocator>(&'a self, _alloc: &'a A) -> Value<'a> { Value::int((*self).try_into().unwrap()) } } impl FromOcamlRep for u32 { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { Ok(from::expect_int(value)?.try_into()?) } } trivial_from_in_impl!(u32); impl ToOcamlRep for bool { fn to_ocamlrep<'a, A: Allocator>(&'a self, _alloc: &'a A) -> Value<'a> { Value::int((*self).into()) } } impl FromOcamlRep for bool { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { match from::expect_int(value)? 
{ 0 => Ok(false), 1 => Ok(true), x => Err(FromError::ExpectedBool(x)), } } } trivial_from_in_impl!(bool); impl ToOcamlRep for char { fn to_ocamlrep<'a, A: Allocator>(&'a self, _alloc: &'a A) -> Value<'a> { assert!(*self as u32 <= 255, "char out of range: {self}"); Value::int(*self as isize) } } impl FromOcamlRep for char { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { let c = from::expect_int(value)?; if (0..=255).contains(&c) { Ok(c as u8 as char) } else { Err(FromError::ExpectedChar(c)) } } } trivial_from_in_impl!(char); impl ToOcamlRep for f64 { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { let mut block = alloc.block_with_size_and_tag(1, block::DOUBLE_TAG); alloc.set_field(&mut block, 0, unsafe { Value::from_bits(self.to_bits() as usize) }); block.build() } } impl FromOcamlRep for f64 { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { let block = from::expect_block_with_size_and_tag(value, 1, block::DOUBLE_TAG)?; Ok(f64::from_bits(block[0].0 as u64)) } } trivial_from_in_impl!(f64); impl<T: ToOcamlRep + Sized> ToOcamlRep for Box<T> { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { alloc.add(&**self) } } impl<T: FromOcamlRep + Sized> FromOcamlRep for Box<T> { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { Ok(Box::new(T::from_ocamlrep(value)?)) } } impl<T: ToOcamlRep + Sized> ToOcamlRep for &'_ T { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { alloc.memoized( *self as *const T as *const usize as usize, size_of::<T>(), |alloc| (**self).to_ocamlrep(alloc), ) } } impl<'a, T: FromOcamlRepIn<'a>> FromOcamlRepIn<'a> for &'a T { fn from_ocamlrep_in(value: Value<'_>, alloc: &'a Bump) -> Result<Self, FromError> { // NB: We don't get any sharing this way. 
Ok(alloc.alloc(T::from_ocamlrep_in(value, alloc)?)) } } impl<T: ToOcamlRep + Sized> ToOcamlRep for Rc<T> { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { alloc.memoized( self.as_ref() as *const T as usize, size_of::<T>(), |alloc| alloc.add(self.as_ref()), ) } } impl<T: FromOcamlRep> FromOcamlRep for Rc<T> { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { // NB: We don't get any sharing this way. Ok(Rc::new(T::from_ocamlrep(value)?)) } } impl<T: ToOcamlRep + Sized> ToOcamlRep for Arc<T> { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { alloc.memoized( self.as_ref() as *const T as usize, size_of::<T>(), |alloc| alloc.add(self.as_ref()), ) } } impl<T: FromOcamlRep> FromOcamlRep for Arc<T> { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { // NB: We don't get any sharing this way. Ok(Arc::new(T::from_ocamlrep(value)?)) } } impl<T: ToOcamlRep> ToOcamlRep for RefCell<T> { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { let mut block = alloc.block_with_size(1); let value_ref: std::cell::Ref<'a, T> = self.borrow(); alloc.set_field(&mut block, 0, alloc.add(&*value_ref)); // SAFETY: the `&'a self` lifetime is intended to ensure that our `T` is // not mutated or dropped during the to-OCaml conversion, in order to // ensure that the allocator's memoization table isn't invalidated. We // can't guarantee that statically for types with internal mutability, // so the `ToOcamlRep` docs ask the caller to promise not to mutate or // drop these values. If they violate that requirement, the allocator // may give stale results in the event of aliasing, which is definitely // undesirable, but does not break type safety on the Rust side. The // allocator ties the lifetime of the Value we're returning to our // local variable `value_ref`, but it doesn't actually reference that // local, so it's safe to cast the lifetime away. 
unsafe { std::mem::transmute::<Value<'_>, Value<'a>>(block.build()) } } } impl<T: Copy + ToOcamlRep> ToOcamlRep for Cell<T> { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { let mut block = alloc.block_with_size(1); let value_copy = self.get(); alloc.set_field(&mut block, 0, alloc.add(&value_copy)); // SAFETY: as above with RefCell, we need to cast away the lifetime to // deal with internal mutability. unsafe { std::mem::transmute::<Value<'_>, Value<'a>>(block.build()) } } } impl<T: FromOcamlRep> FromOcamlRep for Cell<T> { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { let block = from::expect_tuple(value, 1)?; let value: T = from::field(block, 0)?; Ok(Cell::new(value)) } } impl<'a, T: FromOcamlRepIn<'a>> FromOcamlRepIn<'a> for Cell<T> { fn from_ocamlrep_in(value: Value<'_>, alloc: &'a Bump) -> Result<Self, FromError> { let block = from::expect_tuple(value, 1)?; let value: T = from::field_in(block, 0, alloc)?; Ok(Cell::new(value)) } } impl<T: FromOcamlRep> FromOcamlRep for RefCell<T> { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { let block = from::expect_tuple(value, 1)?; let value: T = from::field(block, 0)?; Ok(RefCell::new(value)) } } impl<'a, T: FromOcamlRepIn<'a>> FromOcamlRepIn<'a> for RefCell<T> { fn from_ocamlrep_in(value: Value<'_>, alloc: &'a Bump) -> Result<Self, FromError> { let block = from::expect_tuple(value, 1)?; let value: T = from::field_in(block, 0, alloc)?; Ok(RefCell::new(value)) } } impl<T: ToOcamlRep> ToOcamlRep for Option<T> { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { match self { None => Value::int(0), Some(val) => { let mut block = alloc.block_with_size(1); alloc.set_field(&mut block, 0, alloc.add(val)); block.build() } } } } impl<T: FromOcamlRep> FromOcamlRep for Option<T> { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { if value.is_int() { let _ = from::expect_nullary_variant(value, 0)?; Ok(None) } else { let block = 
from::expect_block_with_size_and_tag(value, 1, 0)?; Ok(Some(from::field(block, 0)?)) } } } impl<'a, T: FromOcamlRepIn<'a>> FromOcamlRepIn<'a> for Option<T> { fn from_ocamlrep_in(value: Value<'_>, alloc: &'a Bump) -> Result<Self, FromError> { if value.is_int() { let _ = from::expect_nullary_variant(value, 0)?; Ok(None) } else { let block = from::expect_block_with_size_and_tag(value, 1, 0)?; Ok(Some(from::field_in(block, 0, alloc)?)) } } } impl<T: ToOcamlRep, E: ToOcamlRep> ToOcamlRep for Result<T, E> { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { match self { Ok(val) => { let mut block = alloc.block_with_size(1); alloc.set_field(&mut block, 0, alloc.add(val)); block.build() } Err(val) => { let mut block = alloc.block_with_size_and_tag(1, 1); alloc.set_field(&mut block, 0, alloc.add(val)); block.build() } } } } impl<T: FromOcamlRep, E: FromOcamlRep> FromOcamlRep for Result<T, E> { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { let block = from::expect_block(value)?; match block.tag() { 0 => Ok(Ok(from::field(block, 0)?)), 1 => Ok(Err(from::field(block, 0)?)), t => Err(FromError::BlockTagOutOfRange { max: 1, actual: t }), } } } impl<'a, T: FromOcamlRepIn<'a>, E: FromOcamlRepIn<'a>> FromOcamlRepIn<'a> for Result<T, E> { fn from_ocamlrep_in(value: Value<'_>, alloc: &'a Bump) -> Result<Self, FromError> { let block = from::expect_block(value)?; match block.tag() { 0 => Ok(Ok(from::field_in(block, 0, alloc)?)), 1 => Ok(Err(from::field_in(block, 0, alloc)?)), t => Err(FromError::BlockTagOutOfRange { max: 1, actual: t }), } } } impl<T: ToOcamlRep> ToOcamlRep for [T] { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { let mut hd = alloc.add(&()); for val in self.iter().rev() { let mut block = alloc.block_with_size(2); alloc.set_field(&mut block, 0, alloc.add(val)); alloc.set_field(&mut block, 1, hd); hd = block.build(); } hd } } impl<T: ToOcamlRep> ToOcamlRep for &'_ [T] { fn to_ocamlrep<'a, A: Allocator>(&'a 
self, alloc: &'a A) -> Value<'a> { alloc.memoized( self.as_ptr() as usize, std::mem::size_of_val(*self), |alloc| (**self).to_ocamlrep(alloc), ) } } impl<'a, T: FromOcamlRepIn<'a>> FromOcamlRepIn<'a> for &'a [T] { fn from_ocamlrep_in(value: Value<'_>, alloc: &'a Bump) -> Result<Self, FromError> { let mut len = 0usize; let mut hd = value; while !hd.is_int() { let block = from::expect_tuple(hd, 2)?; len += 1; hd = block[1]; } let hd = hd.as_int().unwrap(); if hd != 0 { return Err(FromError::ExpectedUnit(hd)); } let mut vec = bumpalo::collections::Vec::with_capacity_in(len, alloc); let mut hd = value; while !hd.is_int() { let block = from::expect_tuple(hd, 2).unwrap(); vec.push(from::field_in(block, 0, alloc)?); hd = block[1]; } Ok(vec.into_bump_slice()) } } impl<T: ToOcamlRep> ToOcamlRep for Box<[T]> { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { (**self).to_ocamlrep(alloc) } } impl<T: FromOcamlRep> FromOcamlRep for Box<[T]> { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { let vec = <Vec<T>>::from_ocamlrep(value)?; Ok(vec.into_boxed_slice()) } } impl<T: ToOcamlRep> ToOcamlRep for Vec<T> { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { alloc.add(self.as_slice()) } } impl<T: FromOcamlRep> FromOcamlRep for Vec<T> { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { let mut vec = vec![]; let mut hd = value; while !hd.is_int() { let block = from::expect_tuple(hd, 2)?; vec.push(from::field(block, 0)?); hd = block[1]; } let hd = hd.as_int().unwrap(); if hd != 0 { return Err(FromError::ExpectedUnit(hd)); } Ok(vec) } } impl<'a, T: FromOcamlRep> FromOcamlRepIn<'a> for Vec<T> { fn from_ocamlrep_in(value: Value<'_>, _alloc: &'a Bump) -> Result<Self, FromError> { Self::from_ocamlrep(value) } } impl<K: ToOcamlRep + Ord, V: ToOcamlRep> ToOcamlRep for BTreeMap<K, V> { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { if self.is_empty() { return Value::int(0); } let len = 
self.len(); let mut iter = self .iter() .map(|(k, v)| (k.to_ocamlrep(alloc), v.to_ocamlrep(alloc))); let (res, _) = sorted_iter_to_ocaml_map(&mut iter, alloc, len); res } } impl<K: FromOcamlRep + Ord, V: FromOcamlRep> FromOcamlRep for BTreeMap<K, V> { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { let mut map = BTreeMap::new(); btree_map_from_ocamlrep(&mut map, value)?; Ok(map) } } impl<'a, K: FromOcamlRep + Ord, V: FromOcamlRep> FromOcamlRepIn<'a> for BTreeMap<K, V> { fn from_ocamlrep_in(value: Value<'_>, _alloc: &'a Bump) -> Result<Self, FromError> { Self::from_ocamlrep(value) } } /// Given an iterator which emits key-value pairs (already converted to OCaml /// values), build an OCaml Map containing those bindings. /// /// The iterator must emit each key only once. The key-value pairs /// must be emitted in ascending order, sorted by key. The iterator /// must emit exactly `size` pairs. pub fn sorted_iter_to_ocaml_map<'a, A: Allocator>( iter: &mut impl Iterator<Item = (Value<'a>, Value<'a>)>, alloc: &'a A, size: usize, ) -> (Value<'a>, usize) { if size == 0 { return (Value::int(0), 0); } let (left, left_height) = sorted_iter_to_ocaml_map(iter, alloc, size / 2); let (key, val) = iter.next().unwrap(); let (right, right_height) = sorted_iter_to_ocaml_map(iter, alloc, size - 1 - size / 2); let height = std::cmp::max(left_height, right_height) + 1; let mut block = alloc.block_with_size(5); alloc.set_field(&mut block, 0, left); alloc.set_field(&mut block, 1, key); alloc.set_field(&mut block, 2, val); alloc.set_field(&mut block, 3, right); alloc.set_field(&mut block, 4, alloc.add_copy(height)); (block.build(), height) } fn btree_map_from_ocamlrep<K: FromOcamlRep + Ord, V: FromOcamlRep>( map: &mut BTreeMap<K, V>, value: Value<'_>, ) -> Result<(), FromError> { if value.is_int() { let _ = from::expect_nullary_variant(value, 0)?; return Ok(()); } let block = from::expect_block_with_size_and_tag(value, 5, 0)?; btree_map_from_ocamlrep(map, block[0])?; let 
key: K = from::field(block, 1)?; let val: V = from::field(block, 2)?; map.insert(key, val); btree_map_from_ocamlrep(map, block[3])?; Ok(()) } fn vec_from_ocaml_map_impl<K: FromOcamlRep, V: FromOcamlRep>( vec: &mut Vec<(K, V)>, value: Value<'_>, ) -> Result<(), FromError> { if value.is_int() { let _ = from::expect_nullary_variant(value, 0)?; return Ok(()); } let block = from::expect_block_with_size_and_tag(value, 5, 0)?; vec_from_ocaml_map_impl(vec, block[0])?; let key: K = from::field(block, 1)?; let val: V = from::field(block, 2)?; vec.push((key, val)); vec_from_ocaml_map_impl(vec, block[3])?; Ok(()) } pub fn vec_from_ocaml_map<K: FromOcamlRep, V: FromOcamlRep>( value: Value<'_>, ) -> Result<Vec<(K, V)>, FromError> { let mut vec = vec![]; vec_from_ocaml_map_impl(&mut vec, value)?; Ok(vec) } pub fn vec_from_ocaml_map_in<'a, K, V>( value: Value<'_>, vec: &mut bumpalo::collections::Vec<'a, (K, V)>, alloc: &'a Bump, ) -> Result<(), FromError> where K: FromOcamlRepIn<'a> + Ord, V: FromOcamlRepIn<'a>, { if value.is_int() { let _ = from::expect_nullary_variant(value, 0)?; return Ok(()); } let block = from::expect_block_with_size_and_tag(value, 5, 0)?; vec_from_ocaml_map_in(block[0], vec, alloc)?; let key: K = from::field_in(block, 1, alloc)?; let val: V = from::field_in(block, 2, alloc)?; vec.push((key, val)); vec_from_ocaml_map_in(block[3], vec, alloc)?; Ok(()) } impl<T: ToOcamlRep + Ord> ToOcamlRep for BTreeSet<T> { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { if self.is_empty() { return Value::int(0); } let len = self.len(); let mut iter = self.iter().map(|x| x.to_ocamlrep(alloc)); let (res, _) = sorted_iter_to_ocaml_set(&mut iter, alloc, len); res } } impl<T: FromOcamlRep + Ord> FromOcamlRep for BTreeSet<T> { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { let mut set = BTreeSet::new(); btree_set_from_ocamlrep(&mut set, value)?; Ok(set) } } impl<'a, T: FromOcamlRep + Ord> FromOcamlRepIn<'a> for BTreeSet<T> { fn 
from_ocamlrep_in(value: Value<'_>, _alloc: &'a Bump) -> Result<Self, FromError> { Self::from_ocamlrep(value) } } /// Build an OCaml Set containing all items emitted by the given iterator. /// /// The iterator must emit each item only once. The items must be /// emitted in ascending order. The iterator must emit exactly `size` /// items. pub fn sorted_iter_to_ocaml_set<'a, A: Allocator>( iter: &mut impl Iterator<Item = Value<'a>>, alloc: &'a A, size: usize, ) -> (Value<'a>, usize) { if size == 0 { return (Value::int(0), 0); } let (left, left_height) = sorted_iter_to_ocaml_set(iter, alloc, size / 2); let val = iter.next().unwrap(); let (right, right_height) = sorted_iter_to_ocaml_set(iter, alloc, size - 1 - size / 2); let height = std::cmp::max(left_height, right_height) + 1; let mut block = alloc.block_with_size(4); alloc.set_field(&mut block, 0, left); alloc.set_field(&mut block, 1, val); alloc.set_field(&mut block, 2, right); alloc.set_field(&mut block, 3, alloc.add_copy(height)); (block.build(), height) } fn btree_set_from_ocamlrep<T: FromOcamlRep + Ord>( set: &mut BTreeSet<T>, value: Value<'_>, ) -> Result<(), FromError> { if value.is_int() { let _ = from::expect_nullary_variant(value, 0)?; return Ok(()); } let block = from::expect_block_with_size_and_tag(value, 4, 0)?; btree_set_from_ocamlrep(set, block[0])?; set.insert(from::field(block, 1)?); btree_set_from_ocamlrep(set, block[2])?; Ok(()) } fn vec_from_ocaml_set_impl<T: FromOcamlRep>( value: Value<'_>, vec: &mut Vec<T>, ) -> Result<(), FromError> { if value.is_int() { let _ = from::expect_nullary_variant(value, 0)?; return Ok(()); } let block = from::expect_block_with_size_and_tag(value, 4, 0)?; vec_from_ocaml_set_impl(block[0], vec)?; vec.push(from::field(block, 1)?); vec_from_ocaml_set_impl(block[2], vec)?; Ok(()) } pub fn vec_from_ocaml_set<T: FromOcamlRep>(value: Value<'_>) -> Result<Vec<T>, FromError> { let mut vec = vec![]; vec_from_ocaml_set_impl(value, &mut vec)?; Ok(vec) } pub fn 
vec_from_ocaml_set_in<'a, T: FromOcamlRepIn<'a> + Ord>( value: Value<'_>, vec: &mut bumpalo::collections::Vec<'a, T>, alloc: &'a Bump, ) -> Result<(), FromError> { if value.is_int() { let _ = from::expect_nullary_variant(value, 0)?; return Ok(()); } let block = from::expect_block_with_size_and_tag(value, 4, 0)?; vec_from_ocaml_set_in(block[0], vec, alloc)?; vec.push(from::field_in(block, 1, alloc)?); vec_from_ocaml_set_in(block[2], vec, alloc)?; Ok(()) } impl<K: ToOcamlRep + Ord, V: ToOcamlRep, S: BuildHasher + Default> ToOcamlRep for IndexMap<K, V, S> { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { if self.is_empty() { return Value::int(0); } let mut vec: Vec<(&'a K, &'a V)> = self.iter().collect(); vec.sort_unstable_by_key(|&(k, _)| k); let len = vec.len(); let mut iter = vec.iter().map(|(k, v)| { let k: &'a K = k; let v: &'a V = v; (k.to_ocamlrep(alloc), v.to_ocamlrep(alloc)) }); let (res, _) = sorted_iter_to_ocaml_map(&mut iter, alloc, len); res } } impl<K: FromOcamlRep + Ord + Hash, V: FromOcamlRep, S: BuildHasher + Default> FromOcamlRep for IndexMap<K, V, S> { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { let vec = vec_from_ocaml_map(value)?; Ok(vec.into_iter().collect()) } } impl<T: ToOcamlRep + Ord, S: BuildHasher + Default> ToOcamlRep for IndexSet<T, S> { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { if self.is_empty() { return Value::int(0); } let mut vec: Vec<&'a T> = self.iter().collect(); vec.sort_unstable(); let len = vec.len(); let mut iter = vec.iter().copied().map(|x| x.to_ocamlrep(alloc)); let (res, _) = sorted_iter_to_ocaml_set(&mut iter, alloc, len); res } } impl<T: FromOcamlRep + Ord + Hash, S: BuildHasher + Default> FromOcamlRep for IndexSet<T, S> { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { let set = <BTreeSet<T>>::from_ocamlrep(value)?; Ok(set.into_iter().collect()) } } #[cfg(unix)] impl ToOcamlRep for OsStr { // TODO: A Windows implementation would be 
nice, but what does the OCaml // runtime do? If we need Windows support, we'll have to find out. fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { use std::os::unix::ffi::OsStrExt; alloc.add(self.as_bytes()) } } #[cfg(unix)] impl ToOcamlRep for &'_ OsStr { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { use std::os::unix::ffi::OsStrExt; alloc.add(self.as_bytes()) } } #[cfg(unix)] impl<'a> FromOcamlRepIn<'a> for &'a OsStr { fn from_ocamlrep_in<'b>(value: Value<'b>, alloc: &'a Bump) -> Result<Self, FromError> { use std::os::unix::ffi::OsStrExt; Ok(std::ffi::OsStr::from_bytes(<&'a [u8]>::from_ocamlrep_in( value, alloc, )?)) } } #[cfg(unix)] impl ToOcamlRep for OsString { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { alloc.add(self.as_os_str()) } } #[cfg(unix)] impl FromOcamlRep for OsString { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { use std::os::unix::ffi::OsStrExt; Ok(OsString::from(std::ffi::OsStr::from_bytes( bytes_from_ocamlrep(value)?, ))) } } #[cfg(unix)] impl ToOcamlRep for Path { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { alloc.add(self.as_os_str()) } } #[cfg(unix)] impl ToOcamlRep for &'_ Path { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { alloc.add(self.as_os_str()) } } #[cfg(unix)] impl<'a> FromOcamlRepIn<'a> for &'a Path { fn from_ocamlrep_in<'b>(value: Value<'b>, alloc: &'a Bump) -> Result<Self, FromError> { Ok(Path::new(<&'a OsStr>::from_ocamlrep_in(value, alloc)?)) } } #[cfg(unix)] impl ToOcamlRep for PathBuf { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { alloc.add(self.as_os_str()) } } #[cfg(unix)] impl FromOcamlRep for PathBuf { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { Ok(PathBuf::from(OsString::from_ocamlrep(value)?)) } } impl ToOcamlRep for String { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { alloc.add(self.as_str()) } } 
impl FromOcamlRep for String { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { Ok(String::from(str_from_ocamlrep(value)?)) } } trivial_from_in_impl!(String); impl ToOcamlRep for Cow<'_, str> { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { let s: &str = self.borrow(); alloc.add(s) } } impl FromOcamlRep for Cow<'_, str> { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { Ok(Cow::Owned(String::from(str_from_ocamlrep(value)?))) } } impl ToOcamlRep for str { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { str_to_ocamlrep(self, alloc) } } impl ToOcamlRep for &'_ str { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { alloc.memoized(self.as_bytes().as_ptr() as usize, self.len(), |alloc| { (**self).to_ocamlrep(alloc) }) } } impl<'a> FromOcamlRepIn<'a> for &'a str { fn from_ocamlrep_in<'b>(value: Value<'b>, alloc: &'a Bump) -> Result<Self, FromError> { Ok(alloc.alloc_str(str_from_ocamlrep(value)?)) } } /// Allocate an OCaml string using the given allocator and copy the given string /// slice into it. pub fn str_to_ocamlrep<'a, A: Allocator>(s: &str, alloc: &'a A) -> Value<'a> { bytes_to_ocamlrep(s.as_bytes(), alloc) } /// Given an OCaml string, return a string slice pointing to its contents, if /// they are valid UTF-8. pub fn str_from_ocamlrep(value: Value<'_>) -> Result<&str, FromError> { Ok(std::str::from_utf8(bytes_from_ocamlrep(value)?)?) 
} impl ToOcamlRep for Vec<u8> { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { alloc.add(self.as_slice()) } } impl FromOcamlRep for Vec<u8> { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { Ok(Vec::from(bytes_from_ocamlrep(value)?)) } } impl ToOcamlRep for BString { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { alloc.add(self.as_slice()) } } impl FromOcamlRep for BString { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { Ok(Vec::from_ocamlrep(value)?.into()) } } impl ToOcamlRep for BStr { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { bytes_to_ocamlrep(self, alloc) } } impl ToOcamlRep for &'_ BStr { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { alloc.memoized(self.as_ptr() as usize, self.len(), |alloc| { (**self).to_ocamlrep(alloc) }) } } impl<'a> FromOcamlRepIn<'a> for &'a BStr { fn from_ocamlrep_in<'b>(value: Value<'b>, alloc: &'a Bump) -> Result<Self, FromError> { let slice: &[u8] = alloc.alloc_slice_copy(bytes_from_ocamlrep(value)?); Ok(slice.into()) } } impl ToOcamlRep for [u8] { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { bytes_to_ocamlrep(self, alloc) } } impl ToOcamlRep for &'_ [u8] { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { alloc.memoized(self.as_ptr() as usize, self.len(), |alloc| { (**self).to_ocamlrep(alloc) }) } } impl<'a> FromOcamlRepIn<'a> for &'a [u8] { fn from_ocamlrep_in<'b>(value: Value<'b>, alloc: &'a Bump) -> Result<Self, FromError> { Ok(alloc.alloc_slice_copy(bytes_from_ocamlrep(value)?)) } } /// Allocate an OCaml string using the given allocator and copy the given byte /// slice into it. 
pub fn bytes_to_ocamlrep<'a, A: Allocator>(bytes: &[u8], alloc: &'a A) -> Value<'a> { use std::io::Write; let mut w = alloc.byte_string_with_len(bytes.len()); let _ = w.write(bytes).unwrap(); w.build() } /// Given an OCaml string, return a byte slice pointing to its contents. pub fn bytes_from_ocamlrep(value: Value<'_>) -> Result<&[u8], FromError> { let block = from::expect_block(value)?; from::expect_block_tag(block, block::STRING_TAG)?;
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
true
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep/ptr.rs
ocamlrep/ptr.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. //! FFI types for representing pointers-to-OCaml-managed-data in Rust //! (`UnsafeOcamlPtr`) and pointers-to-Rust-managed-data in OCaml (`NakedPtr`). use std::fmt; use std::num::NonZeroUsize; use crate::Allocator; use crate::FromError; use crate::FromOcamlRep; use crate::ToOcamlRep; use crate::Value; /// Unsafe pointer to an OCaml value which is (possibly) managed by the garbage /// collector. /// /// Take care that the value stays rooted or the garbage collector does not run /// while an UnsafeOcamlPtr wrapper for it exists. /// /// While this can be used with an ocamlrep::Arena via to_ocamlrep, caution is /// required--the pointed-to value will *not* be cloned into the Arena, so a /// data structure containing UnsafeOcamlPtrs which is allocated into an Arena /// may contain pointers into the OCaml GC-ed heap. #[repr(transparent)] #[derive(Clone, Copy, Hash, PartialEq, Eq)] pub struct UnsafeOcamlPtr(NonZeroUsize); impl UnsafeOcamlPtr { /// # Safety /// /// `ptr` must be rooted or the garbage collector can not be allowed to run /// while an `UnsafeOcamlPtr` wrapper that contains it exists. pub unsafe fn new(ptr: usize) -> Self { Self(NonZeroUsize::new(ptr).unwrap()) } pub fn as_usize(self) -> usize { self.0.get() } #[inline(always)] pub const fn is_int(self) -> bool { // SAFETY: `Value::is_int` only checks the low bit, so it's safe // to interpret `self.0` as a value (we don't attempt to dereference it) unsafe { self.as_value().is_int() } } #[inline(always)] pub const fn is_block(self) -> bool { // SAFETY: `Value::is_block` only checks the low bit, so it's safe // to interpret `self.0` as a value (we don't attempt to dereference it) unsafe { self.as_value().is_block() } } /// Interpret this pointer as an OCaml value which is valid for lifetime 'a. 
/// /// # Safety /// /// The OCaml garbage collector must not run during this lifetime (even if /// the value is rooted). #[inline(always)] pub const unsafe fn as_value<'a>(self) -> Value<'a> { unsafe { Value::from_bits(self.0.get()) } } } impl fmt::Debug for UnsafeOcamlPtr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "0x{:x}", self.0) } } impl ToOcamlRep for UnsafeOcamlPtr { fn to_ocamlrep<'a, A: Allocator>(&'a self, _alloc: &'a A) -> Value<'a> { unsafe { Value::from_bits(self.0.get()) } } } impl FromOcamlRep for UnsafeOcamlPtr { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { if value.is_int() { return Err(FromError::ExpectedBlock(value.as_int().unwrap())); } Ok(unsafe { Self::new(value.to_bits()) }) } } impl<'a> crate::FromOcamlRepIn<'a> for UnsafeOcamlPtr { fn from_ocamlrep_in(value: Value<'_>, _alloc: &'a bumpalo::Bump) -> Result<Self, FromError> { Self::from_ocamlrep(value) } } /// Any kind of foreign pointer (i.e., a pointer to any data at all--it need not /// look like a valid OCaml value). /// /// On the OCaml side, these are represented as opaque types, e.g. `type addr;`. /// /// The pointer must not be within a memory page currently in use by the OCaml /// runtime for the garbage-collected heap (i.e., it must in fact be a foreign /// pointer). /// /// Can only be used when linking against a binary built with an OCaml compiler /// which was **not** configured with the `-no-naked-pointers` option (which /// forbids naked pointers, requiring foreign pointers to be wrapped in a block /// tagged with `Abstract_tag` instead). 
#[repr(transparent)] #[derive(Clone, Copy, Hash, PartialEq, Eq)] pub struct NakedPtr<T>(*const T); impl<T> NakedPtr<T> { pub fn new(ptr: *const T) -> Self { Self(ptr) } pub fn as_ptr(self) -> *const T { self.0 } } impl<T> fmt::Debug for NakedPtr<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:p}", self.0) } } impl<T> ToOcamlRep for NakedPtr<T> { fn to_ocamlrep<'a, A: Allocator>(&'a self, _alloc: &'a A) -> Value<'a> { unsafe { Value::from_bits(self.0 as usize) } } } impl<T> FromOcamlRep for NakedPtr<T> { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { if value.is_int() { return Err(FromError::ExpectedBlock(value.as_int().unwrap())); } Ok(Self::new(value.to_bits() as *const T)) } }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep/cache.rs
ocamlrep/cache.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. //! Provides `MemoizationCache`, a simple cache designed to aid implementation //! of the `Allocator` trait. use std::cell::RefCell; type HashMap<K, V> = rustc_hash::FxHashMap<K, V>; /// A simple scoped cache for memoizing conversions from one pointer-sized value /// to another. Useful for memoizing conversions between OCaml values and Rust /// references. pub struct MemoizationCache { /// Maps from input (address,size_in_bytes) -> output cache: RefCell<Option<HashMap<(usize, usize), usize>>>, } impl Default for MemoizationCache { #[inline(always)] fn default() -> Self { MemoizationCache::new() } } impl MemoizationCache { #[inline(always)] pub fn new() -> Self { Self { cache: RefCell::new(None), } } #[inline(always)] pub fn with_cache<T>(&self, f: impl FnOnce() -> T) -> T { // The `replace` below should not panic because the only borrows of // `self.cache` are in this function and in `memoized`. In both // functions, we do not hold a `Ref` or `RefMut` while calling into code // which might attempt to re-enter `memoized` or `with_cache`. let prev_value = self.cache.replace(Some(Default::default())); if prev_value.is_some() { panic!( "Attempted to re-enter MemoizationCache::with_cache \ (probably via ocamlrep::Allocator::add_root, which is not re-entrant)" ); } let result = f(); // As above, this `replace` should not panic. 
self.cache.replace(None); result } #[inline(always)] pub fn memoized(&self, input: usize, size_in_bytes: usize, f: impl FnOnce() -> usize) -> usize { if size_in_bytes == 0 { return f(); } let memoized_output = match (self.cache.borrow().as_ref()) .map(|cache| cache.get(&(input, size_in_bytes)).copied()) { None => return f(), Some(output) => output, }; match memoized_output { Some(output) => output, None => { let output = f(); // The `borrow_mut` below should not panic because the only // borrows of `self.cache` are in this function and in // `with_cache`. In this function, we do not hold a `Ref` or // `RefMut` while calling into `f` (or any other function which // might attempt to re-enter this function or `with_cache`). let mut cache = self.cache.borrow_mut(); // The `unwrap` below should not panic. We know `self.cache` was // not None upon entering this function because we looked up // `memoized_output`. The only function which can replace the // cache with None is `with_cache`, which would have panicked if // `f` attempted to re-enter it. (cache.as_mut().unwrap()).insert((input, size_in_bytes), output); output } } } }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep/test/test_add_root.rs
ocamlrep/test/test_add_root.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. #![cfg(test)] use ocamlrep::Allocator; use ocamlrep::Arena; use ocamlrep::FromOcamlRep; use ocamlrep::ToOcamlRep; #[test] fn shared_str() { // Without `add_root`, converting this tuple would convert the string // "hello" to its OCaml representation and copy it into the ocamlrep::Arena // twice. let arena = Arena::new(); let s = "hello"; let tuple = (s, s); let ocaml_tuple = arena.add_root(&tuple); let ocaml_block = ocaml_tuple.as_block().unwrap(); assert_eq!( ocaml_block[0].as_str(), Some(std::borrow::Cow::Borrowed("hello")) ); assert_eq!( ocaml_block[1].as_str(), Some(std::borrow::Cow::Borrowed("hello")) ); // The string pointer in the first field is physically equal to the string // pointer in the second field. assert_eq!(ocaml_block[0].to_bits(), ocaml_block[1].to_bits()); } #[test] fn shared_slice() { // Without `add_root`, converting this tuple would convert the list to its // OCaml representation and copy it into the ocamlrep::Arena twice. let arena = Arena::new(); let s = &[1usize, 2, 3][..]; let tuple = (s, s); let ocaml_tuple = arena.add_root(&tuple); assert_eq!( <(Vec<usize>, Vec<usize>)>::from_ocamlrep(ocaml_tuple), Ok((vec![1, 2, 3], vec![1, 2, 3])) ); // The list pointer in the first field is physically equal to the list // pointer in the second field. let ocaml_block = ocaml_tuple.as_block().unwrap(); assert_eq!(ocaml_block[0].to_bits(), ocaml_block[1].to_bits()); } #[test] fn overlapping_substrs() { // Without `Allocator::memoized_slice`, a naive implementation of // `ToOcamlRep` for slices might use `Allocator::memoized`, failing to take // the slice length into consideration. // Then we'd incorrectly produce the tuple ("hello", "hello"). 
let arena = Arena::new(); let s1 = "hello"; let s2 = &s1[..4]; let tuple = (s1, s2); let ocaml_tuple = arena.add_root(&tuple); assert_eq!( <(String, String)>::from_ocamlrep(ocaml_tuple), Ok((String::from("hello"), String::from("hell"))) ); } #[test] fn overlapping_subslices() { // Without `Allocator::memoized_slice`, a naive implementation of // `ToOcamlRep` for slices might use `Allocator::memoized`, failing to take // the slice length into consideration. // Then we'd incorrectly produce the tuple ([1,2,3], [1,2,3]). let arena = Arena::new(); let s1 = &[1usize, 2, 3][..]; let s2 = &s1[..2]; let tuple = (s1, s2); let ocaml_tuple = arena.add_root(&tuple); assert_eq!( <(Vec<usize>, Vec<usize>)>::from_ocamlrep(ocaml_tuple), Ok((vec![1, 2, 3], vec![1, 2])) ); } #[derive(Debug, PartialEq)] #[repr(transparent)] struct U32Pair(u64); impl U32Pair { fn new(fst: u32, snd: u32) -> Self { let fst = fst as u64; let snd = snd as u64; Self(fst << 32 | snd) } fn fst(&self) -> u32 { (self.0 >> 32) as u32 } fn snd(&self) -> u32 { self.0 as u32 } fn inner(&self) -> &u64 { &self.0 } } impl ToOcamlRep for U32Pair { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> { let mut block = alloc.block_with_size(2); alloc.set_field(&mut block, 0, alloc.add_copy(self.fst())); alloc.set_field(&mut block, 1, alloc.add_copy(self.snd())); block.build() } } impl FromOcamlRep for U32Pair { fn from_ocamlrep(value: ocamlrep::Value<'_>) -> Result<Self, ocamlrep::FromError> { let (fst, snd) = <(u32, u32)>::from_ocamlrep(value)?; Ok(Self::new(fst, snd)) } } #[test] fn differently_typed_views_of_same_data() { // `Allocator::memoized` is keyed solely off of address and size in bytes. // If we have two views of the same bytes, but the views have two different // OCaml representations, then `Allocator::add_root` will produce an OCaml // value with an unexpected type. // // Here, `pair_as_int` gets converted first, and memoized. 
Since its address // and size are the same as `pair`, the allocator uses the same memoized // OCaml value for both. But the implementations of `ToOcamlRep` and // `FromOcamlRep` for U32Pair specify that the OCaml representation is a // tuple, not an immediate integer, so we encounter an error when trying to // convert the OCaml value to `(u64, U32Pair)`. let arena = Arena::new(); let pair = &U32Pair::new(1, 2); let pair_as_int = pair.inner(); let value = (pair_as_int, pair); use ocamlrep::FromError::*; assert_eq!( <(u64, U32Pair)>::from_ocamlrep(arena.add_root(&value)), Err(ErrorInField(1, Box::new(ExpectedBlock(1 << 32 | 2)))) ); // Using arena.add instead produces a correct result. assert_eq!( <(u64, U32Pair)>::from_ocamlrep(arena.add(&value)), Ok((1 << 32 | 2, U32Pair::new(1, 2))) ); } #[test] fn shared_rcs() { use std::rc::Rc; let arena = Arena::new(); let inner_tuple = Rc::new((1, 2)); let outer_tuple = Rc::new((Rc::clone(&inner_tuple), inner_tuple)); let ocaml_tuple = arena.add_root(&outer_tuple); let outer_tuple = ocaml_tuple.as_block().unwrap(); // The tuple pointer in the first field is physically equal to the tuple // pointer in the second field. assert_eq!(outer_tuple[0].to_bits(), outer_tuple[1].to_bits()); let inner_tuple = outer_tuple[0].as_block().unwrap(); assert_eq!(inner_tuple[0].as_int(), Some(1)); assert_eq!(inner_tuple[1].as_int(), Some(2)); } #[test] fn shared_arcs() { use std::sync::Arc; let arena = Arena::new(); let inner_tuple = Arc::new((1, 2)); let outer_tuple = Arc::new((Arc::clone(&inner_tuple), inner_tuple)); let ocaml_tuple = arena.add_root(&outer_tuple); let outer_tuple = ocaml_tuple.as_block().unwrap(); // The tuple pointer in the first field is physically equal to the tuple // pointer in the second field. assert_eq!(outer_tuple[0].to_bits(), outer_tuple[1].to_bits()); let inner_tuple = outer_tuple[0].as_block().unwrap(); assert_eq!(inner_tuple[0].as_int(), Some(1)); assert_eq!(inner_tuple[1].as_int(), Some(2)); }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep/test/test_from_ocamlrep.rs
ocamlrep/test/test_from_ocamlrep.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. #![cfg(test)] use ocamlrep::Allocator; use ocamlrep::Arena; use ocamlrep::FromError::*; use ocamlrep::FromOcamlRep; use ocamlrep::ToOcamlRep; use ocamlrep::Value; #[test] fn expected_block_but_got_int() { let value = Value::int(42); let err = <(isize, isize)>::from_ocamlrep(value).err().unwrap(); assert_eq!(err, ExpectedBlock(42)); } #[test] fn expected_int_but_got_block() { let arena = Arena::new(); let value = arena.block_with_size_and_tag(1, 0).build(); let err = isize::from_ocamlrep(value).err().unwrap(); match err { ExpectedInt(..) => {} _ => panic!("unexpected error: {err}"), } } #[test] fn wrong_tag_for_none() { let value = Value::int(1); let err = <Option<isize>>::from_ocamlrep(value).err().unwrap(); assert_eq!(err, NullaryVariantTagOutOfRange { max: 0, actual: 1 }); } #[test] fn wrong_tag_for_some() { let arena = Arena::new(); let value = arena.block_with_size_and_tag(1, 1).build(); let err = <Option<isize>>::from_ocamlrep(value).err().unwrap(); assert_eq!( err, ExpectedBlockTag { expected: 0, actual: 1 } ); } #[test] fn out_of_bool_range() { let value = Value::int(42); let err = bool::from_ocamlrep(value).err().unwrap(); assert_eq!(err, ExpectedBool(42)); } #[test] fn out_of_char_range() { let value = Value::int(-1); let err = char::from_ocamlrep(value).err().unwrap(); assert_eq!(err, ExpectedChar(-1)); } #[derive(FromOcamlRep, ToOcamlRep)] struct Foo { a: isize, b: bool, } #[test] fn bad_struct_field() { let arena = Arena::new(); let value = { let mut block = arena.block_with_size_and_tag(2, 0); arena.set_field(&mut block, 0, Value::int(0)); arena.set_field(&mut block, 1, Value::int(42)); block.build() }; let err = Foo::from_ocamlrep(value).err().unwrap(); assert_eq!(err, ErrorInField(1, Box::new(ExpectedBool(42)))); } #[derive(FromOcamlRep, ToOcamlRep)] struct Bar { c: Foo, 
d: Option<Vec<Option<isize>>>, } #[test] fn bad_nested_struct_field() { let arena = Arena::new(); let inner = { let mut block = arena.block_with_size_and_tag(2, 0); arena.set_field(&mut block, 0, Value::int(0)); arena.set_field(&mut block, 1, Value::int(42)); block.build() }; let outer = { let mut block = arena.block_with_size_and_tag(2, 0); arena.set_field(&mut block, 0, inner); arena.set_field(&mut block, 1, Value::int(0)); block.build() }; let err = Bar::from_ocamlrep(outer).err().unwrap(); assert_eq!( err, ErrorInField(0, Box::new(ErrorInField(1, Box::new(ExpectedBool(42))))) ); } #[derive(FromOcamlRep, ToOcamlRep)] struct UnitStruct; #[test] fn expected_unit_struct_but_got_nonzero() { let value = Value::int(42); let err = UnitStruct::from_ocamlrep(value).err().unwrap(); assert_eq!(err, ExpectedUnit(42)); } #[test] fn expected_unit_struct_but_got_block() { let arena = Arena::new(); let value = arena.block_with_size_and_tag(1, 0).build(); let err = UnitStruct::from_ocamlrep(value).err().unwrap(); match err { ExpectedInt(..) 
=> {} _ => panic!("unexpected error: {err}"), } } #[derive(FromOcamlRep, ToOcamlRep)] struct WrapperStruct(bool); #[test] fn bad_value_in_wrapper_struct() { let value = Value::int(42); let err = WrapperStruct::from_ocamlrep(value).err().unwrap(); assert_eq!(err, ExpectedBool(42)) } #[derive(Debug, PartialEq, FromOcamlRep, ToOcamlRep)] enum Fruit { Apple, Orange(bool), Pear { is_tasty: bool }, Kiwi, Peach(Box<(isize, bool)>), } #[test] fn nullary_variant_tag_out_of_range() { let value = Value::int(42); let err = Fruit::from_ocamlrep(value).err().unwrap(); assert_eq!(err, NullaryVariantTagOutOfRange { max: 1, actual: 42 }); } #[test] fn block_variant_tag_out_of_range() { let arena = Arena::new(); let value = arena.block_with_size_and_tag(1, 42).build(); let err = Fruit::from_ocamlrep(value).err().unwrap(); assert_eq!(err, BlockTagOutOfRange { max: 2, actual: 42 }); } #[test] fn wrong_block_variant_size() { let arena = Arena::new(); let value = arena.block_with_size_and_tag(42, 0).build(); let err = Fruit::from_ocamlrep(value).err().unwrap(); assert_eq!( err, WrongBlockSize { expected: 1, actual: 42 } ); } #[test] fn bad_tuple_variant_value() { let arena = Arena::new(); let orange = { let mut orange = arena.block_with_size_and_tag(1, 0); arena.set_field(&mut orange, 0, Value::int(42)); orange.build() }; let err = Fruit::from_ocamlrep(orange).err().unwrap(); assert_eq!(err, ErrorInField(0, Box::new(ExpectedBool(42)))); } #[test] fn bad_struct_variant_value() { let arena = Arena::new(); let pear = { let mut pear = arena.block_with_size_and_tag(1, 1); arena.set_field(&mut pear, 0, Value::int(42)); pear.build() }; let err = Fruit::from_ocamlrep(pear).err().unwrap(); assert_eq!(err, ErrorInField(0, Box::new(ExpectedBool(42)))); } #[test] fn good_boxed_tuple_variant() { let arena = Arena::new(); let peach = { let mut peach = arena.block_with_size_and_tag(2, 2); arena.set_field(&mut peach, 0, Value::int(42)); arena.set_field(&mut peach, 1, Value::int(1)); peach.build() }; 
let peach = Fruit::from_ocamlrep(peach); assert_eq!(peach, Ok(Fruit::Peach(Box::new((42, true))))); } #[test] fn round_trip_through_ocaml_value_unsigned_int() { let num = 7334234036144964024u64; let value = Value::int(num as isize); let num_int: isize = ocamlrep::from::expect_int(value).ok().unwrap(); let num_uint: usize = !(1usize << 63) & ocamlrep::from::expect_int(value).ok().unwrap() as usize; assert!(num_int < 0); assert_eq!((num_int as u64) & !(1 << 63), num); assert_eq!(num_uint as u64, num); }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep/test/test_bindings.rs
ocamlrep/test/test_bindings.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. #![feature(exit_status_error)] use std::cell::RefCell; use std::collections::BTreeMap; use std::collections::BTreeSet; use ocamlrep::FromOcamlRep; use ocamlrep::ToOcamlRep; fn val<T: FromOcamlRep + ToOcamlRep>(value: T) -> usize { let arena = Box::leak(Box::new(ocamlrep::Arena::new())); let value = arena.add(&value); // Round-trip back to T to exercise from_ocamlrep. let value = T::from_ocamlrep(value).unwrap(); let value = arena.add(&value); value.to_bits() } /// # Safety /// `value` must be a valid pointer to an OCaml value. #[unsafe(no_mangle)] pub unsafe extern "C" fn convert_to_ocamlrep(value: usize) -> usize { unsafe { let arena = Box::leak(Box::new(ocamlrep::Arena::new())); let value = ocamlrep::Value::from_bits(value); let value = value.clone_with_allocator(arena); value.to_bits() } } #[unsafe(no_mangle)] pub extern "C" fn realloc_in_ocaml_heap(value: usize) -> usize { let value = unsafe { ocamlrep::Value::from_bits(value) }; let pool = unsafe { ocamlrep_ocamlpool::Pool::new() }; value.clone_with_allocator(&pool).to_bits() } // Primitive Tests #[unsafe(no_mangle)] pub extern "C" fn get_a(_unit: usize) -> usize { val('a') } #[unsafe(no_mangle)] pub extern "C" fn get_five(_unit: usize) -> usize { val(5) } #[unsafe(no_mangle)] pub extern "C" fn get_true(_unit: usize) -> usize { val(true) } #[unsafe(no_mangle)] pub extern "C" fn get_false(_unit: usize) -> usize { val(false) } // Option Tests #[unsafe(no_mangle)] pub extern "C" fn get_none(_unit: usize) -> usize { val(None::<isize>) } #[unsafe(no_mangle)] pub extern "C" fn get_some_five(_unit: usize) -> usize { val(Some(5)) } #[unsafe(no_mangle)] pub extern "C" fn get_some_none(_unit: usize) -> usize { val(Some(None::<isize>)) } #[unsafe(no_mangle)] pub extern "C" fn get_some_some_five(_unit: usize) -> usize { val(Some(Some(5))) } 
// Ref tests #[unsafe(no_mangle)] pub extern "C" fn get_int_ref(_unit: usize) -> usize { val(RefCell::new(5)) } #[unsafe(no_mangle)] pub extern "C" fn get_int_option_ref(_unit: usize) -> usize { val(RefCell::new(Some(5))) } // Unsized type tests #[unsafe(no_mangle)] pub extern "C" fn get_str(_unit: usize) -> usize { let arena = Box::leak(Box::new(ocamlrep::Arena::new())); arena.add("static str").to_bits() } #[unsafe(no_mangle)] pub extern "C" fn get_byte_slice(_unit: usize) -> usize { let arena = Box::leak(Box::new(ocamlrep::Arena::new())); arena.add(&b"byte\x00\xFFslice"[..]).to_bits() } #[unsafe(no_mangle)] pub extern "C" fn get_int_opt_slice(_unit: usize) -> usize { let arena = Box::leak(Box::new(ocamlrep::Arena::new())); let vec = [None, Some(2), Some(3)]; let slice = &vec[..]; arena.add(slice).to_bits() } // List Tests #[unsafe(no_mangle)] pub extern "C" fn get_empty_list(_unit: usize) -> usize { val(Vec::<isize>::new()) } #[unsafe(no_mangle)] pub extern "C" fn get_five_list(_unit: usize) -> usize { val(vec![5]) } #[unsafe(no_mangle)] pub extern "C" fn get_one_two_three_list(_unit: usize) -> usize { val(vec![1, 2, 3]) } #[unsafe(no_mangle)] pub extern "C" fn get_float_list(_unit: usize) -> usize { val(vec![1.0, 2.0, 3.0]) } // Struct tests #[derive(FromOcamlRep, ToOcamlRep)] struct Foo { a: isize, b: bool, } #[derive(FromOcamlRep, ToOcamlRep)] struct Bar { c: Foo, d: Option<Vec<Option<isize>>>, } #[unsafe(no_mangle)] pub extern "C" fn get_foo(_unit: usize) -> usize { val(Foo { a: 25, b: true }) } #[unsafe(no_mangle)] pub extern "C" fn get_bar(_unit: usize) -> usize { val(Bar { c: Foo { a: 42, b: false }, d: Some(vec![Some(88), None, Some(66)]), }) } // String Tests #[unsafe(no_mangle)] pub extern "C" fn get_empty_string(_unit: usize) -> usize { val(String::from("")) } #[unsafe(no_mangle)] pub extern "C" fn get_a_string(_unit: usize) -> usize { val(String::from("a")) } #[unsafe(no_mangle)] pub extern "C" fn get_ab_string(_unit: usize) -> usize { 
val(String::from("ab")) } #[unsafe(no_mangle)] pub extern "C" fn get_abcde_string(_unit: usize) -> usize { val(String::from("abcde")) } #[unsafe(no_mangle)] pub extern "C" fn get_abcdefg_string(_unit: usize) -> usize { val(String::from("abcdefg")) } #[unsafe(no_mangle)] pub extern "C" fn get_abcdefgh_string(_unit: usize) -> usize { val(String::from("abcdefgh")) } #[unsafe(no_mangle)] pub extern "C" fn get_zero_float(_unit: usize) -> usize { val(0.0_f64) } #[unsafe(no_mangle)] pub extern "C" fn get_one_two_float(_unit: usize) -> usize { val(1.2_f64) } // Variant tests #[derive(FromOcamlRep, ToOcamlRep)] enum Fruit { Apple, Orange(isize), Pear { num: isize }, Kiwi, } #[unsafe(no_mangle)] pub extern "C" fn get_apple(_unit: usize) -> usize { val(Fruit::Apple) } #[unsafe(no_mangle)] pub extern "C" fn get_orange(_unit: usize) -> usize { val(Fruit::Orange(39)) } #[unsafe(no_mangle)] pub extern "C" fn get_pear(_unit: usize) -> usize { val(Fruit::Pear { num: 76 }) } #[unsafe(no_mangle)] pub extern "C" fn get_kiwi(_unit: usize) -> usize { val(Fruit::Kiwi) } // Map tests #[unsafe(no_mangle)] pub extern "C" fn get_empty_smap(_unit: usize) -> usize { let map: BTreeMap<String, isize> = BTreeMap::new(); val(map) } #[unsafe(no_mangle)] pub extern "C" fn get_int_smap_singleton(_unit: usize) -> usize { let mut map = BTreeMap::new(); map.insert(String::from("a"), 1); val(map) } #[unsafe(no_mangle)] pub extern "C" fn get_int_smap(_unit: usize) -> usize { let mut map = BTreeMap::new(); map.insert(String::from("a"), 1); map.insert(String::from("b"), 2); map.insert(String::from("c"), 3); val(map) } // Set tests #[unsafe(no_mangle)] pub extern "C" fn get_empty_sset(_unit: usize) -> usize { let set: BTreeSet<String> = BTreeSet::new(); val(set) } #[unsafe(no_mangle)] pub extern "C" fn get_sset_singleton(_unit: usize) -> usize { let mut set = BTreeSet::new(); set.insert(String::from("a")); val(set) } #[unsafe(no_mangle)] pub extern "C" fn get_sset(_unit: usize) -> usize { let mut set = 
BTreeSet::new(); set.insert(String::from("a")); set.insert(String::from("b")); set.insert(String::from("c")); val(set) } #[unsafe(no_mangle)] pub extern "C" fn roundtrip_int64(value: usize) -> usize { let i = unsafe { ocamlrep_caml_builtins::Int64::from_ocaml(value).unwrap() }; val(i) } // Hack! Trick buck into believing that these libraries are used. See [Note: // Test blocks for Cargo] in `ocamlrep_ocamlpool/test/ocamlpool_test.rs`. const _: () = { #[allow(unused_imports)] use anyhow; #[allow(unused_imports)] use cargo_test_utils; #[allow(unused_imports)] use tempfile; }; #[cfg(test)] mod tests { use anyhow::Result; use cargo_test_utils::*; use tempfile::TempDir; #[test] fn ocamlrep_test() -> Result<()> { let parent = std::path::Path::new(".."); let tmp_dir = TempDir::with_prefix("ocamlrep_test.")?; std::fs::copy( parent.join("test_ocamlrep.ml"), tmp_dir.path().join("test_ocamlrep.ml"), )?; let compile_cmd = cmd( "ocamlopt.opt", &[ "-verbose", "-c", "test_ocamlrep.ml", "-o", "test_ocamlrep_ml.cmx", ], Some(tmp_dir.path()), ); assert_eq!(run(compile_cmd).map_err(fmt_exit_status_err), Ok(())); let link_cmd = cmd( "ocamlopt.opt", &[ "-verbose", "-o", "ocamlrep_test", "test_ocamlrep_ml.cmx", "-ccopt", &("-L".to_owned() + workspace_dir(&["target", build_flavor()]).to_str().unwrap()), "-cclib", "-ltest_bindings", "-cclib", "-locamlrep_ocamlpool", ], Some(tmp_dir.path()), ); assert_eq!(run(link_cmd).map_err(fmt_exit_status_err), Ok(())); let ocamlrep_test_cmd = cmd( tmp_dir .path() .join("ocamlrep_test") .as_path() .to_str() .unwrap(), &[], None, ); assert_eq!(run(ocamlrep_test_cmd).map_err(fmt_exit_status_err), Ok(())); tmp_dir.close()?; Ok(()) } }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep/test/test_from_ocamlrep_in.rs
ocamlrep/test/test_from_ocamlrep_in.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. #![cfg(test)] use std::fmt::Debug; use bumpalo::Bump; use ocamlrep::FromOcamlRepIn; use ocamlrep::ToOcamlRep; fn test_round_trip<'a, T>(bump: &'a Bump, rust_value: T) where T: FromOcamlRepIn<'a> + ToOcamlRep + Debug + PartialEq, { let arena = ocamlrep::Arena::new(); let ocaml_value = arena.add(&rust_value); assert_eq!(T::from_ocamlrep_in(ocaml_value, bump), Ok(rust_value)); } #[test] fn convert_primitives() { let bump = &Bump::new(); test_round_trip(bump, ()); test_round_trip(bump, 1isize); test_round_trip(bump, 2usize); test_round_trip(bump, 3i64); test_round_trip(bump, 4u64); test_round_trip(bump, 5i32); test_round_trip(bump, 6u32); test_round_trip(bump, true); test_round_trip(bump, false); test_round_trip(bump, 'a'); test_round_trip(bump, 7.7f64); } #[test] fn convert_std_types() { let bump = &Bump::new(); test_round_trip(bump, None::<usize>); test_round_trip(bump, Some(&*bump.alloc(5usize))); test_round_trip(bump, Ok::<&str, &str>("okay")); test_round_trip(bump, Err::<&str, &str>("error")); } #[derive(Debug, FromOcamlRepIn, ToOcamlRep, PartialEq)] struct Foo<'a> { bar: &'a usize, baz: usize, } #[test] fn convert_struct_with_ref() { let bump = &Bump::new(); test_round_trip( bump, Foo { bar: bump.alloc(3), baz: 4, }, ); } #[derive(Debug, FromOcamlRepIn, ToOcamlRep, PartialEq)] enum Fruit<'a> { Apple, Orange(&'a str), Pear { is_tasty: bool }, Kiwi, Peach(&'a (isize, bool)), } #[test] fn convert_str_variant() { test_round_trip(&Bump::new(), Fruit::Orange("mandarin")); } #[test] fn convert_boxed_tuple_variant() { let bump = &Bump::new(); test_round_trip(bump, Fruit::Peach(bump.alloc((42, true)))); }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep_marshal/ser.rs
ocamlrep_marshal/ser.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. // Initially generatd by c2rust of 'extern.c' at revision: // `f14c8ff3f8a164685bc24184fba84904391e378e`. use std::io; use std::io::Read; use std::io::Seek; use std::io::Write; use std::mem::MaybeUninit; use ocamlrep::FromOcamlRep; use ocamlrep::Header; use ocamlrep::Value; use crate::intext::*; unsafe extern "C" { fn ocaml_version() -> usize; } bitflags::bitflags! { /// Flags affecting marshaling #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)] pub struct ExternFlags: u8 { /// Flag to ignore sharing const NO_SHARING = 1; /// Flag to allow marshaling code pointers. Not permitted in `ocamlrep_marshal`. const CLOSURES = 2; /// Flag to ensure that output can safely be read back on a 32-bit platform const COMPAT_32 = 4; } } // NB: Must match the definition order in ocaml's marshal.ml #[derive(ocamlrep::FromOcamlRep)] enum ExternFlag { NoSharing, Closures, Compat32, } impl From<ExternFlag> for ExternFlags { fn from(flag: ExternFlag) -> Self { match flag { ExternFlag::NoSharing => ExternFlags::NO_SHARING, ExternFlag::Closures => ExternFlags::CLOSURES, ExternFlag::Compat32 => ExternFlags::COMPAT_32, } } } impl FromOcamlRep for ExternFlags { fn from_ocamlrep(mut list: ocamlrep::Value<'_>) -> Result<Self, ocamlrep::FromError> { let mut res = ExternFlags::empty(); while !list.is_int() { let block = ocamlrep::from::expect_tuple(list, 2)?; let flag: ExternFlag = ocamlrep::from::field(block, 0)?; res |= flag.into(); list = block[1]; } Ok(res) } } // Stack for pending values to marshal const EXTERN_STACK_INIT_SIZE: usize = 256; const EXTERN_STACK_MAX_SIZE: usize = 1024 * 1024 * 100; #[derive(Copy, Clone)] #[repr(C)] struct ExternItem<'a> { fields: &'a [Value<'a>], } // Hash table to record already-marshaled objects and their positions #[derive(Copy, Clone)] #[repr(C)] struct 
ObjectPosition<'a> { obj: Value<'a>, pos: usize, } // The hash table uses open addressing, linear probing, and a redundant // representation: // - a bitvector [present] records which entries of the table are occupied; // - an array [entries] records (object, position) pairs for the entries // that are occupied. // The bitvector is much smaller than the array (1/128th on 64-bit // platforms, 1/64th on 32-bit platforms), so it has better locality, // making it faster to determine that an object is not in the table. // Also, it makes it faster to empty or initialize a table: only the // [present] bitvector needs to be filled with zeros, the [entries] // array can be left uninitialized. #[repr(C)] struct PositionTable<'a> { shift: u8, size: usize, // size == 1 << (wordsize - shift) mask: usize, // mask == size - 1 threshold: usize, // threshold == a fixed fraction of size present: Box<[usize]>, // [bitvect_size(size)] /// SAFETY: Elements of `entries` are not initialized unless their /// corresponding bit is set in `present`. entries: Box<[MaybeUninit<ObjectPosition<'a>>]>, // [size] } const BITS_WORD: usize = 8 * std::mem::size_of::<usize>(); #[inline] const fn bitvect_size(n: usize) -> usize { n.div_ceil(BITS_WORD) } const POS_TABLE_INIT_SIZE_LOG2: usize = 8; const POS_TABLE_INIT_SIZE: usize = 1 << POS_TABLE_INIT_SIZE_LOG2; // Multiplicative Fibonacci hashing // (Knuth, TAOCP vol 3, section 6.4, page 518). // HASH_FACTOR is (sqrt(5) - 1) / 2 * 2^wordsize. const HASH_FACTOR: usize = 11400714819323198486; #[inline] const fn hash(v: Value<'_>, shift: u8) -> usize { v.to_bits().wrapping_mul(HASH_FACTOR) >> shift } // When the table becomes 2/3 full, its size is increased. 
#[inline] const fn threshold(sz: usize) -> usize { (sz * 2) / 3 } // Accessing bitvectors #[inline] fn bitvect_test(bv: &[usize], i: usize) -> bool { bv[i / BITS_WORD] & (1 << (i & (BITS_WORD - 1))) != 0 } #[inline] fn bitvect_set(bv: &mut [usize], i: usize) { bv[i / BITS_WORD] |= 1 << (i & (BITS_WORD - 1)); } // Conversion to big-endian #[inline] fn store16(dst: &mut impl Write, n: i16) -> io::Result<()> { dst.write_all(&n.to_be_bytes()) } #[inline] fn store32(dst: &mut impl Write, n: i32) -> io::Result<()> { dst.write_all(&n.to_be_bytes()) } #[inline] fn store64(dst: &mut impl Write, n: i64) -> io::Result<()> { dst.write_all(&n.to_be_bytes()) } #[repr(C)] struct State<'a, W: Write> { flags: ExternFlags, // logical or of some of the flags obj_counter: usize, // Number of objects emitted so far size_32: usize, // Size in words of 32-bit block for struct. size_64: usize, // Size in words of 64-bit block for struct. // Stack for pending value to marshal stack: Vec<ExternItem<'a>>, // Hash table to record already marshalled objects pos_table: PositionTable<'a>, // The output file or buffer we are writing to output: W, output_len: usize, } impl<'a, W: Write> State<'a, W> { fn new(output: W) -> Self { Self { flags: ExternFlags::empty(), obj_counter: 0, size_32: 0, size_64: 0, stack: Vec::with_capacity(EXTERN_STACK_INIT_SIZE), pos_table: PositionTable { shift: 0, size: 0, mask: 0, threshold: 0, present: [].into(), entries: [].into(), }, output, output_len: 0, } } /// Initialize the position table fn init_position_table(&mut self) { if self.flags.contains(ExternFlags::NO_SHARING) { return; } self.pos_table.size = POS_TABLE_INIT_SIZE; self.pos_table.shift = (8 * std::mem::size_of::<Value<'a>>() - POS_TABLE_INIT_SIZE_LOG2) as u8; self.pos_table.mask = POS_TABLE_INIT_SIZE - 1; self.pos_table.threshold = threshold(POS_TABLE_INIT_SIZE); // SAFETY: zero is a valid value for the elements of `present`. 
unsafe { self.pos_table.present = Box::new_zeroed_slice(bitvect_size(POS_TABLE_INIT_SIZE)).assume_init(); } self.pos_table.entries = Box::new_uninit_slice(POS_TABLE_INIT_SIZE); } /// Grow the position table fn resize_position_table(&mut self) { let new_size: usize; let new_shift: u8; // Grow the table quickly (x 8) up to 10^6 entries, // more slowly (x 2) afterwards. if self.pos_table.size < 1000000 { new_size = self.pos_table.size * 8; new_shift = self.pos_table.shift - 3; } else { new_size = self.pos_table.size * 2; new_shift = self.pos_table.shift - 1; } let old = std::mem::replace( &mut self.pos_table, PositionTable { size: new_size, shift: new_shift, mask: new_size - 1, threshold: threshold(new_size), // SAFETY: zero is a valid value for the elements of `present`. present: unsafe { Box::new_zeroed_slice(bitvect_size(new_size)).assume_init() }, entries: Box::new_uninit_slice(new_size), }, ); // Insert every entry of the old table in the new table let mut i = 0; while i < old.size { if bitvect_test(&old.present, i) { // SAFETY: We checked that the bit for `i` is set in // `old.present`, so `entries[i]` must be initialized let old_entry = unsafe { old.entries[i].assume_init() }; let mut h = hash(old_entry.obj, self.pos_table.shift); while bitvect_test(&self.pos_table.present, h) { h = (h + 1) & self.pos_table.mask } bitvect_set(&mut self.pos_table.present, h); self.pos_table.entries[h] = MaybeUninit::new(old_entry); } i += 1 } } /// Determine whether the given object [obj] is in the hash table. /// If so, set `*pos_out` to its position in the output and return true. /// If not, set `*h_out` to the hash value appropriate for /// `record_location` and return false. 
#[inline] fn lookup_position(&self, obj: Value<'a>, pos_out: &mut usize, h_out: &mut usize) -> bool { let mut h: usize = hash(obj, self.pos_table.shift); loop { if !bitvect_test(&self.pos_table.present, h) { *h_out = h; return false; } // SAFETY: We checked that the bit for `h` is set in `present`, so // `entries[h]` must be initialized let entry = unsafe { self.pos_table.entries[h].assume_init_ref() }; if entry.obj == obj { *pos_out = entry.pos; return true; } h = (h + 1) & self.pos_table.mask } } /// Record the output position for the given object [obj]. /// /// The [h] parameter is the index in the hash table where the object /// must be inserted. It was determined during lookup. fn record_location(&mut self, obj: Value<'a>, h: usize) { if self.flags.contains(ExternFlags::NO_SHARING) { return; } bitvect_set(&mut self.pos_table.present, h); self.pos_table.entries[h] = MaybeUninit::new(ObjectPosition { obj, pos: self.obj_counter, }); self.obj_counter += 1; if self.obj_counter >= self.pos_table.threshold { self.resize_position_table(); }; } // Write characters, integers, and blocks in the output buffer #[inline] fn write(&mut self, c: u8) -> io::Result<()> { self.output_len += 1; self.output.write_all(&[c]) } fn writeblock(&mut self, data: &[u8]) -> io::Result<()> { self.output_len += data.len(); self.output.write_all(data) } #[inline] fn writeblock_float8(&mut self, data: &[f64]) -> io::Result<()> { if ARCH_FLOAT_ENDIANNESS == 0x01234567 || ARCH_FLOAT_ENDIANNESS == 0x76543210 { // SAFETY: `data.as_ptr()` will be valid for reads of `data.len() * // size_of::<f64>()` bytes self.writeblock(unsafe { std::slice::from_raw_parts(data.as_ptr() as *const u8, std::mem::size_of_val(data)) }) } else { unimplemented!() } } fn writecode8(&mut self, code: u8, val: i8) -> io::Result<()> { self.output_len += 2; self.output.write_all(&[code, val as u8]) } fn writecode16(&mut self, code: u8, val: i16) -> io::Result<()> { self.output_len += 3; self.output.write_all(&[code])?; 
store16(&mut self.output, val) } fn writecode32(&mut self, code: u8, val: i32) -> io::Result<()> { self.output_len += 5; self.output.write_all(&[code])?; store32(&mut self.output, val) } fn writecode64(&mut self, code: u8, val: i64) -> io::Result<()> { self.output_len += 9; self.output.write_all(&[code])?; store64(&mut self.output, val) } /// Marshaling integers #[inline] fn extern_int(&mut self, n: isize) -> io::Result<()> { if (0..0x40).contains(&n) { self.write(PREFIX_SMALL_INT + n as u8) } else if (-(1 << 7)..(1 << 7)).contains(&n) { self.writecode8(CODE_INT8, n as i8) } else if (-(1 << 15)..(1 << 15)).contains(&n) { self.writecode16(CODE_INT16, n as i16) } else if !(-(1 << 30)..(1 << 30)).contains(&n) { if self.flags.contains(ExternFlags::COMPAT_32) { panic!("output_value: integer cannot be read back on 32-bit platform"); } self.writecode64(CODE_INT64, n as i64) } else { self.writecode32(CODE_INT32, n as i32) } } /// Marshaling references to previously-marshaled blocks #[inline] fn extern_shared_reference(&mut self, d: usize) -> io::Result<()> { if d < 0x100 { self.writecode8(CODE_SHARED8, d as i8) } else if d < 0x10000 { self.writecode16(CODE_SHARED16, d as i16) } else if d >= 1 << 32 { self.writecode64(CODE_SHARED64, d as i64) } else { self.writecode32(CODE_SHARED32, d as i32) } } /// Marshaling block headers #[inline] fn extern_header(&mut self, sz: usize, tag: u8) -> io::Result<()> { if tag < 16 && sz < 8 { self.write(PREFIX_SMALL_BLOCK + tag + ((sz as u8) << 4)) } else { // Note: ocaml-14.4.0 uses `Caml_white` (`0 << 8`) // ('caml/runtime/gc.h'). // // In ocaml-5, via PR https://github.com/ocaml/ocaml/pull/10831, in // commit // `https://github.com/ocaml/ocaml/commit/868265f4532a2cc33bbffd83221c9613e743d759` // this becomes, // let hd: header_t = Make_header(sz, tag, NOT_MARKABLE); // where, `NOT_MARKABLE` (`3 << 8`) ('caml/runtime/shared_heap.h'). // Check the prevailing OCaml version is well initialized & one // we've tested for. 
let which_ocaml = unsafe { ocaml_version() }; if ![41400, 41401, 50000, 50100, 50101, 50200].contains(&which_ocaml) { panic!("unexpected ocaml version: {which_ocaml}!"); } let color = if which_ocaml < 50000 { ocamlrep::Color::White } else { ocamlrep::Color::Black }; let hd = Header::with_color(sz, tag, color).to_bits(); if sz > 0x3FFFFF && self.flags.contains(ExternFlags::COMPAT_32) { panic!("output_value: array cannot be read back on 32-bit platform"); } if hd < 1 << 32 { self.writecode32(CODE_BLOCK32, hd as i32) } else { self.writecode64(CODE_BLOCK64, hd as i64) } } } #[inline] fn extern_string(&mut self, bytes: &'a [u8]) -> io::Result<()> { let len = bytes.len(); if len < 0x20 { self.write(PREFIX_SMALL_STRING + len as u8)?; } else if len < 0x100 { self.writecode8(CODE_STRING8, len as i8)?; } else { if len > 0xFFFFFB && self.flags.contains(ExternFlags::COMPAT_32) { panic!("output_value: string cannot be read back on 32-bit platform"); } if len < 1 << 32 { self.writecode32(CODE_STRING32, len as i32)?; } else { self.writecode64(CODE_STRING64, len as i64)?; } } self.writeblock(bytes) } /// Marshaling FP numbers #[inline] fn extern_double(&mut self, v: f64) -> io::Result<()> { self.write(CODE_DOUBLE_NATIVE)?; self.writeblock_float8(&[v]) } /// Marshaling FP arrays #[inline] fn extern_double_array(&mut self, slice: &[f64]) -> io::Result<()> { let nfloats = slice.len(); if nfloats < 0x100 { self.writecode8(CODE_DOUBLE_ARRAY8_NATIVE, nfloats as i8)?; } else { if nfloats > 0x1FFFFF && self.flags.contains(ExternFlags::COMPAT_32) { panic!("output_value: float array cannot be read back on 32-bit platform"); } if nfloats < 1 << 32 { self.writecode32(CODE_DOUBLE_ARRAY32_NATIVE, nfloats as i32)?; } else { self.writecode64(CODE_DOUBLE_ARRAY64_NATIVE, nfloats as i64)?; } } self.writeblock_float8(slice) } /// Marshal the given value in the output buffer fn extern_rec(&mut self, mut v: Value<'a>) -> io::Result<()> { let mut goto_next_item: bool; let mut h: usize = 0; let mut pos: 
usize = 0; self.init_position_table(); loop { if v.is_int() { self.extern_int(v.as_int().unwrap())?; } else { let b = v.as_block().unwrap(); let tag = b.tag(); let sz = b.size(); if tag == ocamlrep::FORWARD_TAG { let f = b[0]; if let Some(fb) = f.as_block() { if fb.tag() == ocamlrep::FORWARD_TAG || fb.tag() == ocamlrep::LAZY_TAG || fb.tag() == ocamlrep::FORCING_TAG || fb.tag() == ocamlrep::DOUBLE_TAG { // Do not short-circuit the pointer. } else { v = f; continue; } } else { v = f; continue; } } // Atoms are treated specially for two reasons: they are not allocated // in the externed block, and they are automatically shared. if sz == 0 { self.extern_header(0, tag)?; } else { // Check if object already seen if !self.flags.contains(ExternFlags::NO_SHARING) { if self.lookup_position(v, &mut pos, &mut h) { self.extern_shared_reference(self.obj_counter - pos)?; goto_next_item = true; } else { goto_next_item = false; } } else { goto_next_item = false; } if !goto_next_item { // Output the contents of the object match tag { ocamlrep::STRING_TAG => { let bytes = v.as_byte_string().unwrap(); let len: usize = bytes.len(); self.extern_string(bytes)?; self.size_32 += 1 + (len + 4) / 4; self.size_64 += 1 + (len + 8) / 8; self.record_location(v, h); } ocamlrep::DOUBLE_TAG => { self.extern_double(v.as_float().unwrap())?; self.size_32 += 1 + 2; self.size_64 += 1 + 1; self.record_location(v, h); } ocamlrep::DOUBLE_ARRAY_TAG => { let slice = v.as_double_array().unwrap(); self.extern_double_array(slice)?; let nfloats = slice.len(); self.size_32 += 1 + nfloats * 2; self.size_64 += 1 + nfloats; self.record_location(v, h); } ocamlrep::ABSTRACT_TAG => { panic!("output_value: abstract value (Abstract)"); } // INFIX_TAG represents an infix header inside a // closure, and can only occur in blocks with tag // CLOSURE_TAG ocamlrep::INFIX_TAG => { panic!("output_value: marshaling of closures not implemented"); } ocamlrep::CUSTOM_TAG => { panic!("output_value: marshaling of custom blocks not 
implemented"); } ocamlrep::CLOSURE_TAG => { panic!("output_value: marshaling of closures not implemented"); } _ => { self.extern_header(sz, tag)?; self.size_32 += 1 + sz; self.size_64 += 1 + sz; self.record_location(v, h); // Remember that we still have to serialize fields 1 ... sz - 1 if sz > 1 { if self.stack.len() + 1 >= EXTERN_STACK_MAX_SIZE { panic!("Stack overflow in marshaling value"); } self.stack.push(ExternItem { fields: &b.as_values().unwrap()[1..], }); } // Continue serialization with the first field v = v.field(0).unwrap(); continue; } } } } } // C goto label `next_item:` here // Pop one more item to marshal, if any if let Some(item) = self.stack.last_mut() { v = item.fields[0]; item.fields = &item.fields[1..]; if item.fields.is_empty() { self.stack.pop(); } } else { // We are done. return Ok(()); } } // Never reached as function leaves with return } fn extern_value( &mut self, v: Value<'a>, flags: ExternFlags, mut header: &mut [u8], // out header_len: &mut usize, // out ) -> io::Result<usize> { // Initializations self.flags = flags; self.obj_counter = 0; self.size_32 = 0; self.size_64 = 0; // Marshal the object self.extern_rec(v)?; // Write the header let res_len = self.output_len; if res_len >= (1 << 32) || self.size_32 >= (1 << 32) || self.size_64 >= (1 << 32) { // The object is too big for the small header format. // Fail if we are in compat32 mode, or use big header. 
if self.flags.contains(ExternFlags::COMPAT_32) { panic!("output_value: object too big to be read back on 32-bit platform"); } store32(&mut header, MAGIC_NUMBER_BIG as i32)?; store32(&mut header, 0)?; store64(&mut header, res_len as i64)?; store64(&mut header, self.obj_counter as i64)?; store64(&mut header, self.size_64 as i64)?; *header_len = 32; Ok(res_len) } else { // Use the small header format store32(&mut header, MAGIC_NUMBER_SMALL as i32)?; store32(&mut header, res_len as i32)?; store32(&mut header, self.obj_counter as i32)?; store32(&mut header, self.size_32 as i32)?; store32(&mut header, self.size_64 as i32)?; *header_len = 20; Ok(res_len) } } } pub fn output_value<W: Read + Write + Seek>( w: &mut W, v: Value<'_>, flags: ExternFlags, ) -> io::Result<()> { let mut header = [0; 32]; let mut header_len = 0; // At this point we don't know the size of the header. // Guess that it is small, and fix up later if not. w.rewind()?; w.write_all(&[0; 20])?; let mut s = State::new(&mut *w); s.extern_value(v, flags, &mut header, &mut header_len)?; drop(s); w.flush()?; if header_len != 20 { // Bad guess! Need to shift the output to make room for big header. w.seek(std::io::SeekFrom::Start(20))?; let mut output = vec![]; w.read_to_end(&mut output)?; w.seek(std::io::SeekFrom::Start(header_len as u64))?; w.write_all(&output)?; } w.rewind()?; w.write_all(&header[0..header_len])?; w.flush() }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep_marshal/deser.rs
ocamlrep_marshal/deser.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. // This file ('deser.rs') was based off c2rust generated code of 'intern.c' at // revision `15553b77175270d987058b386d737ccb939e8d5a` (i.e. the 4.14.0 tag). use ocamlrep::Value; use crate::intext::*; #[derive(Copy, Clone)] #[repr(C)] pub struct MarshalHeader { pub magic: u32, pub header_len: i32, pub data_len: usize, pub num_objects: usize, pub whsize: usize, } struct InternItem<'a> { pub dest: *mut Value<'a>, pub arg: usize, } struct State<'s, 'a, A> { /// Slice holding input data. intern_src: &'s [u8], /// The allocator of OCaml objects, e.g. `ocamlrep::Arena` or /// `ocamlrep_ocamlpool::Pool`. alloc: &'a A, /// Count how many objects seen so far. obj_counter: usize, /// Objects already seen. intern_obj_table: Vec<Value<'a>>, /// The "recursion stack" used in `intern_rec`. stack: Vec<InternItem<'a>>, } impl<'s, 'a, A: ocamlrep::Allocator> State<'s, 'a, A> { const INTERN_STACK_INIT_SIZE: usize = 256; fn new(alloc: &'a A, intern_src: &'s [u8]) -> Self { Self { intern_src, alloc, obj_counter: 0, intern_obj_table: Vec::new(), stack: Vec::with_capacity(Self::INTERN_STACK_INIT_SIZE), } } #[inline] fn read8u(&mut self) -> u8 { let size = std::mem::size_of::<u8>(); let res = u8::from_be_bytes(self.intern_src[..size].try_into().unwrap()); self.intern_src = &self.intern_src[size..]; res } #[inline] fn read8s(&mut self) -> i8 { let size = std::mem::size_of::<i8>(); let res = i8::from_be_bytes(self.intern_src[..size].try_into().unwrap()); self.intern_src = &self.intern_src[size..]; res } #[inline] fn read16u(&mut self) -> u16 { let size = std::mem::size_of::<u16>(); let res = u16::from_be_bytes(self.intern_src[..size].try_into().unwrap()); self.intern_src = &self.intern_src[size..]; res } #[inline] fn read16s(&mut self) -> i16 { let size = std::mem::size_of::<i16>(); let res = 
i16::from_be_bytes(self.intern_src[..size].try_into().unwrap()); self.intern_src = &self.intern_src[size..]; res } #[inline] fn read32u(&mut self) -> u32 { let size = std::mem::size_of::<u32>(); let res = u32::from_be_bytes(self.intern_src[..size].try_into().unwrap()); self.intern_src = &self.intern_src[size..]; res } #[inline] fn read32s(&mut self) -> i32 { let size = std::mem::size_of::<i32>(); let res = i32::from_be_bytes(self.intern_src[..size].try_into().unwrap()); self.intern_src = &self.intern_src[size..]; res } #[inline] fn read64u(&mut self) -> u64 { let size = std::mem::size_of::<u64>(); let res = u64::from_be_bytes(self.intern_src[..size].try_into().unwrap()); self.intern_src = &self.intern_src[8..]; res } fn readfloat(&mut self, dst: &mut f64, code: u8) { if std::mem::size_of::<f64>() != 8 { panic!("input_value: non-standard floats"); } let src: [u8; 8] = self.intern_src[..8].try_into().unwrap(); self.intern_src = &self.intern_src[8..]; *dst = match code { CODE_DOUBLE_BIG => f64::from_be_bytes(src), CODE_DOUBLE_LITTLE => f64::from_le_bytes(src), _ => unreachable!(), } } fn readfloats(&mut self, dest: &mut [f64], code: u8) { if std::mem::size_of::<f64>() != 8 { panic!("input_value: non-standard floats"); } let count = dest.len() * 8; // number of bytes let bytes = &self.intern_src[..count]; self.intern_src = &self.intern_src[count..]; for (i, bytes) in bytes.chunks_exact(8).enumerate() { let src: [u8; 8] = bytes.try_into().unwrap(); dest[i] = match code { CODE_DOUBLE_ARRAY8_BIG | CODE_DOUBLE_ARRAY32_BIG => f64::from_be_bytes(src), CODE_DOUBLE_ARRAY8_LITTLE | CODE_DOUBLE_ARRAY32_LITTLE => f64::from_le_bytes(src), _ => unreachable!(), }; } } unsafe fn intern_rec(&mut self, mut dest: *mut Value<'a>) { unsafe { const READ_BLOCK_LABEL: u64 = 16649699497103515194; const READ_STRING_LABEL: u64 = 11970676656440271524; const READ_SHARED_LABEL: u64 = 8656139126282042408; const READ_DOUBLE_ARRAY_LABEL: u64 = 8966088013221564425; const NOTHING_TO_DO_LABEL: u64 = 
8288085890650723895; let mut current_block: u64; let mut header: usize; let mut code: u8; let mut tag: u8 = 0; let mut size: usize = 0; let mut len: usize = 0; let mut v: Value<'a> = Value::from_bits(0); let mut ofs: usize = 0; // Initially let's try to read the first object from the stream self.stack.push(InternItem { dest, arg: 1 }); // The un-marshaler loop, the recursion is unrolled while let Some(top) = self.stack.last_mut() { // Interpret next item on the stack dest = top.dest; // Pop item top.dest = top.dest.offset(1); top.arg -= 1; if top.arg == 0 { self.stack.pop(); } // Read a value and set v to this value code = self.read8u(); if code >= PREFIX_SMALL_INT { if code >= PREFIX_SMALL_BLOCK { // Small block tag = code & 0xf; size = (code >> 4 & 0x7) as usize; current_block = READ_BLOCK_LABEL; } else { // Small integer v = Value::int((code & 0x3F) as isize); current_block = NOTHING_TO_DO_LABEL; } } else { if code >= PREFIX_SMALL_STRING { // Small string len = (code & 0x1f) as usize; current_block = READ_STRING_LABEL; } else { match code { CODE_INT8 => { v = Value::from_bits(((self.read8s() as usize) << 1) + 1); current_block = NOTHING_TO_DO_LABEL; } CODE_INT16 => { v = Value::from_bits(((self.read16s() as usize) << 1) + 1); current_block = NOTHING_TO_DO_LABEL; } CODE_INT32 => { v = Value::from_bits(((self.read32s() as usize) << 1) + 1); current_block = NOTHING_TO_DO_LABEL; } CODE_INT64 => { v = Value::from_bits(((self.read64u() as usize) << 1) + 1); current_block = NOTHING_TO_DO_LABEL; } CODE_SHARED8 => { ofs = self.read8u() as usize; current_block = READ_SHARED_LABEL; } CODE_SHARED16 => { ofs = self.read16u() as usize; current_block = READ_SHARED_LABEL; } CODE_SHARED32 => { ofs = self.read32u() as usize; current_block = READ_SHARED_LABEL; } CODE_SHARED64 => { ofs = self.read64u() as usize; current_block = READ_SHARED_LABEL; } CODE_BLOCK32 => { header = self.read32u() as usize; tag = (header & 0xff) as u8; size = header >> 10; current_block = READ_BLOCK_LABEL; 
} CODE_BLOCK64 => { header = self.read64u() as usize; tag = (header & 0xff) as u8; size = header >> 10; current_block = READ_BLOCK_LABEL; } CODE_STRING8 => { len = self.read8u() as usize; current_block = READ_STRING_LABEL; } CODE_STRING32 => { len = self.read32u() as usize; current_block = READ_STRING_LABEL; } CODE_STRING64 => { len = self.read64u() as usize; current_block = READ_STRING_LABEL; } CODE_DOUBLE_LITTLE | CODE_DOUBLE_BIG => { let mut builder = self.alloc.block_with_size_and_tag( ocamlrep::DOUBLE_WOSIZE, ocamlrep::DOUBLE_TAG, ); self.readfloat( &mut *(self.alloc.block_ptr_mut(&mut builder) as *mut f64), code, ); v = Value::from_bits(builder.build().to_bits()); self.obj_counter += 1; self.intern_obj_table.push(v); current_block = NOTHING_TO_DO_LABEL; } CODE_DOUBLE_ARRAY8_LITTLE | CODE_DOUBLE_ARRAY8_BIG => { len = self.read8u() as usize; current_block = READ_DOUBLE_ARRAY_LABEL; } CODE_DOUBLE_ARRAY32_LITTLE | CODE_DOUBLE_ARRAY32_BIG => { len = self.read32u() as usize; current_block = READ_DOUBLE_ARRAY_LABEL; } CODE_DOUBLE_ARRAY64_LITTLE | CODE_DOUBLE_ARRAY64_BIG => { len = self.read64u() as usize; current_block = READ_DOUBLE_ARRAY_LABEL; } CODE_CODEPOINTER | CODE_INFIXPOINTER | CODE_CUSTOM | CODE_CUSTOM_LEN | CODE_CUSTOM_FIXED => { unimplemented!() } _ => { panic!("input_value: ill-formed message"); } } match current_block { NOTHING_TO_DO_LABEL | READ_BLOCK_LABEL | READ_STRING_LABEL => {} _ => { match current_block { READ_SHARED_LABEL => { v = self.intern_obj_table[self.obj_counter - ofs]; } _ /* READ_DOUBLE_ARRAY_LABEL */ => { size = len * ocamlrep::DOUBLE_WOSIZE; let mut builder = self.alloc.block_with_size_and_tag( size, ocamlrep::DOUBLE_ARRAY_TAG, ); self.readfloats( std::slice::from_raw_parts_mut(self.alloc.block_ptr_mut(&mut builder) as *mut f64, len), code, ); v = Value::from_bits(builder.build().to_bits()); self.obj_counter += 1; self.intern_obj_table.push(v); } } current_block = NOTHING_TO_DO_LABEL; } } } match current_block { NOTHING_TO_DO_LABEL| 
READ_BLOCK_LABEL => {} _ /* READ_STRING_LABEL */ => { size = (len + std::mem::size_of::<Value<'_>>()) / std::mem::size_of::<Value<'_>>(); v = Value::from_bits(ocamlrep::bytes_to_ocamlrep(&self.intern_src[..len], self.alloc).to_bits()); self.intern_src = &self.intern_src[len..]; self.obj_counter += 1; self.intern_obj_table.push(v); current_block = NOTHING_TO_DO_LABEL; } } } if current_block == READ_BLOCK_LABEL { if size == 0 { panic!("input_value: atoms are not supported"); } else { let mut builder = self.alloc.block_with_size_and_tag(size, tag); if tag == ocamlrep::OBJECT_TAG { panic!("input_value: objects not supported"); } else { // If it's not an object then read the // contents of the block self.stack.push(InternItem { dest: self.alloc.block_ptr_mut(&mut builder), arg: size, }); } v = Value::from_bits(builder.build().to_bits()); self.obj_counter += 1; self.intern_obj_table.push(v); } } *dest = v } } } unsafe fn parse_header(&mut self, h: *mut MarshalHeader) { unsafe { (*h).magic = self.read32u(); match (*h).magic { MAGIC_NUMBER_SMALL => { (*h).header_len = 20; (*h).data_len = self.read32u() as usize; (*h).num_objects = self.read32u() as usize; self.read32u(); (*h).whsize = self.read32u() as usize } MAGIC_NUMBER_BIG => { (*h).header_len = 32; self.read32u(); (*h).data_len = self.read64u() as usize; (*h).num_objects = self.read64u() as usize; (*h).whsize = self.read64u() as usize; } _ => panic!("input_value: bad object"), }; } } unsafe fn input_val_from_string(&mut self, str: &'s [u8]) -> usize { unsafe { let mut obj = Value::int(0).to_bits(); let mut h = MarshalHeader { magic: 0, header_len: 0, data_len: 0, num_objects: 0, whsize: 0, }; self.parse_header(&mut h); if h.header_len as usize + h.data_len > str.len() { panic!("input_value: bad length"); } self.intern_src = &str[(h.header_len as usize)..]; self.intern_rec(&mut obj as *mut usize as *mut Value<'a>); obj } } } /// # Safety /// /// `str` must be a valid OCaml value encoded in the /// 
[`Marshal`](https://v2.ocaml.org/api/Marshal.html) format. pub unsafe fn input_value<'a, A: ocamlrep::Allocator>( str: &[u8], alloc: &'a A, ) -> ocamlrep::Value<'a> { unsafe { let mut state = State::new(alloc, str); ocamlrep::Value::from_bits(state.input_val_from_string(str)) } }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep_marshal/intext.rs
ocamlrep_marshal/intext.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. #![allow(dead_code)] //for now pub(crate) const MAGIC_NUMBER_SMALL: u32 = 0x8495A6BE; // 10000100100101011010011010111110 pub(crate) const MAGIC_NUMBER_BIG: u32 = 0x8495A6BF; // 10000100100101011010011010111111 /// Header format for the "small" model: 20 bytes /// 0 "small" magic number /// 4 length of marshaled data, in bytes /// 8 number of shared blocks /// 12 size in words when read on a 32-bit platform /// 16 size in words when read on a 64-bit platform /// The 4 numbers are 32 bits each, in big endian. /// /// Header format for the "big" model: 32 bytes /// 0 "big" magic number /// 4 four reserved bytes, currently set to 0 /// 8 length of marshaled data, in bytes /// 16 number of shared blocks /// 24 size in words when read on a 64-bit platform /// The 3 numbers are 64 bits each, in big endian. // --- // c.f. runtime/caml/config.h #[cfg(target_arch = "arm")] // Careful. This might not be quite right. See // https://github.com/ocaml/ocaml/issues/4224. 
pub(crate) const ARCH_FLOAT_ENDIANNESS: u32 = 0x45670123; #[cfg(not(target_arch = "arm"))] #[cfg(target_endian = "big")] pub(crate) const ARCH_FLOAT_ENDIANNESS: u32 = 0x76543210; #[cfg(not(target_arch = "arm"))] #[cfg(target_endian = "little")] pub(crate) const ARCH_FLOAT_ENDIANNESS: u32 = 0x01234567; // --- // Codes for the compact format pub(crate) const PREFIX_SMALL_BLOCK: u8 = 0x80; // 10000000 pub(crate) const PREFIX_SMALL_INT: u8 = 0x40; // 01000000 pub(crate) const PREFIX_SMALL_STRING: u8 = 0x20; // 00100000 pub(crate) const CODE_INT8: u8 = 0x0; // 00000000 pub(crate) const CODE_INT16: u8 = 0x1; // 00000001 pub(crate) const CODE_INT32: u8 = 0x2; // 00000010 pub(crate) const CODE_INT64: u8 = 0x3; // 00000011 pub(crate) const CODE_SHARED8: u8 = 0x4; // 00000100 pub(crate) const CODE_SHARED16: u8 = 0x5; // 00000101 pub(crate) const CODE_SHARED32: u8 = 0x6; // 00000110 pub(crate) const CODE_SHARED64: u8 = 0x14; // 00010100 pub(crate) const CODE_BLOCK32: u8 = 0x8; // 00001000 pub(crate) const CODE_BLOCK64: u8 = 0x13; // 00010011 pub(crate) const CODE_STRING8: u8 = 0x9; // 00001001 pub(crate) const CODE_STRING32: u8 = 0xA; // 00001010 pub(crate) const CODE_STRING64: u8 = 0x15; // 00010101 pub(crate) const CODE_DOUBLE_BIG: u8 = 0xB; // 00001011 pub(crate) const CODE_DOUBLE_LITTLE: u8 = 0xC; // 00001100 pub(crate) const CODE_DOUBLE_ARRAY8_BIG: u8 = 0xD; // 00001101 pub(crate) const CODE_DOUBLE_ARRAY8_LITTLE: u8 = 0xE; // 00001110 pub(crate) const CODE_DOUBLE_ARRAY32_BIG: u8 = 0xF; // 00001111 pub(crate) const CODE_DOUBLE_ARRAY32_LITTLE: u8 = 0x7; // 00000111 pub(crate) const CODE_DOUBLE_ARRAY64_BIG: u8 = 0x16; // 00010110 pub(crate) const CODE_DOUBLE_ARRAY64_LITTLE: u8 = 0x17; // 00010111 pub(crate) const CODE_CODEPOINTER: u8 = 0x10; // 00010000 pub(crate) const CODE_INFIXPOINTER: u8 = 0x11; // 00010001 pub(crate) const CODE_CUSTOM: u8 = 0x12; // 00010010 (deprecated) pub(crate) const CODE_CUSTOM_LEN: u8 = 0x18; // 00011000 pub(crate) const CODE_CUSTOM_FIXED: u8 = 
0x19; // 00011001 macro_rules! cond_arch_float_endian { ($if_:ident, $else_:ident) => { if ARCH_FLOAT_ENDIANNESS == 0x76543210 { $if_ } else { $else_ } }; } pub(crate) const CODE_DOUBLE_NATIVE: u8 = cond_arch_float_endian!(CODE_DOUBLE_BIG, CODE_DOUBLE_LITTLE); pub(crate) const CODE_DOUBLE_ARRAY8_NATIVE: u8 = cond_arch_float_endian!(CODE_DOUBLE_ARRAY8_BIG, CODE_DOUBLE_ARRAY8_LITTLE); pub(crate) const CODE_DOUBLE_ARRAY32_NATIVE: u8 = cond_arch_float_endian!(CODE_DOUBLE_ARRAY32_BIG, CODE_DOUBLE_ARRAY32_LITTLE); pub(crate) const CODE_DOUBLE_ARRAY64_NATIVE: u8 = cond_arch_float_endian!(CODE_DOUBLE_ARRAY64_BIG, CODE_DOUBLE_ARRAY64_LITTLE); /// Size-ing data structures for extern. Chosen so that /// sizeof(struct trail_block) and sizeof(struct output_block) /// are slightly below 8Kb. pub(crate) const ENTRIES_PER_TRAIL_BLOCK: i16 = 1025; pub(crate) const SIZE_EXTERN_OUTPUT_BLOCK: usize = 8100;
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep_marshal/build.rs
ocamlrep_marshal/build.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. // Assume an opam environment (`eval "$(opam env --switch=default // --set-switch)"`) then to find the prevailing standard library caml // headers, `OCAMLLIB=$(ocamlopt.opt -config | grep standard_library: // | awk '{ print $2 }')`. fn ocamllib_dir() -> std::path::PathBuf { let mut sh = std::process::Command::new("sh"); sh.args([ "-c", "ocamlopt.opt -config | grep standard_library: | awk '{ print $2 }'", ]); let output = sh.output().unwrap().stdout; let proposed_path = std::path::Path::new(std::str::from_utf8(&output).unwrap().trim()); // A supercaml 'ocamlopt.opt' can report standard library paths that don't // exist. if proposed_path.exists() { proposed_path.to_path_buf() } else { // Fallback to guessing the location given knowledge of where // 'ocamlopt.opt' itself it. let mut sh = std::process::Command::new("sh"); sh.args(["-c", "which ocamlopt.opt"]); let output = sh.output().unwrap().stdout; std::path::Path::new(std::str::from_utf8(&output).unwrap().trim()) .ancestors() .nth(2) .unwrap() .join("lib/ocaml") } } fn main() { // Tell Cargo that if the given file changes, to rerun this build script. println!("cargo:rerun-if-changed=../../ocaml_version.c"); cc::Build::new() .include(ocamllib_dir().as_path().to_str().unwrap()) .file("ocaml_version.c") .compile("ocaml_version"); }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep_marshal/ocamlrep_marshal.rs
ocamlrep_marshal/ocamlrep_marshal.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. #![feature(new_zeroed_alloc)] mod deser; // deserialize; c.f 'runtime/intern.c' mod intext; // c.f. 'runtime/caml/intext.h' mod ser; // serialize; c.f. 'runtime/extern.c' pub use deser::input_value; pub use ser::ExternFlags; pub use ser::output_value;
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep_marshal/test/ocamlrep_marshal_ffi_bindings.rs
ocamlrep_marshal/test/ocamlrep_marshal_ffi_bindings.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. #![feature(exit_status_error)] use ocamlrep::FromOcamlRep; type OcamlValue = usize; #[unsafe(no_mangle)] unsafe extern "C" fn ocamlrep_marshal_output_value_to_string( v: OcamlValue, flags: OcamlValue, ) -> OcamlValue { ocamlrep_ocamlpool::catch_unwind(|| unsafe { let v = ocamlrep::Value::from_bits(v); let flags = ocamlrep_marshal::ExternFlags::from_ocaml(flags).unwrap(); let mut cursor = std::io::Cursor::new(vec![]); ocamlrep_marshal::output_value(&mut cursor, v, flags).unwrap(); ocamlrep_ocamlpool::to_ocaml(&cursor.into_inner()) }) } #[unsafe(no_mangle)] unsafe extern "C" fn ocamlrep_marshal_input_value_from_string( str: OcamlValue, ofs: OcamlValue, ) -> OcamlValue { ocamlrep_ocamlpool::catch_unwind(|| unsafe { let offset = usize::from_ocaml(ofs).unwrap(); let str = ocamlrep::bytes_from_ocamlrep(ocamlrep::Value::from_bits(str)).unwrap(); let str = &str[offset..]; let pool = ocamlrep_ocamlpool::Pool::new(); ocamlrep_marshal::input_value(str, &pool).to_bits() }) } // Hack! Trick buck into believing that these libraries are used. See [Note: // Test blocks for Cargo] in `ocamlrep_ocamlpool/test/ocamlpool_test.rs`. 
const _: () = { #[allow(unused_imports)] use anyhow; #[allow(unused_imports)] use cargo_test_utils; #[allow(unused_imports)] use tempfile; }; #[cfg(test)] mod tests { use anyhow::Result; use cargo_test_utils::*; use tempfile::TempDir; #[test] fn ocamlrep_marshal_test() -> Result<()> { let tmp_dir = TempDir::with_prefix("ocamlrep_marshal_test.")?; std::fs::copy( "test_ocamlrep_marshal.ml", tmp_dir.path().join("test_ocamlrep_marshal.ml"), )?; let compile_cmd = cmd( "ocamlopt.opt", &[ "-verbose", "-c", "test_ocamlrep_marshal.ml", "-o", "test_ocamlrep_marshal_ml.cmx", ], Some(tmp_dir.path()), ); assert_eq!(run(compile_cmd).map_err(fmt_exit_status_err), Ok(())); let link_cmd = cmd( "ocamlopt.opt", &[ "-verbose", "-o", "ocamlrep_marshal_test", "test_ocamlrep_marshal_ml.cmx", "-ccopt", &("-L".to_owned() + workspace_dir(&["target", build_flavor()]).to_str().unwrap()), "-cclib", "-locamlrep_marshal", "-cclib", "-locamlrep_marshal_ffi_bindings", "-cclib", "-locamlrep_ocamlpool", ], Some(tmp_dir.path()), ); assert_eq!(run(link_cmd).map_err(fmt_exit_status_err), Ok(())); let ocamlrep_marshal_test_cmd = cmd( tmp_dir .path() .join("ocamlrep_marshal_test") .as_path() .to_str() .unwrap(), &[], None, ); assert_eq!( run(ocamlrep_marshal_test_cmd).map_err(fmt_exit_status_err), Ok(()) ); tmp_dir.close()?; Ok(()) } }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep_custom/lib.rs
ocamlrep_custom/lib.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. //! Library to build `Custom_tag` OCaml values. use std::ffi::CStr; use std::ffi::CString; use std::mem::MaybeUninit; use std::ops::Deref; use std::os::raw::c_char; use std::os::raw::c_int; use std::os::raw::c_void; use std::rc::Rc; use ocamlrep::Allocator; use ocamlrep::CUSTOM_TAG; use ocamlrep::FromError; use ocamlrep::FromOcamlRep; use ocamlrep::ToOcamlRep; use ocamlrep::Value; use ocamlrep::from; use ocamlrep_ocamlpool::catch_unwind; unsafe extern "C" { fn caml_register_custom_operations(ops: *const CustomOperations); fn caml_serialize_block_1(data: *const u8, len: usize); fn caml_serialize_int_8(x: i64); fn caml_deserialize_sint_8() -> i64; fn caml_deserialize_block_1(data: *mut u8, len: usize); } /// Struct containing the operations for a custom OCaml block. /// /// This is the Rust encoding of OCaml's `struct custom_operations`. /// /// For more information on the fields see /// [the OCaml guide](https://caml.inria.fr/pub/docs/manual-ocaml/intfc.html#ss:c-custom-ops) #[repr(C)] pub struct CustomOperations { identifier: *const c_char, finalize: Option<extern "C" fn(usize) -> ()>, compare: Option<extern "C" fn(usize, usize) -> c_int>, hash: Option<extern "C" fn(usize) -> isize>, serialize: Option<extern "C" fn(usize, *mut usize, *mut usize) -> ()>, deserialize: Option<extern "C" fn(*mut c_void) -> usize>, compare_ext: Option<extern "C" fn(usize, usize) -> c_int>, /// Not implemented yet, always set to NULL. custom_fixed_length: *const c_void, } impl CustomOperations { /// Create a new custom block with the given identifier. /// /// All function pointers will be set to NULL by default. 
fn new(identifier: &'static CStr) -> Self { Self { identifier: identifier.as_ptr(), finalize: None, compare: None, hash: None, serialize: None, deserialize: None, compare_ext: None, custom_fixed_length: std::ptr::null(), } } } /// A wrapper around a Rust type that allows it /// to be written into/read from OCaml memory and managed by /// the OCaml GC. /// /// The value still lives on the Rust heap in an `Rc`'d pointer, /// and the `Rc`-pointer itself will be written to OCaml memory. /// /// # Examples /// /// Expose Rust type: /// /// ```rust /// use std::cell::Cell; /// /// use ocamlrep_custom::CamlSerialize; /// use ocamlrep_custom::Custom; /// use ocamlrep_custom::caml_serialize_default_impls; /// use ocamlrep_ocamlpool::ocaml_ffi; /// /// pub struct Counter(Cell<isize>); /// /// impl CamlSerialize for Counter { /// caml_serialize_default_impls!(); /// } /// /// ocaml_ffi! { /// fn counter_new() -> Custom<Counter> { /// Custom::from(Counter(Cell::new(0))) /// } /// /// fn counter_inc(counter: Custom<Counter>) -> Custom<Counter> { /// counter.0.set(counter.0.get() - 1); /// counter /// } /// /// fn counter_read(counter: Custom<Counter>) -> isize { /// counter.0.get() /// } /// } /// ``` /// /// From OCaml: /// /// ```ocaml /// type counter; (* abstract type *) /// /// external counter_new : unit -> counter = "counter_new" /// external counter_inc: counter -> unit = "counter_inc" /// external counter_read : counter -> isize = "counter_read" /// /// let () = /// let cnt = counter_new () in (* will be dropped on GC finalization *) /// assert (counter_read cnt == 0); /// counter_inc cnt; /// assert (counter_read cnt == 1) /// ``` pub struct Custom<T: CamlSerialize>(Rc<T>); impl<T: CamlSerialize> Custom<T> { /// Create a new `ToCustom` wrapper by taking ownership of the value. pub fn from(x: T) -> Self { Self::new(Rc::new(x)) } /// Create a new `ToCustom` directly from an `Rc`'d value. 
pub fn new(x: Rc<T>) -> Self {
    Self(x)
}

/// Get a reference to the inner `Rc`.
pub fn inner(&self) -> &Rc<T> {
    &self.0
}
}

impl<T: CamlSerialize> Deref for Custom<T> {
    type Target = T;

    fn deref(&self) -> &T {
        self.0.deref()
    }
}

/// A custom block has two words: a pointer to the CustomOperations struct,
/// and a pointer to the value. Our values are ref-counted, but an Rc pointer
/// is just pointer-sized.
#[repr(C)]
struct CustomBlockOcamlRep<T>(&'static CustomOperations, Rc<T>);

const CUSTOM_BLOCK_SIZE_IN_BYTES: usize = std::mem::size_of::<CustomBlockOcamlRep<()>>();
const CUSTOM_BLOCK_SIZE_IN_WORDS: usize =
    CUSTOM_BLOCK_SIZE_IN_BYTES / std::mem::size_of::<Value<'_>>();

impl<T: CamlSerialize> ToOcamlRep for Custom<T> {
    fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> {
        let ops: &'static CustomOperations = <T as CamlSerialize>::operations();

        let mut block = alloc.block_with_size_and_tag(CUSTOM_BLOCK_SIZE_IN_WORDS, CUSTOM_TAG);

        // Safety: we don't call any method on `alloc` after this method.
        let block_ptr: *mut Value<'_> = unsafe { alloc.block_ptr_mut(&mut block) };

        // Safety: `alloc` guarantees that the `block_ptr` returned by
        // `block_ptr_mut` is aligned to `align_of::<Value>()` and valid
        // for reads and writes of `CUSTOM_BLOCK_SIZE_IN_WORDS *
        // size_of::<Value>()` bytes. Since `CustomBlockOcamlRep` has size
        // `CUSTOM_BLOCK_SIZE_IN_WORDS * size_of::<Value>()`, its
        // alignment is equal to `align_of::<Value>()`, and no other
        // reference to our newly-allocated block can exist, it's safe for us to
        // interpret `block_ptr` as a `&mut CustomBlockOcamlRep`.
        let block_ptr = block_ptr as *mut MaybeUninit<CustomBlockOcamlRep<T>>;
        let custom_block = unsafe { block_ptr.as_mut().unwrap() };

        // Write the address of the operations struct to the first word, and the
        // pointer to the value to the second word.
        *custom_block = MaybeUninit::new(CustomBlockOcamlRep(ops, Rc::clone(&self.0)));
        block.build()
    }
}

impl<T: CamlSerialize> FromOcamlRep for Custom<T> {
    fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> {
        let rc = rc_from_value::<T>(value)?;
        let rc = Rc::clone(rc);

        Ok(Custom::new(rc))
    }
}

/// Helper function to fetch a reference to the `Rc` from the OCaml representation
/// of a custom block.
fn rc_from_value<T: CamlSerialize>(value: Value<'_>) -> Result<&Rc<T>, FromError> {
    let block = from::expect_block(value)?;
    from::expect_block_tag(block, CUSTOM_TAG)?;
    from::expect_block_size(block, CUSTOM_BLOCK_SIZE_IN_WORDS)?;

    // We still don't know whether this block is in fact a
    // CustomBlockOcamlRep<T>--it may be a CustomBlockOcamlRep<U>, or some
    // other custom block which happens to be the same size. We can verify
    // that the block is actually a CustomBlockOcamlRep<T> by checking that
    // it points to the correct CustomOperations struct.
    let ops = <T as CamlSerialize>::operations();
    if !std::ptr::eq(ops, block[0].to_bits() as *const CustomOperations) {
        return Err(FromError::UnexpectedCustomOps {
            expected: ops as *const _ as usize,
            actual: block[0].to_bits(),
        });
    }

    let value_ptr = value.to_bits() as *const CustomBlockOcamlRep<T>;

    // Safety: `value_ptr` is guaranteed to be aligned to
    // `align_of::<Value>()`, and our use of `expect_block_size` guarantees
    // that the pointer is valid for reads of `CUSTOM_BLOCK_SIZE_IN_WORDS *
    // size_of::<Value>()` bytes. Since the first field points to the right
    // operations struct, we either have a valid `CustomBlockOcamlRep<T>`
    // (i.e., constructed above in our `ToOcamlRep` implementation) or
    // someone went out of their way to construct an invalid one. Assume
    // it's valid and read in the `CustomBlockOcamlRep<T>`.
    let custom_block = unsafe { value_ptr.as_ref().unwrap() };
    Ok(&custom_block.1)
}

/// Trait that allows OCaml serialization and deserialization.
///
/// If you want to support serialization/deserialization, you
/// **MUST** call `CamlSerialize::register()` when starting up
/// the program.
///
/// This will register your type in the OCaml runtime, allowing
/// deserialization.
///
/// Rust does not support different instantiations of the default
/// implementation for different implementors of trait types. Therefore,
/// you must implement `type_identifier`, `operations` and `register`
/// manually when implementing this trait for a type. You can use
/// the `caml_serialize_default_impls!()` to do that automatically:
///
/// ```
/// impl CamlSerialize for MyType {
///     caml_serialize_default_impls!();
/// }
/// ```
pub trait CamlSerialize: Sized {
    /// Get the type name.
    fn type_identifier() -> &'static CStr;

    /// Get the type's custom operations struct.
    ///
    /// Always has to return the same reference! If not, the
    /// OCaml-to-Rust conversion will fail.
    ///
    /// The returned structure is not intended to be used by
    /// a programmer. Using it directly by e.g. injecting it
    /// into OCaml custom blocks is dangerous and can cause
    /// undefined behavior. Don't do it!
    fn operations() -> &'static CustomOperations;

    /// Register the type with the OCaml system.
    ///
    /// # Safety
    ///
    /// Must not be called from multiple threads.
    ///
    /// This function interacts with the OCaml runtime, which is not thread-safe.
    /// If any other threads are attempting to interact with the OCaml runtime
    /// or its custom operations table (e.g., by invoking this function, or by
    /// executing OCaml code using custom blocks) when this function is invoked,
    /// undefined behavior will result.
    ///
    /// # Examples
    ///
    /// ```
    /// use ocamlrep_custom::CamlSerialize;
    /// use ocamlrep_ocamlpool::ocaml_ffi;
    ///
    /// struct IntBox(isize);
    ///
    /// impl CamlSerialize for IntBox {
    ///     caml_serialize_default_impls!();
    ///     fn serialize(&self) -> Vec<u8> { ... }
    ///     fn deserialize(buffer: &[u8]) -> Self { ... }
    /// }
    ///
    /// ocaml_ffi! {
    ///     fn register_custom_types() {
    ///         // Once `register_custom_types` has been invoked from OCaml, IntBox
    ///         // can be serialized and deserialized from OCaml using the Marshal
    ///         // module.
    ///         //
    ///         // Safety: this will be called from OCaml, as such nothing else will
    ///         // be interacting with the OCaml runtime.
    ///         unsafe { IntBox::register() };
    ///     }
    /// }
    /// ```
    unsafe fn register();

    /// Convert a value to an array of bytes.
    ///
    /// The default implementation panics.
    fn serialize(&self) -> Vec<u8> {
        panic!(
            "serialization not implemented for {:?}",
            Self::type_identifier()
        )
    }

    /// Deserialize a value from an array of bytes.
    ///
    /// The default implementation panics.
    fn deserialize(_data: &[u8]) -> Self {
        panic!(
            "deserialization not implemented for {:?}",
            Self::type_identifier()
        )
    }
}

#[macro_export]
macro_rules! caml_serialize_default_impls {
    () => {
        fn type_identifier() -> &'static std::ffi::CStr {
            static ONCE: std::sync::Once = std::sync::Once::new();
            static mut TYPE_NAME: Option<std::ffi::CString> = None;

            ONCE.call_once(|| {
                // Safety:
                // - We've gated initialization, so it's thread safe.
                // - We only set the constant once.
                unsafe {
                    TYPE_NAME = Some($crate::type_identifier_helper::<Self>());
                }
            });

            // Safety:
            // - By now the constant has been initialized, and once initialized
            //   it never changes.
            // - Concurrent reads are OK.
            unsafe { TYPE_NAME.as_ref().unwrap() }
        }

        fn operations() -> &'static $crate::CustomOperations {
            static ONCE: std::sync::Once = std::sync::Once::new();
            static mut OPS_STRUCT: Option<$crate::CustomOperations> = None;

            ONCE.call_once(|| {
                // Safety:
                // - We've gated initialization, so it's thread safe.
                // - We only set the constant once.
                unsafe {
                    OPS_STRUCT = Some($crate::operations_helper::<Self>());
                }
            });

            // Safety:
            // - By now the constant has been initialized, and once initialized
            //   it never changes.
            // - Concurrent reads are OK.
            unsafe { OPS_STRUCT.as_ref().unwrap() }
        }

        unsafe fn register() {
            static mut IS_REGISTERED: bool = false;

            // Safety: Can only be called in a single-threaded context!
            if IS_REGISTERED {
                return;
            }
            IS_REGISTERED = true;

            let ops = Self::operations();
            $crate::register_helper::<Self>(ops)
        }
    };
}

/// Helper used for the `caml_serialize_default_impls` macro.
pub fn type_identifier_helper<T>() -> CString {
    let name = format!("ocamlrep.custom.{}", std::any::type_name::<T>());
    std::ffi::CString::new(name).unwrap()
}

/// Helper used for the `caml_serialize_default_impls` macro.
pub fn operations_helper<T: CamlSerialize>() -> CustomOperations {
    let type_identifier = <T as CamlSerialize>::type_identifier();
    let mut ops = CustomOperations::new(type_identifier);
    ops.finalize = Some(drop_value::<T>);
    ops.serialize = Some(serialize_value::<T>);
    ops.deserialize = Some(deserialize_value::<T>);
    ops
}

/// Helper used for the `caml_serialize_default_impls` macro.
///
/// # Safety
///
/// Should not be used directly. Interacts with the OCaml runtime and is
/// thus unsafe to call in a multi-threaded context.
pub unsafe fn register_helper<T>(ops: &'static CustomOperations) {
    unsafe {
        // Safety: operations struct has a static lifetime, it will live forever!
        caml_register_custom_operations(ops as *const CustomOperations);
    }
}

/// Helper function used by `operations_helper`. Returns a finalizer for custom
/// blocks containing an `Rc<T>`.
extern "C" fn drop_value<T: CamlSerialize>(value: usize) {
    let _: usize = catch_unwind(|| {
        // Safety: We trust here that CustomOperations structs containing this
        // `drop_value` instance will only ever be referenced by custom blocks
        // matching the layout of `CustomBlockOcamlRep`. If that's so, then this
        // function should only be invoked by the OCaml runtime on a pointer to
        // a CustomBlockOcamlRep<T> created by T::to_ocamlrep. Such a pointer
        // would be aligned and valid.
        let custom_block_ptr = value as *mut CustomBlockOcamlRep<T>;
        let custom_block = unsafe { custom_block_ptr.as_mut().unwrap() };

        // The `Rc` will be dropped here, and its reference count will decrease
        // by one (possibly freeing the referenced value).
        // Safety: Since the OCaml runtime will only invoke the finalizer for a
        // value which will never again be used, it is safe to use
        // `drop_in_place` (i.e., our finalizer will only be invoked once, so we
        // won't cause a double-drop).
        unsafe {
            std::ptr::drop_in_place(&mut custom_block.1);
        }
        0
    });
}

/// Helper function for serialization. Interacts with the OCaml runtime, so must
/// only be invoked by the OCaml runtime when serializing a custom block.
extern "C" fn serialize_value<T: CamlSerialize>(
    value: usize,
    bsize_32: *mut usize,
    bsize_64: *mut usize,
) {
    let _: usize = catch_unwind(|| {
        // Safety: Only called by the OCaml runtime (we don't expose a means of
        // invoking this function from Rust), which provides some OCaml
        // CUSTOM_TAG block as the value.
        let value = unsafe { Value::from_bits(value) };

        // Only called by the OCaml runtime, when serializing
        // a Custom-object managed by the OCaml GC.
        let rc = rc_from_value::<T>(value).unwrap();

        let bytes: Vec<u8> = rc.serialize();
        let bytes_ptr = bytes.as_ptr();

        // Safety: As above, we don't expose a means of invoking this function
        // from Rust--it can only be invoked by the OCaml runtime while
        // serializing a value. It is safe to invoke OCaml serialization
        // functions in this context.
        unsafe {
            let len = bytes.len();
            caml_serialize_int_8(len.try_into().unwrap());
            caml_serialize_block_1(bytes_ptr, len);

            // The size taken up in the data-part of the custom block.
            *bsize_32 = std::mem::size_of::<u32>();
            *bsize_64 = std::mem::size_of::<u64>();
        }
        0
    });
}

/// Helper function for deserialization. Interacts with the OCaml runtime, so must
/// only be invoked by the OCaml runtime when deserializing a custom block.
extern "C" fn deserialize_value<T: CamlSerialize>(data_ptr: *mut c_void) -> usize {
    catch_unwind(|| {
        // Get the serialized bytes from the input channel.
        let bytes = unsafe {
            // Safety: We don't expose a means of invoking this function from
            // Rust--`deserialize_value` can only be invoked by the OCaml
            // runtime while deserializing a custom block value. It is safe to
            // invoke OCaml deserialization functions in this context.
            let len: usize = caml_deserialize_sint_8().try_into().unwrap();
            let mut buf: Vec<u8> = Vec::with_capacity(len);

            // Safety: len <= capacity. The elements aren't initialized at this
            // time, but we trust that caml_deserialize_block_1 will fill `len`
            // bytes of the buffer.
            #[allow(clippy::uninit_vec)]
            buf.set_len(len);

            // Safety: As above, `deserialize_value` can only be invoked by the
            // OCaml runtime during custom block deserialization.
            caml_deserialize_block_1(buf.as_mut_ptr(), len);
            buf
        };

        // Actually deserialize those bytes into a T.
        let val: T = CamlSerialize::deserialize(&bytes);

        // Safety: The OCaml runtime will give us a data buffer which is
        // usize-aligned and valid for reads and writes of bsize_32 or bsize_64
        // (as provided by `serialize_value`, above) bytes (depending on system
        // architecture). This is sufficient for `Rc<T>` (which has the size and
        // alignment of usize).
        let data_ptr = data_ptr as *mut MaybeUninit<Rc<T>>;
        let data = unsafe { data_ptr.as_mut().unwrap() };

        *data = MaybeUninit::new(Rc::new(val));

        // Return the size of the value we wrote to our output pointer. The
        // OCaml runtime will verify that it matches the expected
        // bsize_32/bsize_64 written by the serializer.
        std::mem::size_of_val(data)
    })
}

#[cfg(test)]
mod test {
    use std::mem::*;

    use super::*;

    #[test]
    fn custom_block_ocamlrep_size() {
        assert_eq!(
            size_of::<CustomBlockOcamlRep<u8>>(),
            2 * size_of::<Value<'_>>()
        );
    }

    #[test]
    fn custom_block_ocamlrep_align() {
        assert_eq!(
            align_of::<CustomBlockOcamlRep<u8>>(),
            align_of::<Value<'_>>()
        );
    }
}
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep_custom/test/counter.rs
ocamlrep_custom/test/counter.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.

#![feature(exit_status_error)]

use std::cell::Cell;

use ocamlrep_custom::CamlSerialize;
use ocamlrep_custom::Custom;
use ocamlrep_custom::caml_serialize_default_impls;
use ocamlrep_ocamlpool::ocaml_ffi;

// A mutable counter exposed to OCaml as a custom block. `Cell` provides the
// interior mutability needed because `Custom` only hands out shared refs.
pub struct Counter(Cell<isize>);

impl CamlSerialize for Counter {
    caml_serialize_default_impls!();
}

// FFI entry points: create, increment, and read a counter from OCaml.
ocaml_ffi! {
    fn counter_new() -> Custom<Counter> {
        Custom::from(Counter(Cell::new(0)))
    }

    fn counter_inc(counter: Custom<Counter>) -> Custom<Counter> {
        counter.0.set(counter.0.get() + 1);
        counter
    }

    fn counter_read(counter: Custom<Counter>) -> isize {
        counter.0.get()
    }
}

// Hack! Trick buck into believing that these libraries are used. See [Note:
// Test blocks for Cargo] in `ocamlrep_ocamlpool/test/ocamlpool_test.rs`.
const _: () = {
    #[allow(unused_imports)]
    use anyhow;
    #[allow(unused_imports)]
    use cargo_test_utils;
    #[allow(unused_imports)]
    use tempfile;
};

#[cfg(test)]
mod tests {
    use anyhow::Result;
    use cargo_test_utils::*;
    use tempfile::TempDir;

    // End-to-end test: compile and link the OCaml client against this crate's
    // static lib, then run the resulting binary and require a zero exit.
    #[test]
    fn counter_test() -> Result<()> {
        let tmp_dir = TempDir::with_prefix("ocamlrep_custom_test.")?;
        std::fs::copy(
            "counter_client.ml",
            tmp_dir.path().join("counter_client.ml"),
        )?;
        // Step 1: compile the OCaml client to an object file.
        let compile_cmd = cmd(
            "ocamlopt.opt",
            &[
                "-verbose",
                "-c",
                "counter_client.ml",
                "-o",
                "counter_client_ml.cmx",
            ],
            Some(tmp_dir.path()),
        );
        assert_eq!(run(compile_cmd).map_err(fmt_exit_status_err), Ok(()));
        // Step 2: link against the Rust-built `counter` and `ocamlrep_ocamlpool`
        // static libraries from the cargo target directory.
        let link_cmd = cmd(
            "ocamlopt.opt",
            &[
                "-verbose",
                "-o",
                "counter_test",
                "counter_client_ml.cmx",
                "-ccopt",
                &("-L".to_owned() + workspace_dir(&["target", build_flavor()]).to_str().unwrap()),
                "-cclib",
                "-lcounter",
                "-cclib",
                "-locamlrep_ocamlpool",
            ],
            Some(tmp_dir.path()),
        );
        assert_eq!(run(link_cmd).map_err(fmt_exit_status_err), Ok(()));
        // Step 3: run the linked binary; its own assertions decide pass/fail.
        let counter_test_cmd = cmd(
            tmp_dir
                .path()
                .join("counter_test")
                .as_path()
                .to_str()
                .unwrap(),
            &[],
            None,
        );
        assert_eq!(run(counter_test_cmd).map_err(fmt_exit_status_err), Ok(()));
        tmp_dir.close()?;
        Ok(())
    }
}
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep_custom/test/test_custom.rs
ocamlrep_custom/test/test_custom.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use std::cell::RefCell; use std::io::Write; use std::rc::Rc; use ocamlrep_custom::CamlSerialize; use ocamlrep_custom::Custom; use ocamlrep_custom::caml_serialize_default_impls; use ocamlrep_ocamlpool::ocaml_ffi; struct DropTest(Rc<RefCell<bool>>); impl CamlSerialize for DropTest { caml_serialize_default_impls!(); } struct DropTestCell(Rc<RefCell<bool>>); impl CamlSerialize for DropTestCell { caml_serialize_default_impls!(); } impl DropTest { pub fn new() -> Self { Self(Rc::new(RefCell::new(false))) } pub fn cell(&self) -> Rc<RefCell<bool>> { self.0.clone() } } impl Drop for DropTest { fn drop(&mut self) { *self.0.borrow_mut() = true; } } ocaml_ffi! { fn test_custom_drop_test_new() -> Custom<DropTest> { Custom::from(DropTest::new()) } fn test_custom_drop_test_custom_ref_count(x: Custom<DropTest>) -> usize { let w = Rc::downgrade(x.inner()); drop(x); w.strong_count() } fn test_custom_drop_test_get_cell(x: Custom<DropTest>) -> Custom<DropTestCell> { Custom::from(DropTestCell(x.cell())) } fn test_custom_drop_test_cell_is_dropped(x: Custom<DropTestCell>) -> bool { *x.0.borrow() } } struct BoxedInt(isize); impl CamlSerialize for BoxedInt { caml_serialize_default_impls!(); fn serialize(&self) -> Vec<u8> { let mut buffer = Vec::new(); buffer.write_all(&self.0.to_be_bytes()).unwrap(); buffer } fn deserialize(buffer: &[u8]) -> Self { let i = isize::from_be_bytes(buffer[0..std::mem::size_of::<isize>()].try_into().unwrap()); BoxedInt(i) } } ocaml_ffi! { fn test_custom_boxed_int_register() { // Safety: called from OCaml in a single-threaded context. unsafe { BoxedInt::register(); } } fn test_custom_boxed_int_new(x: isize) -> Custom<BoxedInt> { Custom::from(BoxedInt(x)) } fn test_custom_boxed_int_equal(x: Custom<BoxedInt>, y: Custom<BoxedInt>) -> bool { x.0 == y.0 } }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/cargo_test_utils/cargo_test_utils.rs
cargo_test_utils/cargo_test_utils.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. #![feature(exit_status_error)] use std::process::Command; use std::process::ExitStatusError; pub fn cmd(prog: &str, args: &[&str], dir: Option<&std::path::Path>) -> Command { let mut prog_cmd = Command::new(prog); if let Some(path) = dir { prog_cmd.current_dir(path); } prog_cmd.args(args); prog_cmd } pub fn workspace_dir(ds: &[&str]) -> std::path::PathBuf { let mut cargo_cmd = cmd( "cargo", &["locate-project", "--workspace", "--message-format=plain"], None, ); let output = cargo_cmd.output().unwrap().stdout; let root_cargo_toml = std::path::Path::new(std::str::from_utf8(&output).unwrap().trim()); let mut p = root_cargo_toml.parent().unwrap().to_path_buf(); for d in ds { p.push(d); } p } pub fn run(mut cmd: Command) -> Result<(), ExitStatusError> { cmd.spawn().unwrap().wait().ok().unwrap().exit_ok() } pub fn fmt_exit_status_err(err: ExitStatusError) -> String { format!("error status: {err}") } pub fn build_flavor() -> &'static str { if cfg!(debug_assertions) { "debug" } else { "release" } }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/rust_to_ocaml_attr/rust_to_ocaml_attr.rs
rust_to_ocaml/rust_to_ocaml_attr/rust_to_ocaml_attr.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use proc_macro::TokenStream; /// This attribute macro is intended to be consumed by the rust_to_ocaml codegen /// tool, so this proc macro doesn't need to do anything other than return the /// item (with the rust_to_ocaml attribute stripped by rustc). /// /// Use of the rust_to_ocaml attribute in positions other than items (like field /// definitions) are stripped by ocamlrep_derive macros (which is simpler than /// filtering them from the `item` in this crate). /// /// We may want to add validation later so that incorrect use of the attribute /// emits errors at compile time, but stripping is good enough for now. #[proc_macro_attribute] pub fn rust_to_ocaml(_attr: TokenStream, item: TokenStream) -> TokenStream { item }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/rust_to_ocaml/config.rs
rust_to_ocaml/rust_to_ocaml/config.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.

use std::str::FromStr;

use indexmap::IndexMap;
use indexmap::IndexSet;
use indexmap::indexmap;
use indexmap::indexset;
use serde::Deserialize;
use serde::Serialize;

use crate::ir;
use crate::ir::ModuleName;
use crate::ir::TypeName;

/// Tool configuration, deserialized from a config file (see `main`).
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct Config {
    #[serde(default)]
    modules: ModulesConfig,
    #[serde(default)]
    types: TypesConfig,
}

#[derive(Debug, Default, Serialize, Deserialize)]
struct ModulesConfig {
    // Serialized as a sequence of pairs (`serde_seq`) so the map keys need
    // not be plain strings in the config format.
    #[serde(default, with = "indexmap::map::serde_seq")]
    rename: IndexMap<ModuleName, ModuleName>,
}

#[derive(Debug, Serialize, Deserialize)]
struct TypesConfig {
    // Types to erase in the output (e.g. `Box<T>` becomes just `T`).
    transparent: IndexSet<RustTypePath>,
    #[serde(with = "indexmap::map::serde_seq")]
    rename: IndexMap<RustTypePath, OcamlTypePath>,
}

/// A `::`-separated Rust type path, e.g. `std::vec::Vec`.
#[derive(Clone, PartialEq, Eq, Hash)]
pub struct RustTypePath {
    pub modules: Vec<ModuleName>,
    pub ty: TypeName,
}

/// A `.`-separated OCaml type path, e.g. `Foo.bar`.
#[derive(Clone, PartialEq, Eq, Hash)]
pub struct OcamlTypePath {
    pub modules: Vec<ModuleName>,
    pub ty: TypeName,
}

impl Config {
    /// Configured replacement for a module name, if any.
    pub fn get_renamed_module(&self, name: &ModuleName) -> Option<ModuleName> {
        self.modules.rename.get(name).cloned()
    }

    /// Whether the type at `path` should be erased in the output.
    pub fn is_transparent_type(&self, path: &ir::TypePath) -> bool {
        let rust_path = RustTypePath::from(path);
        self.types.transparent.contains(&rust_path)
    }

    /// Configured OCaml replacement for the type at `path`, if any.
    pub fn get_renamed_type(&self, path: &ir::TypePath) -> Option<OcamlTypePath> {
        let rust_path = RustTypePath::from(path);
        self.types.rename.get(&rust_path).cloned()
    }
}

impl Default for TypesConfig {
    fn default() -> Self {
        let r = |s| RustTypePath::from_str(s).unwrap();
        let o = |s| OcamlTypePath::from_str(s).unwrap();
        Self {
            // Smart pointers are transparent by default: OCaml values are
            // already boxed, so the wrapper carries no information.
            transparent: indexset! {
                r("Box"),
                r("std::boxed::Box"),
                r("Rc"),
                r("std::rc::Rc"),
                r("Arc"),
                r("std::sync::Arc"),
            },
            rename: indexmap! {
                r("Vec") => o("list"),
                r("std::vec::Vec") => o("list"),
            },
        }
    }
}

impl From<&ir::TypePath> for RustTypePath {
    fn from(path: &ir::TypePath) -> Self {
        Self {
            modules: path.modules.clone(),
            ty: path.ty.clone(),
        }
    }
}

impl std::fmt::Display for RustTypePath {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        for m in self.modules.iter() {
            write!(f, "{}", m.as_str())?;
            write!(f, "::")?;
        }
        write!(f, "{}", self.ty.as_str())
    }
}

impl std::fmt::Display for OcamlTypePath {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        for m in self.modules.iter() {
            write!(f, "{}", m.as_str())?;
            write!(f, ".")?;
        }
        write!(f, "{}", self.ty.as_str())
    }
}

impl std::fmt::Debug for RustTypePath {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        format!("{self}").fmt(f)
    }
}

impl std::fmt::Debug for OcamlTypePath {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        format!("{self}").fmt(f)
    }
}

impl FromStr for RustTypePath {
    type Err = anyhow::Error;
    fn from_str(s: &str) -> anyhow::Result<Self> {
        let (modules, ty) = parse_type_path(s, "::")?;
        Ok(Self { modules, ty })
    }
}

impl FromStr for OcamlTypePath {
    type Err = anyhow::Error;
    fn from_str(s: &str) -> anyhow::Result<Self> {
        let (modules, ty) = parse_type_path(s, ".")?;
        Ok(Self { modules, ty })
    }
}

// Split `s` on `sep` from the right: the final segment is the type name, the
// preceding segments (restored to left-to-right order) are module names.
fn parse_type_path(s: &str, sep: &str) -> anyhow::Result<(Vec<ModuleName>, TypeName)> {
    let mut split = s.rsplit(sep);
    let ty = match split.next() {
        None | Some("") => anyhow::bail!("Invalid type name: {:?}", s),
        Some(ty) => TypeName(ty.to_owned()),
    };
    let mut modules = split.map(ModuleName::new).collect::<Result<Vec<_>, _>>()?;
    modules.reverse();
    Ok((modules, ty))
}

serde_from_display!(RustTypePath, "a valid Rust type path string");
serde_from_display!(OcamlTypePath, "a valid OCaml type path string");
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/rust_to_ocaml/rust_to_ocaml.rs
rust_to_ocaml/rust_to_ocaml/rust_to_ocaml.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.

#[macro_use]
mod macros;

mod config;
mod convert;
mod ir;
mod rewrite_module_names;
mod rewrite_types;

use std::io::Write;
use std::path::Path;
use std::path::PathBuf;

use anyhow::Context;
use anyhow::Result;

use crate::config::Config;

#[derive(Debug, clap::Parser)]
struct Opts {
    /// The Hack source file to convert.
    #[clap(value_name("FILEPATH"))]
    filename: PathBuf,

    /// The OCaml source file to generate.
    #[clap(value_name("OUTPATH"))]
    out_path: Option<PathBuf>,

    /// Path to a configuration file.
    #[clap(long)]
    config: Option<PathBuf>,

    /// Command to regenerate the output. This text will be included in generated file headers.
    #[clap(long)]
    regen_cmd: Option<String>,

    /// Do not add copyright header and generated tag (for tests).
    #[clap(long)]
    no_header: bool,

    /// Path to an OCaml formatter binary which will be used on the generated output.
    #[clap(long)]
    formatter: Option<String>,
}

fn main() -> Result<()> {
    let opts = <Opts as clap::Parser>::from_args();

    // Leak the config so it can be shared as `&'static` by the rewrite passes.
    let config = Box::leak(Box::new(match opts.config {
        Some(path) => {
            let contents = std::fs::read_to_string(&path)
                .with_context(|| format!("Failed to read config file at {}", path.display()))?;
            toml::from_str(&contents)
                .with_context(|| format!("Failed to parse config file at {}", path.display()))?
        }
        None => Config::default(),
    }));

    // Pipeline: parse Rust -> convert to OCaml source -> attach header ->
    // format -> sign -> write.
    let src = std::fs::read_to_string(&opts.filename)
        .with_context(|| format!("Failed to read input file {}", opts.filename.display()))?;
    let file = syn::parse_file(&src)?;
    let mut ocaml_src = convert::convert_file(config, &opts.filename, &file)?;

    if !opts.no_header {
        ocaml_src = attach_header(opts.regen_cmd.as_deref(), &ocaml_src);
    }
    let absolute_filename = opts.filename.canonicalize()?;
    let mut ocaml_src = ocamlformat(
        opts.formatter.as_deref(),
        // Run the formatter from the output directory (or, failing that, the
        // input file's directory) so it can find an .ocamlformat file.
        opts.out_path
            .as_deref()
            .and_then(Path::parent)
            .or_else(|| absolute_filename.parent()),
        ocaml_src.into_bytes(),
    )
    .context("failed to run ocamlformat")?;
    if !opts.no_header {
        ocaml_src = signed_source::sign_file(&ocaml_src)?;
    }

    if let Some(out_path) = &opts.out_path {
        write_file(out_path, &ocaml_src)?;
    } else {
        let mut stdout = std::io::stdout().lock();
        stdout.write_all(&ocaml_src)?;
    }
    Ok(())
}

// Prepend the copyright header, signing token, and (optionally) the
// regeneration command to the generated OCaml source.
fn attach_header(regen_cmd: Option<&str>, contents: &str) -> String {
    let regen_cmd = regen_cmd.map_or_else(String::new, |regen_cmd| {
        format!(" *\n * To regenerate this file, run:\n * {}\n", regen_cmd)
    });
    format!(
        r#"(*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 *
 * {}
{} *)

{}"#,
        signed_source::SIGNING_TOKEN,
        regen_cmd,
        contents
    )
}

// Pipe `contents` through the given formatter binary (if any), returning the
// formatted bytes. Temporarily changes the working directory to `out_dir`.
fn ocamlformat(
    formatter: Option<&str>,
    out_dir: Option<&Path>,
    contents: Vec<u8>,
) -> Result<Vec<u8>> {
    let formatter = match formatter {
        None => return Ok(contents),
        Some(f) => f,
    };
    // Even if we format the file on disk (i.e., at `opts.out_path`),
    // ocamlformat won't look for an .ocamlformat file in the directory
    // containing the file. It only looks up from the current working directory.
    // There's a --root arg, but it doesn't seem to produce the same behavior.
    let prev_dir = std::env::current_dir()?;
    if let Some(out_dir) = out_dir {
        std::env::set_current_dir(out_dir)?;
    }
    let mut child = std::process::Command::new(formatter)
        .stdin(std::process::Stdio::piped())
        .stdout(std::process::Stdio::piped())
        .stderr(std::process::Stdio::piped())
        // In the event that an .ocamlformat file is still not available, tell
        // ocamlformat to please format it anyway.
        .arg("--enable-outside-detected-project")
        .arg("--impl")
        .arg("-")
        .spawn()?;
    let child_stdin = child.stdin.as_mut().unwrap();
    child_stdin.write_all(&contents)?;
    let output = child.wait_with_output()?;
    if !output.status.success() {
        anyhow::bail!("Formatter failed:\n{:#?}", output);
    }
    if out_dir.is_some() {
        std::env::set_current_dir(prev_dir)?;
    }
    Ok(output.stdout)
}

fn write_file(path: &Path, contents: &[u8]) -> Result<()> {
    let mut file = std::fs::File::create(path)?;
    file.write_all(contents)?;
    Ok(())
}
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/rust_to_ocaml/rewrite_module_names.rs
rust_to_ocaml/rust_to_ocaml/rewrite_module_names.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use crate::Config; use crate::ir; pub fn rewrite_file(config: &'static Config, file: &mut ir::File) { let rewriter = Rewriter { config }; rewriter.rewrite_module(&mut file.root) } struct Rewriter { config: &'static Config, } impl Rewriter { fn rewrite_module(&self, module: &mut ir::Module) { module.defs.iter_mut().for_each(|def| self.rewrite_def(def)) } fn rewrite_def(&self, def: &mut ir::Def) { match def { ir::Def::Module(module) => self.rewrite_module(module), ir::Def::Alias { ty, .. } => self.rewrite_type(ty), ir::Def::Record { fields, .. } => fields.iter_mut().for_each(|f| self.rewrite_field(f)), ir::Def::Variant { variants, .. } => { variants.iter_mut().for_each(|v| self.rewrite_variant(v)) } } } fn rewrite_field(&self, field: &mut ir::Field) { self.rewrite_type(&mut field.ty) } fn rewrite_variant(&self, variant: &mut ir::Variant) { variant .fields .iter_mut() .for_each(|f| self.rewrite_variant_fields(f)) } fn rewrite_variant_fields(&self, fields: &mut ir::VariantFields) { match fields { ir::VariantFields::Unnamed(tys) => tys.iter_mut().for_each(|ty| self.rewrite_type(ty)), ir::VariantFields::Named(fields) => { fields.iter_mut().for_each(|f| self.rewrite_field(f)) } } } fn rewrite_type(&self, ty: &mut ir::Type) { match ty { ir::Type::Path(path) => self.rewrite_type_path(path), ir::Type::Tuple(tuple) => self.rewrite_type_tuple(tuple), } } fn rewrite_type_path(&self, path: &mut ir::TypePath) { match path.modules.first().map(ir::ModuleName::as_str) { Some("crate" | "super") => { path.modules.remove(0); } _ => {} } path.modules.iter_mut().for_each(|m| { if let Some(name) = self.config.get_renamed_module(m) { *m = name; } }); path.targs.iter_mut().for_each(|ty| self.rewrite_type(ty)) } fn rewrite_type_tuple(&self, tuple: &mut ir::TypeTuple) { tuple.elems.iter_mut().for_each(|ty| 
self.rewrite_type(ty)) } }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/rust_to_ocaml/convert.rs
rust_to_ocaml/rust_to_ocaml/convert.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use anyhow::Context; use anyhow::Result; use anyhow::bail; use anyhow::ensure; use convert_case::Case; use convert_case::Casing; use crate::Config; use crate::ir; use crate::ir::Def; use crate::ir::FieldName; use crate::ir::File; use crate::ir::TypeName; use crate::ir::VariantName; pub fn convert_file( config: &'static Config, filename: &std::path::Path, file: &syn::File, ) -> Result<String> { let defs = (file.items.iter()) .filter_map(|item| ItemConverter::convert_item(config, item).transpose()) .collect::<Result<_>>()?; let file_stem = filename.file_stem().context("expected nonempty filename")?; let module_name = file_stem.to_str().context("non-UTF8 filename")?.to_owned(); let mut file = File { root: ir::Module { name: ir::ModuleName::new(module_name)?, defs, }, }; crate::rewrite_types::rewrite_file(config, &mut file); crate::rewrite_module_names::rewrite_file(config, &mut file); Ok(file.to_string()) } struct ItemConverter { config: &'static Config, tparams: Vec<String>, } impl ItemConverter { fn convert_item(config: &'static Config, item: &syn::Item) -> Result<Option<Def>> { use syn::Item; match item { Item::Type(item) => { let this = ItemConverter::new(config, &item.generics); Ok(Some(this.convert_item_type(item).with_context(|| { format!("Failed to convert type {}", item.ident) })?)) } Item::Struct(item) => { let this = ItemConverter::new(config, &item.generics); Ok(Some(this.convert_item_struct(item).with_context(|| { format!("Failed to convert type {}", item.ident) })?)) } Item::Enum(item) => { let this = ItemConverter::new(config, &item.generics); Ok(Some(this.convert_item_enum(item).with_context(|| { format!("Failed to convert type {}", item.ident) })?)) } Item::Mod(item) => { if let Some((_brace, items)) = &item.content { let defs = items .iter() .filter_map(|item| 
Self::convert_item(config, item).transpose()) .collect::<Result<_>>() .with_context(|| format!("Failed to convert module {}", item.ident))?; Ok(Some(Def::Module(ir::Module { name: ir::ModuleName::new(item.ident.to_string())?, defs, }))) } else { Ok(None) } } _ => Ok(None), } } fn new(config: &'static Config, generics: &syn::Generics) -> Self { let tparams = generics .type_params() .map(|tparam| tparam.ident.to_string()) .collect(); Self { config, tparams } } fn convert_item_type(self, item: &syn::ItemType) -> Result<Def> { let name = TypeName(item.ident.to_string().to_case(Case::Snake)); let attrs = attr_parser::Attrs::from_type(item); let ty = self.convert_type(&item.ty)?; Ok(Def::Alias { doc: attrs.doc, attrs: attrs.attrs, mutual_rec: attrs.mutual_rec, tparams: self.tparams, name, ty, }) } fn convert_item_struct(self, item: &syn::ItemStruct) -> Result<Def> { let name = TypeName(item.ident.to_string().to_case(Case::Snake)); let container_attrs = attr_parser::Attrs::from_struct(item); match &item.fields { syn::Fields::Unit => Ok(Def::Alias { doc: container_attrs.doc, attrs: container_attrs.attrs, mutual_rec: container_attrs.mutual_rec, tparams: self.tparams, name, ty: ir::Type::Path(ir::TypePath::simple("unit")), }), syn::Fields::Unnamed(fields) => { let elems = (fields.unnamed.iter()) .map(|field| self.convert_type(&field.ty)) .collect::<Result<Vec<_>>>()?; Ok(Def::Alias { doc: container_attrs.doc, attrs: container_attrs.attrs, mutual_rec: container_attrs.mutual_rec, tparams: self.tparams, name, ty: if elems.is_empty() { ir::Type::Path(ir::TypePath::simple("unit")) } else { ir::Type::Tuple(ir::TypeTuple { elems }) }, }) } syn::Fields::Named(fields) => { let fields = (fields.named.iter()) .map(|field| { let field_attrs = attr_parser::Attrs::from_field(field); let name = if let Some(name) = field_attrs.name { FieldName(name) } else { field_name(field.ident.as_ref(), container_attrs.prefix.as_deref()) }; let ty = self.convert_type(&field.ty)?; Ok(ir::Field { name, 
ty, doc: field_attrs.doc, attrs: field_attrs.attrs, }) }) .collect::<Result<Vec<_>>>()?; Ok(Def::Record { doc: container_attrs.doc, attrs: container_attrs.attrs, mutual_rec: container_attrs.mutual_rec, tparams: self.tparams, name, fields, }) } } } fn convert_item_enum(self, item: &syn::ItemEnum) -> Result<Def> { let name = TypeName(item.ident.to_string().to_case(Case::Snake)); let container_attrs = attr_parser::Attrs::from_enum(item); let variants = item .variants .iter() .map(|variant| { let variant_attrs = attr_parser::Attrs::from_variant(variant); let name = if let Some(name) = variant_attrs.name { VariantName(name) } else { variant_name(&variant.ident, container_attrs.prefix.as_deref()) }; let fields = match &variant.fields { syn::Fields::Unit => None, syn::Fields::Unnamed(fields) => { let mut fields = (fields.unnamed.iter()) .map(|field| self.convert_type(&field.ty)) .collect::<Result<Vec<_>>>()?; if variant_attrs.inline_tuple { assert_eq!(fields.len(), 1); let field = fields.pop().unwrap(); match field { ir::Type::Path(mut path) => { if path.targs.len() == 1 && self.config.is_transparent_type(&path) && matches!(path.targs[0], ir::Type::Tuple(..)) { if let Some(ir::Type::Tuple(tuple)) = path.targs.pop() { Some(ir::VariantFields::Unnamed(tuple.elems)) } else { unreachable!() } } else { anyhow::bail!( "Variant {} must have a single argument which is a tuple", variant.ident ) } } ir::Type::Tuple(tuple) => { Some(ir::VariantFields::Unnamed(tuple.elems)) } } } else { Some(ir::VariantFields::Unnamed(fields)) } } syn::Fields::Named(fields) => Some(ir::VariantFields::Named( (fields.named.iter()) .map(|field| { let field_attrs = attr_parser::Attrs::from_field(field); let name = if let Some(name) = field_attrs.name { FieldName(name) } else { field_name( field.ident.as_ref(), variant_attrs.prefix.as_deref(), ) }; let ty = self.convert_type(&field.ty)?; Ok(ir::Field { name, ty, doc: field_attrs.doc, attrs: field_attrs.attrs, }) }) .collect::<Result<_>>()?, )), }; 
Ok(ir::Variant { name, fields, doc: variant_attrs.doc, attrs: variant_attrs.attrs, }) }) .collect::<Result<Vec<_>>>()?; Ok(Def::Variant { doc: container_attrs.doc, attrs: container_attrs.attrs, mutual_rec: container_attrs.mutual_rec, tparams: self.tparams, name, variants, }) } fn convert_type(&self, ty: &syn::Type) -> Result<ir::Type> { match ty { syn::Type::Path(ty) => Ok(ir::Type::Path(self.convert_type_path(ty)?)), syn::Type::Tuple(ty) => { if ty.elems.is_empty() { Ok(ir::Type::Path(ir::TypePath::simple("unit"))) } else { Ok(ir::Type::Tuple(ir::TypeTuple { elems: (ty.elems.iter()) .map(|e| self.convert_type(e)) .collect::<Result<_>>()?, })) } } syn::Type::Reference(ty) => Ok(self.convert_type(&ty.elem)?), syn::Type::Slice(ty) => Ok(ir::Type::Path(ir::TypePath { modules: vec![], targs: vec![self.convert_type(&ty.elem)?], ty: ir::TypeName(String::from("list")), })), _ => bail!("Not supported: {:?}", ty), } } fn convert_type_path(&self, ty: &syn::TypePath) -> Result<ir::TypePath> { ensure!(ty.qself.is_none(), "Qualified self in paths not supported"); let last_seg = ty.path.segments.last().unwrap(); if ty.path.segments.len() == 1 && last_seg.arguments.is_empty() { let ident = last_seg.ident.to_string(); if self.tparams.contains(&ident) { let tparam = format!("'{}", ident.to_case(Case::Snake)); return Ok(ir::TypePath::simple(tparam)); } } let segments_except_last = ty.path.segments.iter().rev().skip(1).rev(); Ok(ir::TypePath { modules: segments_except_last .map(|seg| { ensure!( seg.arguments.is_empty(), "Type args only supported in last path segment" ); ir::ModuleName::new(seg.ident.to_string()) }) .collect::<Result<_>>()?, ty: TypeName(last_seg.ident.to_string()), targs: match &last_seg.arguments { syn::PathArguments::AngleBracketed(args) => (args.args.iter()) .filter_map(|arg| match arg { syn::GenericArgument::Type(arg) => Some(self.convert_type(arg)), _ => None, }) .collect::<Result<_>>()?, _ => vec![], }, }) } } fn field_name(ident: Option<&syn::Ident>, prefix: 
Option<&str>) -> FieldName { FieldName(format!("{}{}", prefix.unwrap_or_default(), ident.unwrap())) } fn variant_name(ident: &syn::Ident, prefix: Option<&str>) -> VariantName { VariantName(format!("{}{}", prefix.unwrap_or_default(), ident)) }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/rust_to_ocaml/macros.rs
rust_to_ocaml/rust_to_ocaml/macros.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. /// Provide impls of `Serialize` and `Deserialize` which delegate to the impls /// of `std::fmt::Display` and `std::str::FromStr` respectively. macro_rules! serde_from_display { ($name:ident, $expecting:expr) => { impl ::serde::Serialize for $name { fn serialize<S: ::serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { serializer.serialize_str(&self.to_string()) } } impl<'de> ::serde::Deserialize<'de> for $name { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: ::serde::Deserializer<'de>, { struct Visitor; impl<'de> ::serde::de::Visitor<'de> for Visitor { type Value = $name; fn expecting( &self, formatter: &mut ::std::fmt::Formatter<'_>, ) -> ::std::fmt::Result { formatter.write_str($expecting) } fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: ::serde::de::Error, { value.parse().map_err(|e| { E::invalid_value(::serde::de::Unexpected::Other(&format!("{e}")), &self) }) } } deserializer.deserialize_str(Visitor) } } }; }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/rust_to_ocaml/ir.rs
rust_to_ocaml/rust_to_ocaml/ir.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. mod display; use derive_more::Display; #[derive(Debug)] pub struct File { pub root: Module, } #[derive(Debug)] pub struct Module { pub name: ModuleName, pub defs: Vec<Def>, } #[derive(Debug)] pub enum Def { Module(Module), Alias { doc: Vec<String>, attrs: Vec<String>, mutual_rec: bool, tparams: Vec<String>, name: TypeName, ty: Type, }, Record { doc: Vec<String>, attrs: Vec<String>, mutual_rec: bool, tparams: Vec<String>, name: TypeName, fields: Vec<Field>, }, Variant { doc: Vec<String>, attrs: Vec<String>, mutual_rec: bool, tparams: Vec<String>, name: TypeName, variants: Vec<Variant>, }, } #[derive(Debug)] pub struct Variant { pub name: VariantName, pub fields: Option<VariantFields>, pub doc: Vec<String>, pub attrs: Vec<String>, } #[derive(Debug)] pub enum VariantFields { Unnamed(Vec<Type>), Named(Vec<Field>), } #[derive(Debug)] pub struct Field { pub name: FieldName, pub ty: Type, pub doc: Vec<String>, pub attrs: Vec<String>, } #[derive(Debug)] pub enum Type { Path(TypePath), Tuple(TypeTuple), } #[derive(Debug)] pub struct TypePath { pub targs: Vec<Type>, pub modules: Vec<ModuleName>, pub ty: TypeName, } impl TypePath { pub fn simple(id: impl Into<String>) -> Self { Self { modules: vec![], ty: TypeName(id.into()), targs: vec![], } } } #[derive(Debug)] pub struct TypeTuple { pub elems: Vec<Type>, } #[derive(Clone, Hash, PartialEq, Eq)] pub struct ModuleName(String); impl ModuleName { pub fn new(name: impl Into<String>) -> anyhow::Result<Self> { let name: String = name.into(); anyhow::ensure!(!name.is_empty(), "Module names must not be empty"); let first_char = name.chars().next().unwrap(); anyhow::ensure!( first_char.is_ascii(), "Module names must start with an ASCII character: {}", name ); anyhow::ensure!( first_char.to_ascii_uppercase().is_ascii_uppercase(), "Module names must start 
with a character which can be converted to uppercase: {}", name ); Ok(Self(name)) } pub fn as_str(&self) -> &str { self.0.as_str() } } impl std::str::FromStr for ModuleName { type Err = anyhow::Error; fn from_str(s: &str) -> anyhow::Result<Self> { Self::new(s) } } serde_from_display!(ModuleName, "a valid Rust or OCaml module name"); #[derive(Clone, Hash, PartialEq, Eq)] pub struct TypeName(pub String); impl TypeName { pub fn as_str(&self) -> &str { self.0.as_str() } } #[derive(Clone, Hash, PartialEq, Eq)] pub struct FieldName(pub String); #[derive(Clone, Hash, PartialEq, Eq, Display)] pub struct VariantName(pub String); impl std::fmt::Debug for ModuleName { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Debug::fmt(&self.0, f) } } impl std::fmt::Debug for TypeName { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Debug::fmt(&self.0, f) } } impl std::fmt::Debug for FieldName { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Debug::fmt(&self.0, f) } } impl std::fmt::Debug for VariantName { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Debug::fmt(&self.0, f) } }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/rust_to_ocaml/rewrite_types.rs
rust_to_ocaml/rust_to_ocaml/rewrite_types.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use convert_case::Case; use convert_case::Casing; use crate::Config; use crate::ir; pub fn rewrite_file(config: &'static Config, file: &mut ir::File) { Rewriter::rewrite_module(config, &mut file.root) } struct Rewriter { config: &'static Config, module_name: ir::ModuleName, } impl Rewriter { fn rewrite_module(config: &'static Config, module: &mut ir::Module) { let this = Self { config, module_name: module.name.clone(), }; module.defs.iter_mut().for_each(|def| this.rewrite_def(def)) } fn rewrite_def(&self, def: &mut ir::Def) { let rewrite_name = |name: &mut ir::TypeName| { if name.as_str() == self.module_name.as_str() { *name = ir::TypeName(String::from("t")); } }; match def { ir::Def::Module(module) => Self::rewrite_module(self.config, module), ir::Def::Alias { name, ty, .. } => { rewrite_name(name); self.rewrite_type(ty) } ir::Def::Record { name, fields, .. } => { rewrite_name(name); fields.iter_mut().for_each(|f| self.rewrite_field(f)) } ir::Def::Variant { name, variants, .. 
} => { rewrite_name(name); variants.iter_mut().for_each(|v| self.rewrite_variant(v)) } } } fn rewrite_field(&self, field: &mut ir::Field) { self.rewrite_type(&mut field.ty) } fn rewrite_variant(&self, variant: &mut ir::Variant) { variant .fields .iter_mut() .for_each(|f| self.rewrite_variant_fields(f)) } fn rewrite_variant_fields(&self, fields: &mut ir::VariantFields) { match fields { ir::VariantFields::Unnamed(tys) => tys.iter_mut().for_each(|ty| self.rewrite_type(ty)), ir::VariantFields::Named(fields) => { fields.iter_mut().for_each(|f| self.rewrite_field(f)) } } } fn rewrite_type(&self, ty: &mut ir::Type) { match ty { ir::Type::Path(path) => { if path.targs.len() == 1 && self.config.is_transparent_type(path) { *ty = path.targs.pop().unwrap(); self.rewrite_type(ty); } else { self.rewrite_type_path(path); } } ir::Type::Tuple(tuple) => self.rewrite_type_tuple(tuple), } } fn rewrite_type_path(&self, path: &mut ir::TypePath) { // Convert all integer types to `int`. The impls of ToOcamlRep // and FromOcamlRep for integer types do checked conversions, so // we'll fail at runtime if our int value doesn't fit into // OCaml's integer width. if path.modules.is_empty() && path.targs.is_empty() { match path.ty.as_str() { "i8" | "u8" | "i16" | "u16" | "i32" | "u32" | "i64" | "u64" | "i128" | "u128" | "isize" | "usize" => { path.ty = ir::TypeName(String::from("int")); return; } _ => {} } } if let Some(renamed_path) = self.config.get_renamed_type(path) { path.ty = renamed_path.ty; path.modules = renamed_path.modules; } let ty = path.ty.as_str().to_case(Case::Snake); let ty_matches_last_module_in_path = (path.modules.last()).is_some_and(|module| ty == module.as_str()); if ty_matches_last_module_in_path || ty == self.module_name.as_str() { path.ty = ir::TypeName(String::from("t")); } path.targs.iter_mut().for_each(|ty| self.rewrite_type(ty)) } fn rewrite_type_tuple(&self, tuple: &mut ir::TypeTuple) { tuple.elems.iter_mut().for_each(|ty| self.rewrite_type(ty)) } }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/rust_to_ocaml/ir/display.rs
rust_to_ocaml/rust_to_ocaml/ir/display.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use std::fmt::Display; use std::fmt::Formatter; use std::fmt::Result; use convert_case::Case; use convert_case::Casing; use crate::ir; impl Display for ir::File { fn fmt(&self, f: &mut Formatter<'_>) -> Result { self.root.fmt(f) } } impl Display for ir::Module { fn fmt(&self, f: &mut Formatter<'_>) -> Result { for def in self.defs.iter() { def.fmt(f)? } Ok(()) } } impl Display for ir::Def { fn fmt(&self, f: &mut Formatter<'_>) -> Result { match self { Self::Module(module) => { writeln!(f, "module {} = struct", module.name)?; module.fmt(f)?; writeln!(f, "end")? } Self::Alias { doc, attrs, mutual_rec, tparams, name, ty, } => { write_toplevel_doc_comment(f, doc)?; if *mutual_rec { write!(f, "and ")?; } else { write!(f, "type ")?; } write_type_parameters(f, tparams)?; write!(f, "{name} = {ty}")?; for attr in attrs { write!(f, " [@@{}]", attr)?; } writeln!(f)?; } Self::Record { doc, attrs, mutual_rec, tparams, name, fields, } => { write_toplevel_doc_comment(f, doc)?; if *mutual_rec { write!(f, "and ")?; } else { write!(f, "type ")?; } write_type_parameters(f, tparams)?; writeln!(f, "{name} = {{")?; for field in fields { writeln!(f, " {field}")?; } write!(f, "}}")?; for attr in attrs { write!(f, " [@@{}]", attr)?; } writeln!(f)?; } Self::Variant { doc, attrs, mutual_rec, tparams, name, variants, } => { write_toplevel_doc_comment(f, doc)?; if *mutual_rec { write!(f, "and ")?; } else { write!(f, "type ")?; } write_type_parameters(f, tparams)?; writeln!(f, "{name} =")?; for variant in variants { writeln!(f, " | {variant}")?; } for attr in attrs { writeln!(f)?; write!(f, "[@@{}]", attr)?; } writeln!(f)?; } } writeln!(f)?; Ok(()) } } impl Display for ir::Variant { fn fmt(&self, f: &mut Formatter<'_>) -> Result { let Self { name, fields, doc, attrs, } = self; write!(f, "{name}")?; if let 
Some(fields) = fields { write!(f, " of {fields}")?; } for attr in attrs { write!(f, " [@{}]", attr)?; } write_field_or_variant_doc_comment(f, doc)?; Ok(()) } } impl Display for ir::VariantFields { fn fmt(&self, f: &mut Formatter<'_>) -> Result { match self { Self::Unnamed(fields) => { let mut iter = fields.iter(); let ty = iter.next().expect("empty VariantFields::Unnamed"); ty.fmt(f)?; for ty in iter { write!(f, " * {ty}")?; } Ok(()) } Self::Named(fields) => { writeln!(f, "{{")?; for field in fields { writeln!(f, " {field}")?; } write!(f, "}}") } } } } impl Display for ir::Field { fn fmt(&self, f: &mut Formatter<'_>) -> Result { let Self { name, ty, doc, attrs, } = self; write!(f, "{name}: {ty};")?; for attr in attrs { write!(f, " [@{}]", attr)?; } write_field_or_variant_doc_comment(f, doc)?; Ok(()) } } impl Display for ir::Type { fn fmt(&self, f: &mut Formatter<'_>) -> Result { match self { Self::Path(ty) => ty.fmt(f), Self::Tuple(ty) => ty.fmt(f), } } } impl Display for ir::TypePath { fn fmt(&self, f: &mut Formatter<'_>) -> Result { match self.targs.as_slice() { [] => {} [targ] => write!(f, "{} ", targ)?, [first, rest @ ..] => { write!(f, "({}", first)?; for targ in rest { write!(f, ", {}", targ)?; } write!(f, ") ")?; } } for module in self.modules.iter() { write!(f, "{}.", module)?; } write!(f, "{}", self.ty) } } impl Display for ir::TypeTuple { fn fmt(&self, f: &mut Formatter<'_>) -> Result { write!(f, "(")?; let mut elems = self.elems.iter(); let elem = elems.next().expect("empty TypeTuple"); write!(f, "{elem}")?; for elem in elems { write!(f, " * {elem}")?; } write!(f, ")") } } impl Display for ir::ModuleName { fn fmt(&self, f: &mut Formatter<'_>) -> Result { let name = &self.0; let mut first_char = name.chars().next().unwrap(); // Invariant: self.0 is nonempty // OCaml modules _must_ start with an uppercase letter (the OCaml parser // depends on this). 
We ensure in `ModuleName`'s constructor that the // first character is ASCII, so we can use `make_ascii_uppercase`. first_char.make_ascii_uppercase(); assert!(first_char.is_ascii_uppercase()); write!(f, "{}", first_char)?; write!(f, "{}", &name[1..]) } } fn is_ocaml_keyword(name: &str) -> bool { match name { "and" | "as" | "assert" | "asr" | "begin" | "class" | "constraint" | "do" | "done" | "downto" | "else" | "end" | "exception" | "external" | "false" | "for" | "fun" | "function" | "functor" | "if" | "in" | "include" | "inherit" | "initializer" | "land" | "lazy" | "let" | "lor" | "lsl" | "lsr" | "lxor" | "match" | "method" | "mod" | "module" | "mutable" | "new" | "nonrec" | "object" | "of" | "open" | "or" | "private" | "rec" | "sig" | "struct" | "then" | "to" | "true" | "try" | "type" | "val" | "virtual" | "when" | "while" | "with" => true, _ => false, } } impl Display for ir::TypeName { fn fmt(&self, f: &mut Formatter<'_>) -> Result { let name = self.0.to_case(Case::Snake); if is_ocaml_keyword(name.as_str()) { write!(f, "{}_", name) } else { name.fmt(f) } } } impl Display for ir::FieldName { fn fmt(&self, f: &mut Formatter<'_>) -> Result { let name = self.0.to_case(Case::Snake); if is_ocaml_keyword(name.as_str()) { write!(f, "{}_", name) } else { name.fmt(f) } } } fn write_toplevel_doc_comment(f: &mut std::fmt::Formatter<'_>, doc: &[String]) -> std::fmt::Result { if doc.is_empty() { return Ok(()); } write!(f, "(**{}", doc.join("\n *"))?; if doc.len() == 1 { if !doc[0].contains('\n') { write!(f, " ")?; } } else { write!(f, "\n ")?; } writeln!(f, "*)")?; Ok(()) } fn write_field_or_variant_doc_comment( f: &mut std::fmt::Formatter<'_>, doc: &[String], ) -> std::fmt::Result { if doc.is_empty() { return Ok(()); } let joined = doc.join("\n *"); write!(f, "(**{}", joined)?; if !joined.ends_with(' ') { write!(f, " ")?; } writeln!(f, "*)")?; Ok(()) } fn write_type_parameters(f: &mut std::fmt::Formatter<'_>, tparams: &[String]) -> std::fmt::Result { match tparams { [] => 
{} [tparam] => write!(f, "'{} ", tparam.to_case(Case::Snake))?, [first, rest @ ..] => { write!(f, "('{}", first.to_case(Case::Snake))?; for tparam in rest { write!(f, ", '{} ", tparam.to_case(Case::Snake))?; } write!(f, ") ")?; } } Ok(()) }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/test_rust_to_ocaml.rs
rust_to_ocaml/test/test_rust_to_ocaml.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use std::ffi::OsStr; use std::path::Path; use std::path::PathBuf; #[derive(Debug, clap::Parser)] struct Opts { /// The directory containing snapshot test cases (.rs) and expected output /// files (.rs.exp). cases: PathBuf, /// The rust_to_ocaml binary. rust_to_ocaml_bin: PathBuf, /// Update the expected output files instead of testing that the actual /// output matches the expected. #[clap(long)] update_snapshots: bool, /// Arguments which will be passed to the rust_to_ocaml binary. rust_to_ocaml_args: Vec<PathBuf>, } const IN_EXT: &str = "rs"; const EXP_EXT: &str = "exp"; fn main() -> anyhow::Result<()> { let opts = <Opts as clap::Parser>::from_args(); if opts.update_snapshots { update_snapshots(&opts)?; } else { run_test_cases(&opts)?; } Ok(()) } fn get_test_cases(dir: &Path) -> impl Iterator<Item = PathBuf> + '_ { walkdir::WalkDir::new(dir) .sort_by_file_name() .into_iter() .map(|e| e.unwrap()) .filter(|e| e.file_type().is_file()) .filter(|e| e.path().extension().and_then(OsStr::to_str) == Some(IN_EXT)) .map(|e| e.path().to_owned()) } fn expected_output_path(test_case: &Path) -> PathBuf { assert_eq!(test_case.extension().and_then(OsStr::to_str), Some(IN_EXT)); let mut path = test_case.to_owned(); path.set_extension(format!("{IN_EXT}.{EXP_EXT}")); path } fn run_test_cases(opts: &Opts) -> anyhow::Result<()> { let mut failures = vec![]; for path in get_test_cases(&opts.cases) { if let Err(diff) = run_test_case(opts, &path)? 
{ let rel_path = path.strip_prefix(&opts.cases).unwrap_or(&path).to_owned(); eprintln!("=== TEST FAILED: {} ==================", rel_path.display()); eprintln!("{diff}"); failures.push(rel_path); } } if !failures.is_empty() { anyhow::bail!("{} test cases failed: {:?}", failures.len(), failures); } Ok(()) } fn run_test_case(opts: &Opts, test_case: &Path) -> anyhow::Result<Result<(), String>> { let exp_path = expected_output_path(test_case); let expected = std::fs::read_to_string(exp_path)?; let actual = rust_to_ocaml(opts, test_case)?; if expected == actual { Ok(Ok(())) } else { Ok(Err(similar::TextDiff::from_lines(&expected, &actual) .unified_diff() .context_radius(10) .header("expected", "actual") .to_string())) } } fn rust_to_ocaml(opts: &Opts, rust_file: &Path) -> anyhow::Result<String> { let mut cmd = std::process::Command::new(&opts.rust_to_ocaml_bin); for arg in &opts.rust_to_ocaml_args { cmd.arg(arg); } cmd.arg(rust_file); let output = cmd.output()?; let mut output_str = String::from_utf8(output.stdout)?; output_str.push_str(std::str::from_utf8(&output.stderr)?); Ok(output_str) } fn update_snapshots(opts: &Opts) -> anyhow::Result<()> { let test_cases: Vec<PathBuf> = get_test_cases(&opts.cases).collect(); println!( "Updating snapshots for {} test cases in {}", test_cases.len(), opts.cases .canonicalize() .unwrap_or_else(|_| opts.cases.clone()) .display(), ); let mut num_updated = 0; for test_case in test_cases { let exp_path = expected_output_path(&test_case); let actual = rust_to_ocaml(opts, &test_case)?; if let Ok(expected) = std::fs::read_to_string(&exp_path) { if expected == actual { continue; } } let mut expected = std::fs::File::create(&exp_path)?; std::io::Write::write_all(&mut expected, actual.as_bytes())?; num_updated += 1; } println!("Updated {num_updated} snapshots."); Ok(()) }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/struct.rs
rust_to_ocaml/test/cases/struct.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. pub struct MyStruct { pub foo: isize, pub bar: isize, } #[rust_to_ocaml(prefix = "a_")] pub struct StructA { pub foo: isize, pub bar: isize, } #[rust_to_ocaml(prefix = "b_")] pub struct StructB { pub foo: isize, pub bar: isize, }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/type_name_matches_module_name.rs
rust_to_ocaml/test/cases/type_name_matches_module_name.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. pub type A = pos::Pos; pub type B = crate::relative_path::RelativePath; pub type C = collections::s_set::SSet; pub type TypeNameMatchesModuleName = D; pub mod foo { pub type Foo = E; pub type Maybe = Option<Foo>; }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/inline_tuple.rs
rust_to_ocaml/test/cases/inline_tuple.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. pub enum E { Foo((A, B)), Bar(Box<(A, B)>), #[rust_to_ocaml(inline_tuple)] Baz((A, B)), #[rust_to_ocaml(inline_tuple)] Qux(Box<(A, B)>), }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/attrs.rs
rust_to_ocaml/test/cases/attrs.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. /// type X #[rust_to_ocaml(attr = "deriving show")] pub type X = A; /// type Y #[rust_to_ocaml( prefix = "y_", attr = r#"deriving visitors { variety = "iter"; ancestors = ["iter_ab"]; }"# )] pub struct Y { /// foo #[rust_to_ocaml(attr = "opaque")] #[rust_to_ocaml(attr = "visitors.opaque")] pub foo: A, /// bar pub bar: B, } /// type Visibility #[rust_to_ocaml(prefix = "V", attr = "deriving eq, ord, show { with_path = false }")] enum Visibility { /// Private #[rust_to_ocaml(attr = "visitors.name \"visibility_VPrivate\"")] Private, /// Public #[rust_to_ocaml(attr = r#"visitors.name "visibility_VPublic""#)] Public, }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/mutual_rec.rs
rust_to_ocaml/test/cases/mutual_rec.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. pub struct Foo(pub Bar, pub Bar); #[rust_to_ocaml(and)] pub struct Bar(pub Option<Foo>, pub Option<Foo>);
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/pointers.rs
rust_to_ocaml/test/cases/pointers.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. pub type BoxA = Box<A>; pub type RcA = Rc<A>; pub type ArcA = Arc<A>; pub type RcOcA = RcOc<A>; pub type StdBoxA = std::boxed::Box<A>; pub type StdRcA = std::rc::Rc<A>; pub type StdArcA = std::sync::Arc<A>; pub type OcamlrepRcOcA = ocamlrep::rc::RcOc<A>; pub type BoxedTuple = Box<(Box<A>, Box<B>)>;
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/tuple_structs.rs
rust_to_ocaml/test/cases/tuple_structs.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. pub struct A; pub struct B(); pub struct C(()); pub struct D(pub X); pub struct E(pub Y, pub Z);
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/keywords.rs
rust_to_ocaml/test/cases/keywords.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. pub struct Foo { and: A, assert: A, asr: A, begin: A, class: A, constraint: A, done: A, downto: A, end: A, exception: A, external: A, fun: A, function: A, functor: A, include: A, inherit: A, initializer: A, land: A, lazy: A, lor: A, lsl: A, lsr: A, lxor: A, method: A, module: A, mutable: A, new: A, nonrec: A, object: A, of: A, open: A, or: A, private: A, rec: A, sig: A, then: A, to: A, val: A, when: A, with: A, } type A = And; type A = As; type A = Assert; type A = Asr; type A = Begin; type A = Class; type A = Constraint; type A = Do; type A = Done; type A = Downto; type A = Else; type A = End; type A = Exception; type A = External; type A = False; type A = For; type A = Fun; type A = Function; type A = Functor; type A = If; type A = In; type A = Include; type A = Inherit; type A = Initializer; type A = Land; type A = Lazy; type A = Let; type A = Lor; type A = Lsl; type A = Lsr; type A = Lxor; type A = Match; type A = Method; type A = Mod; type A = Module; type A = Mutable; type A = New; type A = Nonrec; type A = Object; type A = Of; type A = Open; type A = Or; type A = Private; type A = Rec; type A = Sig; type A = Struct; type A = Then; type A = To; type A = True; type A = Try; type A = Type; type A = Val; type A = Virtual; type A = When; type A = While; type A = With;
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/lists.rs
rust_to_ocaml/test/cases/lists.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. pub type MyVec = Vec<X>; pub type BoxedSlice = Box<[X]>; pub type Slice<'a> = &'a [X]; pub type StdVec = std::vec::Vec<X>; pub type StdBoxedSlice = std::boxed::Box<[X]>;
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/type_args.rs
rust_to_ocaml/test/cases/type_args.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. pub type MaybeA = Option<A>; pub type MyResult = Result<B, C>; pub type Bar = foo::Bar<D, E, F>;
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/qualified_name.rs
rust_to_ocaml/test/cases/qualified_name.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. pub type A = x::Foo; pub type B = y::z::Foo; pub type C = my_module::some_submodule::Foo; pub type D = i_map::IMap<X>;
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/variants.rs
rust_to_ocaml/test/cases/variants.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. pub enum A { I, J(isize), K(isize, isize), L((isize, isize)), M { x: isize, y: isize }, } #[rust_to_ocaml(prefix = "P")] pub enum Prefixed { I, J(isize), K(isize, isize), L((isize, isize)), #[rust_to_ocaml(prefix = "m_")] M { x: isize, y: isize, }, }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/tuple.rs
rust_to_ocaml/test/cases/tuple.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. type TupleA = (A, B); type TupleB = (A, (B, C));
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/option.rs
rust_to_ocaml/test/cases/option.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. pub enum MyOption<A> { None, Some(A), }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/int.rs
rust_to_ocaml/test/cases/int.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. pub type A = i8; pub type B = u8; pub type C = i16; pub type D = u16; pub type E = i32; pub type F = u32; pub type G = i64; pub type H = u64; pub type I = i128; pub type J = u128; pub type K = isize; pub type L = usize;
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/my_result.rs
rust_to_ocaml/test/cases/my_result.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. pub enum Result<T, E> { Ok(T), Err(E), }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/name_attribute.rs
rust_to_ocaml/test/cases/name_attribute.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. #[rust_to_ocaml(name = "aa")] // ignored type A = X; #[rust_to_ocaml(name = "bb")] // ignored struct B { #[rust_to_ocaml(name = "bb_x")] foo: x, #[rust_to_ocaml(name = "bb_y")] bar: y, } #[rust_to_ocaml(name = "cc")] // ignored #[rust_to_ocaml(prefix = "C")] enum C { #[rust_to_ocaml(name = "C_foo")] Foo, #[rust_to_ocaml(name = "Bar")] Bar { #[rust_to_ocaml(name = "bar_x")] foo: x, #[rust_to_ocaml(name = "bar_y")] bar: y, }, Baz, } type a_alias = a; type b_alias = b; type c_alias = c;
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/doc_comment.rs
rust_to_ocaml/test/cases/doc_comment.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. /// Type A pub type A = X; /// Type B /// is int pub type B = X; /// Type C has a fenced code block: /// /// ``` /// function f(): int { /// return 0; /// } /// ``` /// /// And an unfenced code block: /// /// function g(): int { /// return 0; /// } /// /// It should stay indented. pub type C = X; /** Type D has a multiline delimited comment: ``` function f(): int { return 0; } ``` And an indented code block: ``` function g(): int { return 0; } ``` */ pub type D = X; /// Records can have comments on the fields. pub struct Record { /// The comments come after the field declaration in OCaml. pub foo: X, /// bar comment pub bar: X, } /// Variant types can have comments on each variant. pub enum Variant { /// Again, the comments come after the variant declaration. /// Multiline comments are understood. Foo, /** Bar has a multiline delimited comment, even though it's unusual in Rust source. */ Bar, /// Baz comment Baz { a: X, b: X }, /// Qux is a struct-like variant with a long comment spanning /// multiple lines. #[rust_to_ocaml(prefix = "q_")] Qux { a: X, b: X }, }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/bool_alias.rs
rust_to_ocaml/test/cases/bool_alias.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. pub type A = bool;
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/test/cases/inline_tuple_bad.rs
rust_to_ocaml/test/cases/inline_tuple_bad.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. pub enum E { #[rust_to_ocaml(inline_tuple)] Foo(Box<A>), }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/rust_to_ocaml/attr_parser/attr_parser.rs
rust_to_ocaml/attr_parser/attr_parser.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use syn::Meta::List; use syn::Meta::NameValue; use syn::Meta::Path; use syn::NestedMeta::Lit; use syn::NestedMeta::Meta; static DOC: &str = "doc"; static RUST_TO_OCAML: &str = "rust_to_ocaml"; static PREFIX: &str = "prefix"; static AND: &str = "and"; static ATTR: &str = "attr"; static NAME: &str = "name"; static INLINE_TUPLE: &str = "inline_tuple"; /// The attributes understood by `rust_to_ocaml`. #[derive(Clone, Debug)] pub struct Attrs { /// Doc comments (and their desugared form, the `#[doc]` attribute) are /// picked up by rust_to_ocaml and included as ocamldoc comments. /// /// /// Type A /// pub type A = X; /// /// is converted to: /// /// (** Type A *) /// type a = x pub doc: Vec<String>, /// Sometimes OCaml programs use prefixes to distinguish fields of the same /// name in different records (avoiding OCaml warning 30). The `prefix` /// attribute (on a declaration of a struct or a struct-like enum variant) /// indicates the prefix that should be added to each field name. /// /// #[rust_to_ocaml(prefix = "a_")] /// pub struct StructA { pub foo: isize, pub bar: isize } /// #[rust_to_ocaml(prefix = "b_")] /// pub struct StructB { pub foo: isize, pub bar: isize } /// /// is converted to: /// /// type struct_a = { a_foo: int; a_bar: int; } /// type struct_b = { b_foo: int; b_bar: int; } pub prefix: Option<String>, /// OCaml attributes (in OCaml syntax) to annotate a type or field /// declaration with in the generated OCaml. 
/// /// #[rust_to_ocaml(attr = "deriving show")] /// pub type X = A; /// /// is converted to: /// /// type x = a [@@deriving show] pub attrs: Vec<String>, /// Mutual recursion in type definitions is opt-in in OCaml; one writes /// `type x = y list and y = x list` rather than `type x = y list ;; type y /// = x list` (which is an error because the type name `y` is not bound in /// the declaration of `x`). Use the `#[rust_to_ocaml(and)]` attribute to /// indicate when the `and` keyword should be used to continue a mutually /// recursive type declaration. /// /// pub struct Foo(pub Bar, pub Bar); /// #[rust_to_ocaml(and)] /// pub struct Bar(pub Option<Foo>, pub Option<Foo>); /// /// is converted to: /// /// type foo = bar * bar /// and bar = foo option * foo option pub mutual_rec: bool, /// Normally, rust_to_ocaml will convert the names of fields and enum /// variants by attempting to convert idiomatic Rust casing to idiomatic /// OCaml casing. Use the `#[rust_to_ocaml(name = "my_name")]` attribute to /// override this behavior and provide some other name. This attribute takes /// precedence over the `prefix` attribute (no prefix will be applied to the /// given name). This attribute cannot be used to rename types (use /// rust_to_ocaml_config.toml instead). pub name: Option<String>, /// In OCaml, a variant declared as `Foo of (a * b)` is a variant with one /// field which is a pointer to a heap-allocated tuple. A variant declared /// as `Baz of a * b` is a variant with two fields of type `a` and `b`. /// /// By default, rust_to_ocaml will produce variants with a single field. But /// this behavior can be overridden with the `inline_tuple` attribute, /// converting the fields of a tuple (possibly behind a reference, `Box`, or /// any other wrapper type declared in the `types.transparent` section in /// rust_to_ocaml_config.toml) to fields of the OCaml variant. 
/// /// pub enum E { /// Foo((A, B)), /// Bar(Box<(A, B)>), /// #[rust_to_ocaml(inline_tuple)] /// Baz((A, B)), /// #[rust_to_ocaml(inline_tuple)] /// Qux(Box<(A, B)>), /// } /// /// is converted to: /// /// type e = /// | Foo of (a * b) /// | Bar of (a * b) /// | Baz of a * b /// | Qux of a * b pub inline_tuple: bool, } impl Attrs { pub fn from_type(item: &syn::ItemType) -> Self { Self::from_attributes(&item.attrs, AttrKind::Container) } pub fn from_struct(item: &syn::ItemStruct) -> Self { Self::from_attributes(&item.attrs, AttrKind::Container) } pub fn from_enum(item: &syn::ItemEnum) -> Self { Self::from_attributes(&item.attrs, AttrKind::Container) } pub fn from_variant(variant: &syn::Variant) -> Self { Self::from_attributes(&variant.attrs, AttrKind::Variant) } pub fn from_field(field: &syn::Field) -> Self { Self::from_attributes(&field.attrs, AttrKind::Field) } fn from_attributes(attrs: &[syn::Attribute], kind: AttrKind) -> Self { let doc = get_doc_comment(attrs); let mut prefix = None; let mut ocaml_attrs = vec![]; let mut mutual_rec = false; let mut name = None; let mut inline_tuple = false; for meta_item in attrs .iter() .flat_map(get_rust_to_ocaml_meta_items) .flatten() { match &meta_item { // Parse `#[rust_to_ocaml(prefix = "foo")]` Meta(NameValue(m)) if m.path.is_ident(PREFIX) => { // TODO: emit error for AttrKind::Field (should use the // `name` meta item instead) if let Ok(s) = get_lit_str(PREFIX, &m.lit) { prefix = Some(s.value()); } } // Parse `#[rust_to_ocaml(attr = "deriving eq")]` Meta(NameValue(m)) if m.path.is_ident(ATTR) => { if let Ok(s) = get_lit_str(ATTR, &m.lit) { ocaml_attrs.push(s.value()); } } // Parse `#[rust_to_ocaml(and)]` Meta(Path(word)) if word.is_ident(AND) => { // TODO: emit an error instead assert_eq!(kind, AttrKind::Container); mutual_rec = true; } // Parse `#[rust_to_ocaml(name = "foo")]` Meta(NameValue(m)) if m.path.is_ident(NAME) => { // TODO: emit error for AttrKind::Container (should add to // types.rename config instead) if 
let Ok(s) = get_lit_str(NAME, &m.lit) { name = Some(s.value()); } } // Parse `#[rust_to_ocaml(inline_tuple)]` Meta(Path(word)) if word.is_ident(INLINE_TUPLE) => { // TODO: emit an error instead assert_eq!(kind, AttrKind::Variant); inline_tuple = true; } Meta(_meta_item) => { // let path = meta_item // .path() // .into_token_stream() // .to_string() // .replace(' ', ""); // cx.error_spanned_by( // meta_item.path(), // format!("unknown rust_to_ocaml {} attribute `{}`", kind, path), // ); } Lit(_lit) => { // cx.error_spanned_by(lit, format!("unexpected literal in rust_to_ocaml {} attribute", kind)); } } } Self { doc, prefix, attrs: ocaml_attrs, mutual_rec, name, inline_tuple, } } } #[derive(Copy, Clone, Debug, PartialEq, Eq)] enum AttrKind { Container, Variant, Field, } impl std::fmt::Display for AttrKind { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Container => write!(f, "container"), Self::Variant => write!(f, "variant"), Self::Field => write!(f, "field"), } } } pub fn get_doc_comment(attrs: &[syn::Attribute]) -> Vec<String> { attrs .iter() .filter_map(|attr| { if !attr.path.is_ident(DOC) { return None; } match attr.parse_meta() { Ok(syn::Meta::NameValue(meta)) => { if let syn::Lit::Str(s) = meta.lit { Some(s.value()) } else { None } } _ => None, } }) .collect() } fn get_rust_to_ocaml_meta_items(attr: &syn::Attribute) -> Result<Vec<syn::NestedMeta>, ()> { if !attr.path.is_ident(RUST_TO_OCAML) { return Ok(vec![]); } match attr.parse_meta() { Ok(List(meta)) => Ok(meta.nested.into_iter().collect()), Ok(_other) => { // cx.error_spanned_by(other, "expected #[rust_to_ocaml(...)]"); Err(()) } Err(_err) => { // cx.syn_error(err); Err(()) } } } fn get_lit_str<'a>(_attr_name: &'static str, lit: &'a syn::Lit) -> Result<&'a syn::LitStr, ()> { if let syn::Lit::Str(lit) = lit { Ok(lit) } else { // cx.error_spanned_by( // lit, // format!( // "expected rust_to_ocaml {} attribute to be a string: `{} = \"...\"`", // attr_name, attr_name // 
), // ); Err(()) } }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep_derive/lib.rs
ocamlrep_derive/lib.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. #![recursion_limit = "128"] use proc_macro2::TokenStream; use quote::quote; use syn::Attribute; use syn::Meta; use syn::NestedMeta; use synstructure::BindingInfo; use synstructure::VariantInfo; use synstructure::decl_derive; // The rust_to_ocaml_attr crate provides the rust_to_ocaml attribute macro, // which is intended to be consumed by the rust_to_ocaml codegen tool. It // doesn't currently control the behavior of any derived ocamlrep trait impls. // // Unfortunately, rust_to_ocaml_attr does not strip the attribute macro from // positions where attribute macros are not allowed (like field definitions). // The easiest way to do that is to ask proc_macro_derive to do it, but that // requires the use of a derive macro. // // Since all of the types we are interested in passing to rust_to_ocaml // implement an ocamlrep trait, ask proc_macro_derive (via decl_derive) to strip // rust_to_ocaml attributes when deriving ocamlrep traits. // // Even with this stripping, the rust_to_ocaml_attr crate is still required to // strip the attribute from type aliases, which cannot use derive macros. decl_derive!([ToOcamlRep, attributes(rust_to_ocaml, ocamlrep)] => derive_to_ocamlrep); decl_derive!([FromOcamlRep, attributes(rust_to_ocaml, ocamlrep)] => derive_from_ocamlrep); decl_derive!([FromOcamlRepIn, attributes(rust_to_ocaml, ocamlrep)] => derive_from_ocamlrep_in); fn workaround_non_local_def(impl_block: TokenStream) -> TokenStream { // We need to upgrade synstructure to remove this warning, but doing so will also require upgrading // to syn2 and rewriting to handle the API changes. quote! 
{ #[allow(non_local_definitions)] #impl_block } } fn derive_to_ocamlrep(mut s: synstructure::Structure<'_>) -> TokenStream { // remove #[ocamlrep(skip)] for variant in s.variants_mut() { variant.filter(|bi| !has_ocamlrep_skip_attr(&bi.ast().attrs)); } // By default, if you are deriving an impl of trait Foo for generic type // X<T>, synstructure will add Foo as a bound not only for the type // parameter T, but also for every type which appears as a field in X. This // is not necessary for our use case--we can just require that the type // parameters implement our trait. s.add_bounds(synstructure::AddBounds::Generics); let to_body = to_ocamlrep_body(&s); workaround_non_local_def(s.gen_impl(quote! { gen impl ::ocamlrep::ToOcamlRep for @Self { fn to_ocamlrep<'__ocamlrep_derive_allocator, Alloc: ::ocamlrep::Allocator>( &'__ocamlrep_derive_allocator self, arena: &'__ocamlrep_derive_allocator Alloc, ) -> ::ocamlrep::Value<'__ocamlrep_derive_allocator> { use ::ocamlrep::Allocator; match *self { #to_body } } } })) } fn derive_from_ocamlrep(mut s: synstructure::Structure<'_>) -> TokenStream { s.add_bounds(synstructure::AddBounds::Generics); let from_body = from_ocamlrep_body(&mut s); workaround_non_local_def(s.gen_impl(quote! { gen impl ::ocamlrep::FromOcamlRep for @Self { fn from_ocamlrep(value: ::ocamlrep::Value<'_>) -> ::std::result::Result<Self, ::ocamlrep::FromError> { use ::ocamlrep::FromOcamlRep; #from_body } } })) } fn derive_from_ocamlrep_in(mut s: synstructure::Structure<'_>) -> TokenStream { s.add_bounds(synstructure::AddBounds::Generics); if s.ast().generics.lifetimes().next().is_none() { s.add_bounds(synstructure::AddBounds::None); let tparams = s.ast().generics.type_params(); let tparams_implement_from_ocamlrep: TokenStream = tparams .map(|t| quote!(#t : ::ocamlrep::FromOcamlRep,)) .collect(); let from_body = from_ocamlrep_body(&mut s); return workaround_non_local_def(s.gen_impl(quote! 
{ gen impl<'__ocamlrep_derive_allocator> ::ocamlrep::FromOcamlRepIn<'__ocamlrep_derive_allocator> for @Self where #tparams_implement_from_ocamlrep { fn from_ocamlrep_in( value: ::ocamlrep::Value<'_>, alloc: &'__ocamlrep_derive_allocator ::ocamlrep::Bump, ) -> ::std::result::Result<Self, ::ocamlrep::FromError> { use ::ocamlrep::FromOcamlRep; #from_body } } })); } // Constrain the lifetime of `'__ocamlrep_derive_allocator` to be equal to // any declared lifetimes. This is so that we can reference the lifetime // parameter to `FromOcamlRepIn` without requiring implementors to use a // certain name for their lifetime parameter. let lifetimes = s.ast().generics.lifetimes(); let lifetimes: TokenStream = lifetimes .map(|l| { quote! { '__ocamlrep_derive_allocator : #l, #l : '__ocamlrep_derive_allocator, } }) .collect(); let tparams = s.ast().generics.type_params(); let tparams_implement_trivialdrop: TokenStream = tparams .map(|t| quote!(#t : ::arena_trait::TrivialDrop,)) .collect(); let from_in_body = from_ocamlrep_in_body(&mut s); workaround_non_local_def(s.gen_impl(quote! 
{ gen impl<'__ocamlrep_derive_allocator> ::ocamlrep::FromOcamlRepIn<'__ocamlrep_derive_allocator> for @Self where #tparams_implement_trivialdrop #lifetimes { fn from_ocamlrep_in( value: ::ocamlrep::Value<'_>, alloc: &'__ocamlrep_derive_allocator ::ocamlrep::Bump, ) -> ::std::result::Result<Self, ::ocamlrep::FromError> { use ::ocamlrep::FromOcamlRepIn; #from_in_body } } })) } fn to_ocamlrep_body(s: &synstructure::Structure<'_>) -> TokenStream { match &s.ast().data { syn::Data::Struct(struct_data) => struct_to_ocamlrep(s, struct_data), syn::Data::Enum(_) => enum_to_ocamlrep(s, collect_enum_variants(s)), syn::Data::Union(_) => panic!("untagged unions not supported"), } } fn from_ocamlrep_body(s: &mut synstructure::Structure<'_>) -> TokenStream { match &s.ast().data { syn::Data::Struct(struct_data) => struct_from_ocamlrep(s, struct_data, false), syn::Data::Enum(_) => enum_from_ocamlrep(collect_enum_variants(s), false), syn::Data::Union(_) => panic!("untagged unions not supported"), } } fn from_ocamlrep_in_body(s: &mut synstructure::Structure<'_>) -> TokenStream { match &s.ast().data { syn::Data::Struct(struct_data) => struct_from_ocamlrep(s, struct_data, true), syn::Data::Enum(_) => enum_from_ocamlrep(collect_enum_variants(s), true), syn::Data::Union(_) => panic!("untagged unions not supported"), } } fn struct_to_ocamlrep( s: &synstructure::Structure<'_>, struct_data: &syn::DataStruct, ) -> TokenStream { match struct_data.fields { syn::Fields::Unit => { // Represent unit structs with unit. s.each_variant(|_| quote! { arena.add(&()) }) } syn::Fields::Unnamed(ref fields) if fields.unnamed.len() == 1 => { // For the newtype pattern (a tuple struct with a single field), // don't allocate a block--just use the inner value directly. s.each(|bi| quote! { arena.add(#bi) }) } syn::Fields::Named(_) | syn::Fields::Unnamed(_) => { // Otherwise, we have a record-like struct or a tuple struct. Both // are represented with a block. 
s.each_variant(|v| allocate_block(v, 0)) } } } /// Fetch all the parameters from ocamlrep attributes: /// #[ocamlrep(foo, bar), ocamlrep(baz)] /// yields: /// [foo, bar, baz] fn parse_ocamlrep_attr(attrs: &[Attribute]) -> Option<Vec<NestedMeta>> { let mut res = None; for attr in attrs { let meta = attr.parse_meta().unwrap(); match meta { Meta::Path(_) => { // #[foo] } Meta::List(list) => { // #[foo(bar)] if list.path.is_ident("ocamlrep") { res.get_or_insert_with(Vec::new).extend(list.nested); } } Meta::NameValue(_) => { // #[foo = bar] } } } res } /// Returns true if the attributes contain an `#[ocamlrep(skip)]` fn has_ocamlrep_skip_attr(attrs: &[Attribute]) -> bool { if let Some(ocamlrep) = parse_ocamlrep_attr(attrs) { for rep in ocamlrep { match rep { NestedMeta::Meta(Meta::Path(path)) if path.is_ident("skip") => { return true; } _ => {} } } } false } fn struct_from_ocamlrep( s: &mut synstructure::Structure<'_>, struct_data: &syn::DataStruct, from_in: bool, ) -> TokenStream { let variant = &mut s.variants_mut()[0]; match struct_data.fields { syn::Fields::Unit => { let constructor = variant.construct(|_, _| quote!(unreachable!())); quote! { <()>::from_ocamlrep(value)?; Ok(#constructor) } } syn::Fields::Unnamed(ref fields) if fields.unnamed.len() == 1 => { let constructor = variant.construct(|field, _| { let ty = &field.ty; if from_in { quote! { <#ty>::from_ocamlrep_in(value, alloc)? } } else { quote! { <#ty>::from_ocamlrep(value)? } } }); quote! { Ok(#constructor) } } syn::Fields::Named(_) | syn::Fields::Unnamed(_) => { let mut binding = 0; let constructor = variant.construct(|field, _| { if has_ocamlrep_skip_attr(&field.attrs) { quote!(::std::default::Default::default()) } else { let idx = binding; binding += 1; field_constructor(idx, from_in) } }); quote! 
{ let block = ::ocamlrep::from::expect_tuple(value, #binding)?; Ok(#constructor) } } } } struct EnumVariants<'a> { nullary_variants: Vec<(&'a synstructure::VariantInfo<'a>, isize)>, block_variants: Vec<(&'a synstructure::VariantInfo<'a>, isize)>, } fn collect_enum_variants<'a>(s: &'a synstructure::Structure<'_>) -> EnumVariants<'a> { // For tagging purposes, variant constructors of zero arguments are numbered // separately from variant constructors of one or more arguments, so we need // to count them separately to learn their tags. let mut nullary_variants = vec![]; let mut block_variants = vec![]; for variant in s.variants().iter() { if variant.bindings().is_empty() { nullary_variants.push((variant, nullary_variants.len() as isize)); } else { block_variants.push((variant, block_variants.len() as isize)); }; } // Block tags larger than this value indicate specific OCaml types (and tags // larger than 255 wouldn't fit in a u8 anyway). // See https://github.com/ocaml/ocaml/blob/3.08/utils/config.mlp#L55 assert!( block_variants.len() <= 246, "Too many non-constant enum variants -- maximum is 246" ); EnumVariants { nullary_variants, block_variants, } } fn enum_to_ocamlrep(s: &synstructure::Structure<'_>, variants: EnumVariants<'_>) -> TokenStream { let EnumVariants { nullary_variants, mut block_variants, } = variants; let mut all_variants = nullary_variants; all_variants.append(&mut block_variants); s.each_variant(|v| { let size = v.bindings().len(); let tag = { all_variants .iter() .find(|(var, _)| *var == v) .map(|(_, tag)| *tag) .unwrap() }; if size == 0 { quote!(::ocamlrep::Value::int(#tag)) } else { let tag = tag as u8; match get_boxed_tuple_len(v) { None => allocate_block(v, tag), Some(len) => boxed_tuple_variant_to_block(&v.bindings()[0], tag, len), } } }) } fn enum_from_ocamlrep(variants: EnumVariants<'_>, from_in: bool) -> TokenStream { let EnumVariants { nullary_variants, block_variants, } = variants; let max_nullary_tag = 
nullary_variants.len().saturating_sub(1); let max_block_tag = block_variants.len().saturating_sub(1) as u8; let mut nullary_arms = TokenStream::new(); for (variant, tag) in nullary_variants.iter() { let constructor = variant.construct(|_, _| quote!(unreachable!())); nullary_arms.extend(quote! { #tag => Ok(#constructor), }); } nullary_arms.extend(quote! { tag => Err(::ocamlrep::FromError::NullaryVariantTagOutOfRange { max: #max_nullary_tag, actual: tag, }) }); let mut block_arms = TokenStream::new(); for (variant, tag) in block_variants.iter() { let tag = *tag as u8; let (size, constructor) = match get_boxed_tuple_len(variant) { None => ( variant.bindings().len(), variant.construct(|_, i| field_constructor(i, from_in)), ), Some(len) => (len, boxed_tuple_variant_constructor(variant, len, from_in)), }; block_arms.extend(quote! { #tag => { ::ocamlrep::from::expect_block_size(block, #size)?; Ok(#constructor) } }); } block_arms.extend(quote! { tag => Err(::ocamlrep::FromError::BlockTagOutOfRange { max: #max_block_tag, actual: tag, }) }); match (nullary_variants.is_empty(), block_variants.is_empty()) { // An enum with no variants is not instantiable. (true, true) => panic!("cannot derive OcamlRep for non-instantiable enum"), // Nullary variants only. (false, true) => quote! { match ::ocamlrep::from::expect_int(value)? { #nullary_arms } }, // Block variants only. (true, false) => quote! { let block = ::ocamlrep::from::expect_block(value)?; match block.tag() { #block_arms } }, // Both nullary and block variants. (false, false) => quote! { if value.is_int() { match value.as_int().unwrap() { #nullary_arms } } else { let block = value.as_block().unwrap(); match block.tag() { #block_arms } } }, } } fn allocate_block(variant: &VariantInfo<'_>, tag: u8) -> TokenStream { let size = variant.bindings().len(); let mut fields = TokenStream::new(); for (i, bi) in variant.bindings().iter().enumerate() { fields.extend(quote! { arena.set_field(&mut block, #i, arena.add(#bi)); }); } quote! 
{ let mut block = arena.block_with_size_and_tag(#size, #tag); #fields block.build() } } fn boxed_tuple_variant_to_block(bi: &BindingInfo<'_>, tag: u8, len: usize) -> TokenStream { let mut fields = TokenStream::new(); for i in 0..len { let idx = syn::Index::from(i); fields.extend(quote! { arena.set_field(&mut block, #i, arena.add(&#bi.#idx)); }); } quote! { let mut block = arena.block_with_size_and_tag(#len, #tag); #fields block.build() } } fn field_constructor(index: usize, from_in: bool) -> TokenStream { if from_in { quote! { ::ocamlrep::from::field_in(block, #index, alloc)? } } else { quote! { ::ocamlrep::from::field(block, #index)? } } } fn boxed_tuple_variant_constructor( variant: &VariantInfo<'_>, len: usize, from_in: bool, ) -> TokenStream { let mut ident = TokenStream::new(); if let Some(prefix) = variant.prefix { ident.extend(quote!(#prefix ::)); } let id = variant.ast().ident; ident.extend(quote!(#id)); let mut fields = TokenStream::new(); for idx in 0..len { fields.extend(if from_in { quote! { ::ocamlrep::from::field_in(block, #idx, alloc)?, } } else { quote! { ::ocamlrep::from::field(block, #idx)?, } }) } if from_in { quote! { #ident(alloc.alloc((#fields))) } } else { quote! { #ident(::std::boxed::Box::new((#fields))) } } } fn get_boxed_tuple_len(variant: &VariantInfo<'_>) -> Option<usize> { use syn::Fields; use syn::GenericArgument; use syn::PathArguments; use syn::Type; use syn::TypePath; use syn::TypeReference; match &variant.ast().fields { Fields::Unnamed(_) => {} _ => return None, } let bi = match variant.bindings() { [bi] => bi, _ => return None, }; let tuple = match &bi.ast().ty { Type::Path(TypePath { path, .. 
}) => { let path_seg = match path.segments.first() { Some(s) if s.ident == "Box" => s, _ => return None, }; let args = match &path_seg.arguments { PathArguments::AngleBracketed(args) => args, _ => return None, }; match args.args.first() { Some(GenericArgument::Type(Type::Tuple(tuple))) => tuple, _ => return None, } } Type::Reference(TypeReference { elem, .. }) => match &**elem { Type::Tuple(tuple) => tuple, _ => return None, }, _ => return None, }; Some(tuple.elems.len()) } #[cfg(test)] mod tests { use anyhow::Result; use macro_test_util::assert_pat_eq; use synstructure::Structure; use super::*; #[test] fn basic_to() -> Result<()> { let input = quote! { struct A { a: i64, b: i64, #[ocamlrep(skip)] c: f64, d: String, } }; assert_pat_eq::<anyhow::Error>( Ok(derive_to_ocamlrep(Structure::new(&syn::parse2(input)?))), quote! { #[allow(non_local_definitions)] #[allow(non_upper_case_globals)] const _DERIVE_ocamlrep_ToOcamlRep_FOR_A: () = { impl ::ocamlrep::ToOcamlRep for A { fn to_ocamlrep<'__ocamlrep_derive_allocator, Alloc: ::ocamlrep::Allocator>( &'__ocamlrep_derive_allocator self, arena: &'__ocamlrep_derive_allocator Alloc, ) -> ::ocamlrep::Value<'__ocamlrep_derive_allocator> { use ::ocamlrep::Allocator; match *self { A { a: ref __binding_0, b: ref __binding_1, d: ref __binding_3, .. } => { let mut block = arena.block_with_size_and_tag(3usize, 0u8); arena.set_field(&mut block, 0usize, arena.add(__binding_0)); arena.set_field(&mut block, 1usize, arena.add(__binding_1)); arena.set_field(&mut block, 2usize, arena.add(__binding_3)); block.build() } } } } }; }, ); Ok(()) } #[test] fn basic_from() -> Result<()> { let input = quote! { struct A { a: i64, b: i64, #[ocamlrep(skip)] c: f64, d: String, } }; assert_pat_eq::<anyhow::Error>( Ok(derive_from_ocamlrep(Structure::new(&syn::parse2(input)?))), quote! 
{ #[allow(non_local_definitions)] #[allow(non_upper_case_globals)] const _DERIVE_ocamlrep_FromOcamlRep_FOR_A: () = { impl ::ocamlrep::FromOcamlRep for A { fn from_ocamlrep( value: ::ocamlrep::Value<'_> ) -> ::std::result::Result<Self, ::ocamlrep::FromError> { use ::ocamlrep::FromOcamlRep; let block = ::ocamlrep::from::expect_tuple(value, 3usize)?; Ok(A { a: ::ocamlrep::from::field(block, 0usize)?, b: ::ocamlrep::from::field(block, 1usize)?, c: ::std::default::Default::default(), d: ::ocamlrep::from::field(block, 2usize)?, }) } } }; }, ); Ok(()) } }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep_caml_builtins/lib.rs
ocamlrep_caml_builtins/lib.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use ocamlrep::*; use ocamlrep_custom::CustomOperations; use serde::Deserialize; use serde::Serialize; /// `Int64.t` conversion guide. /// /// Communicates that when the wrapped i64 is converted to an OCaml /// representation, the type `Int64.t` (representing a boxed 64-bit integer) /// should be used rather than the type `int` (which, on 64-bit architectures, /// is an unboxed 63-bit integer). #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] #[derive(Serialize, Deserialize)] pub struct Int64(pub i64); unsafe extern "C" { static mut caml_int64_ops: CustomOperations; } impl From<i64> for Int64 { fn from(x: i64) -> Self { Self(x) } } impl From<Int64> for i64 { fn from(x: Int64) -> i64 { x.0 } } impl ToOcamlRep for Int64 { fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> Value<'a> { let mut block = alloc.block_with_size_and_tag(2, CUSTOM_TAG); alloc.set_field(&mut block, 0, unsafe { Value::from_bits(std::ptr::addr_of!(caml_int64_ops) as usize) }); alloc.set_field(&mut block, 1, unsafe { Value::from_bits(self.0 as usize) }); block.build() } } impl FromOcamlRep for Int64 { fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> { let block = from::expect_block_with_size_and_tag(value, 2, CUSTOM_TAG)?; Ok(Self(block[1].to_bits() as i64)) } } impl<'a> FromOcamlRepIn<'a> for Int64 { fn from_ocamlrep_in(value: Value<'_>, _alloc: &'a ocamlrep::Bump) -> Result<Self, FromError> { Self::from_ocamlrep(value) } }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/shim/third-party/rust/fixups/libsqlite3-sys/bindgen.rs
shim/third-party/rust/fixups/libsqlite3-sys/bindgen.rs
/* automatically generated by rust-bindgen 0.69.4 */ extern "C" { pub fn sqlite3_auto_extension( xEntryPoint: ::std::option::Option< unsafe extern "C" fn( db: *mut sqlite3, pzErrMsg: *mut *mut ::std::os::raw::c_char, _: *const sqlite3_api_routines, ) -> ::std::os::raw::c_int, >, ) -> ::std::os::raw::c_int; } extern "C" { pub fn sqlite3_cancel_auto_extension( xEntryPoint: ::std::option::Option< unsafe extern "C" fn( db: *mut sqlite3, pzErrMsg: *mut *mut ::std::os::raw::c_char, _: *const sqlite3_api_routines, ) -> ::std::os::raw::c_int, >, ) -> ::std::os::raw::c_int; } pub const SQLITE_VERSION: &[u8; 7] = b"3.46.0\0"; pub const SQLITE_VERSION_NUMBER: i32 = 3046000; pub const SQLITE_SOURCE_ID: &[u8; 85] = b"2024-05-23 13:25:27 96c92aba00c8375bc32fafcdf12429c58bd8aabfcadab6683e35bbb9cdebf19e\0"; pub const SQLITE_OK: i32 = 0; pub const SQLITE_ERROR: i32 = 1; pub const SQLITE_INTERNAL: i32 = 2; pub const SQLITE_PERM: i32 = 3; pub const SQLITE_ABORT: i32 = 4; pub const SQLITE_BUSY: i32 = 5; pub const SQLITE_LOCKED: i32 = 6; pub const SQLITE_NOMEM: i32 = 7; pub const SQLITE_READONLY: i32 = 8; pub const SQLITE_INTERRUPT: i32 = 9; pub const SQLITE_IOERR: i32 = 10; pub const SQLITE_CORRUPT: i32 = 11; pub const SQLITE_NOTFOUND: i32 = 12; pub const SQLITE_FULL: i32 = 13; pub const SQLITE_CANTOPEN: i32 = 14; pub const SQLITE_PROTOCOL: i32 = 15; pub const SQLITE_EMPTY: i32 = 16; pub const SQLITE_SCHEMA: i32 = 17; pub const SQLITE_TOOBIG: i32 = 18; pub const SQLITE_CONSTRAINT: i32 = 19; pub const SQLITE_MISMATCH: i32 = 20; pub const SQLITE_MISUSE: i32 = 21; pub const SQLITE_NOLFS: i32 = 22; pub const SQLITE_AUTH: i32 = 23; pub const SQLITE_FORMAT: i32 = 24; pub const SQLITE_RANGE: i32 = 25; pub const SQLITE_NOTADB: i32 = 26; pub const SQLITE_NOTICE: i32 = 27; pub const SQLITE_WARNING: i32 = 28; pub const SQLITE_ROW: i32 = 100; pub const SQLITE_DONE: i32 = 101; pub const SQLITE_ERROR_MISSING_COLLSEQ: i32 = 257; pub const SQLITE_ERROR_RETRY: i32 = 513; pub const 
SQLITE_ERROR_SNAPSHOT: i32 = 769; pub const SQLITE_IOERR_READ: i32 = 266; pub const SQLITE_IOERR_SHORT_READ: i32 = 522; pub const SQLITE_IOERR_WRITE: i32 = 778; pub const SQLITE_IOERR_FSYNC: i32 = 1034; pub const SQLITE_IOERR_DIR_FSYNC: i32 = 1290; pub const SQLITE_IOERR_TRUNCATE: i32 = 1546; pub const SQLITE_IOERR_FSTAT: i32 = 1802; pub const SQLITE_IOERR_UNLOCK: i32 = 2058; pub const SQLITE_IOERR_RDLOCK: i32 = 2314; pub const SQLITE_IOERR_DELETE: i32 = 2570; pub const SQLITE_IOERR_BLOCKED: i32 = 2826; pub const SQLITE_IOERR_NOMEM: i32 = 3082; pub const SQLITE_IOERR_ACCESS: i32 = 3338; pub const SQLITE_IOERR_CHECKRESERVEDLOCK: i32 = 3594; pub const SQLITE_IOERR_LOCK: i32 = 3850; pub const SQLITE_IOERR_CLOSE: i32 = 4106; pub const SQLITE_IOERR_DIR_CLOSE: i32 = 4362; pub const SQLITE_IOERR_SHMOPEN: i32 = 4618; pub const SQLITE_IOERR_SHMSIZE: i32 = 4874; pub const SQLITE_IOERR_SHMLOCK: i32 = 5130; pub const SQLITE_IOERR_SHMMAP: i32 = 5386; pub const SQLITE_IOERR_SEEK: i32 = 5642; pub const SQLITE_IOERR_DELETE_NOENT: i32 = 5898; pub const SQLITE_IOERR_MMAP: i32 = 6154; pub const SQLITE_IOERR_GETTEMPPATH: i32 = 6410; pub const SQLITE_IOERR_CONVPATH: i32 = 6666; pub const SQLITE_IOERR_VNODE: i32 = 6922; pub const SQLITE_IOERR_AUTH: i32 = 7178; pub const SQLITE_IOERR_BEGIN_ATOMIC: i32 = 7434; pub const SQLITE_IOERR_COMMIT_ATOMIC: i32 = 7690; pub const SQLITE_IOERR_ROLLBACK_ATOMIC: i32 = 7946; pub const SQLITE_IOERR_DATA: i32 = 8202; pub const SQLITE_IOERR_CORRUPTFS: i32 = 8458; pub const SQLITE_IOERR_IN_PAGE: i32 = 8714; pub const SQLITE_LOCKED_SHAREDCACHE: i32 = 262; pub const SQLITE_LOCKED_VTAB: i32 = 518; pub const SQLITE_BUSY_RECOVERY: i32 = 261; pub const SQLITE_BUSY_SNAPSHOT: i32 = 517; pub const SQLITE_BUSY_TIMEOUT: i32 = 773; pub const SQLITE_CANTOPEN_NOTEMPDIR: i32 = 270; pub const SQLITE_CANTOPEN_ISDIR: i32 = 526; pub const SQLITE_CANTOPEN_FULLPATH: i32 = 782; pub const SQLITE_CANTOPEN_CONVPATH: i32 = 1038; pub const SQLITE_CANTOPEN_DIRTYWAL: i32 = 1294; pub 
const SQLITE_CANTOPEN_SYMLINK: i32 = 1550; pub const SQLITE_CORRUPT_VTAB: i32 = 267; pub const SQLITE_CORRUPT_SEQUENCE: i32 = 523; pub const SQLITE_CORRUPT_INDEX: i32 = 779; pub const SQLITE_READONLY_RECOVERY: i32 = 264; pub const SQLITE_READONLY_CANTLOCK: i32 = 520; pub const SQLITE_READONLY_ROLLBACK: i32 = 776; pub const SQLITE_READONLY_DBMOVED: i32 = 1032; pub const SQLITE_READONLY_CANTINIT: i32 = 1288; pub const SQLITE_READONLY_DIRECTORY: i32 = 1544; pub const SQLITE_ABORT_ROLLBACK: i32 = 516; pub const SQLITE_CONSTRAINT_CHECK: i32 = 275; pub const SQLITE_CONSTRAINT_COMMITHOOK: i32 = 531; pub const SQLITE_CONSTRAINT_FOREIGNKEY: i32 = 787; pub const SQLITE_CONSTRAINT_FUNCTION: i32 = 1043; pub const SQLITE_CONSTRAINT_NOTNULL: i32 = 1299; pub const SQLITE_CONSTRAINT_PRIMARYKEY: i32 = 1555; pub const SQLITE_CONSTRAINT_TRIGGER: i32 = 1811; pub const SQLITE_CONSTRAINT_UNIQUE: i32 = 2067; pub const SQLITE_CONSTRAINT_VTAB: i32 = 2323; pub const SQLITE_CONSTRAINT_ROWID: i32 = 2579; pub const SQLITE_CONSTRAINT_PINNED: i32 = 2835; pub const SQLITE_CONSTRAINT_DATATYPE: i32 = 3091; pub const SQLITE_NOTICE_RECOVER_WAL: i32 = 283; pub const SQLITE_NOTICE_RECOVER_ROLLBACK: i32 = 539; pub const SQLITE_NOTICE_RBU: i32 = 795; pub const SQLITE_WARNING_AUTOINDEX: i32 = 284; pub const SQLITE_AUTH_USER: i32 = 279; pub const SQLITE_OK_LOAD_PERMANENTLY: i32 = 256; pub const SQLITE_OK_SYMLINK: i32 = 512; pub const SQLITE_OPEN_READONLY: i32 = 1; pub const SQLITE_OPEN_READWRITE: i32 = 2; pub const SQLITE_OPEN_CREATE: i32 = 4; pub const SQLITE_OPEN_DELETEONCLOSE: i32 = 8; pub const SQLITE_OPEN_EXCLUSIVE: i32 = 16; pub const SQLITE_OPEN_AUTOPROXY: i32 = 32; pub const SQLITE_OPEN_URI: i32 = 64; pub const SQLITE_OPEN_MEMORY: i32 = 128; pub const SQLITE_OPEN_MAIN_DB: i32 = 256; pub const SQLITE_OPEN_TEMP_DB: i32 = 512; pub const SQLITE_OPEN_TRANSIENT_DB: i32 = 1024; pub const SQLITE_OPEN_MAIN_JOURNAL: i32 = 2048; pub const SQLITE_OPEN_TEMP_JOURNAL: i32 = 4096; pub const SQLITE_OPEN_SUBJOURNAL: 
i32 = 8192; pub const SQLITE_OPEN_SUPER_JOURNAL: i32 = 16384; pub const SQLITE_OPEN_NOMUTEX: i32 = 32768; pub const SQLITE_OPEN_FULLMUTEX: i32 = 65536; pub const SQLITE_OPEN_SHAREDCACHE: i32 = 131072; pub const SQLITE_OPEN_PRIVATECACHE: i32 = 262144; pub const SQLITE_OPEN_WAL: i32 = 524288; pub const SQLITE_OPEN_NOFOLLOW: i32 = 16777216; pub const SQLITE_OPEN_EXRESCODE: i32 = 33554432; pub const SQLITE_OPEN_MASTER_JOURNAL: i32 = 16384; pub const SQLITE_IOCAP_ATOMIC: i32 = 1; pub const SQLITE_IOCAP_ATOMIC512: i32 = 2; pub const SQLITE_IOCAP_ATOMIC1K: i32 = 4; pub const SQLITE_IOCAP_ATOMIC2K: i32 = 8; pub const SQLITE_IOCAP_ATOMIC4K: i32 = 16; pub const SQLITE_IOCAP_ATOMIC8K: i32 = 32; pub const SQLITE_IOCAP_ATOMIC16K: i32 = 64; pub const SQLITE_IOCAP_ATOMIC32K: i32 = 128; pub const SQLITE_IOCAP_ATOMIC64K: i32 = 256; pub const SQLITE_IOCAP_SAFE_APPEND: i32 = 512; pub const SQLITE_IOCAP_SEQUENTIAL: i32 = 1024; pub const SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN: i32 = 2048; pub const SQLITE_IOCAP_POWERSAFE_OVERWRITE: i32 = 4096; pub const SQLITE_IOCAP_IMMUTABLE: i32 = 8192; pub const SQLITE_IOCAP_BATCH_ATOMIC: i32 = 16384; pub const SQLITE_LOCK_NONE: i32 = 0; pub const SQLITE_LOCK_SHARED: i32 = 1; pub const SQLITE_LOCK_RESERVED: i32 = 2; pub const SQLITE_LOCK_PENDING: i32 = 3; pub const SQLITE_LOCK_EXCLUSIVE: i32 = 4; pub const SQLITE_SYNC_NORMAL: i32 = 2; pub const SQLITE_SYNC_FULL: i32 = 3; pub const SQLITE_SYNC_DATAONLY: i32 = 16; pub const SQLITE_FCNTL_LOCKSTATE: i32 = 1; pub const SQLITE_FCNTL_GET_LOCKPROXYFILE: i32 = 2; pub const SQLITE_FCNTL_SET_LOCKPROXYFILE: i32 = 3; pub const SQLITE_FCNTL_LAST_ERRNO: i32 = 4; pub const SQLITE_FCNTL_SIZE_HINT: i32 = 5; pub const SQLITE_FCNTL_CHUNK_SIZE: i32 = 6; pub const SQLITE_FCNTL_FILE_POINTER: i32 = 7; pub const SQLITE_FCNTL_SYNC_OMITTED: i32 = 8; pub const SQLITE_FCNTL_WIN32_AV_RETRY: i32 = 9; pub const SQLITE_FCNTL_PERSIST_WAL: i32 = 10; pub const SQLITE_FCNTL_OVERWRITE: i32 = 11; pub const SQLITE_FCNTL_VFSNAME: i32 = 12; pub 
const SQLITE_FCNTL_POWERSAFE_OVERWRITE: i32 = 13; pub const SQLITE_FCNTL_PRAGMA: i32 = 14; pub const SQLITE_FCNTL_BUSYHANDLER: i32 = 15; pub const SQLITE_FCNTL_TEMPFILENAME: i32 = 16; pub const SQLITE_FCNTL_MMAP_SIZE: i32 = 18; pub const SQLITE_FCNTL_TRACE: i32 = 19; pub const SQLITE_FCNTL_HAS_MOVED: i32 = 20; pub const SQLITE_FCNTL_SYNC: i32 = 21; pub const SQLITE_FCNTL_COMMIT_PHASETWO: i32 = 22; pub const SQLITE_FCNTL_WIN32_SET_HANDLE: i32 = 23; pub const SQLITE_FCNTL_WAL_BLOCK: i32 = 24; pub const SQLITE_FCNTL_ZIPVFS: i32 = 25; pub const SQLITE_FCNTL_RBU: i32 = 26; pub const SQLITE_FCNTL_VFS_POINTER: i32 = 27; pub const SQLITE_FCNTL_JOURNAL_POINTER: i32 = 28; pub const SQLITE_FCNTL_WIN32_GET_HANDLE: i32 = 29; pub const SQLITE_FCNTL_PDB: i32 = 30; pub const SQLITE_FCNTL_BEGIN_ATOMIC_WRITE: i32 = 31; pub const SQLITE_FCNTL_COMMIT_ATOMIC_WRITE: i32 = 32; pub const SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE: i32 = 33; pub const SQLITE_FCNTL_LOCK_TIMEOUT: i32 = 34; pub const SQLITE_FCNTL_DATA_VERSION: i32 = 35; pub const SQLITE_FCNTL_SIZE_LIMIT: i32 = 36; pub const SQLITE_FCNTL_CKPT_DONE: i32 = 37; pub const SQLITE_FCNTL_RESERVE_BYTES: i32 = 38; pub const SQLITE_FCNTL_CKPT_START: i32 = 39; pub const SQLITE_FCNTL_EXTERNAL_READER: i32 = 40; pub const SQLITE_FCNTL_CKSM_FILE: i32 = 41; pub const SQLITE_FCNTL_RESET_CACHE: i32 = 42; pub const SQLITE_GET_LOCKPROXYFILE: i32 = 2; pub const SQLITE_SET_LOCKPROXYFILE: i32 = 3; pub const SQLITE_LAST_ERRNO: i32 = 4; pub const SQLITE_ACCESS_EXISTS: i32 = 0; pub const SQLITE_ACCESS_READWRITE: i32 = 1; pub const SQLITE_ACCESS_READ: i32 = 2; pub const SQLITE_SHM_UNLOCK: i32 = 1; pub const SQLITE_SHM_LOCK: i32 = 2; pub const SQLITE_SHM_SHARED: i32 = 4; pub const SQLITE_SHM_EXCLUSIVE: i32 = 8; pub const SQLITE_SHM_NLOCK: i32 = 8; pub const SQLITE_CONFIG_SINGLETHREAD: i32 = 1; pub const SQLITE_CONFIG_MULTITHREAD: i32 = 2; pub const SQLITE_CONFIG_SERIALIZED: i32 = 3; pub const SQLITE_CONFIG_MALLOC: i32 = 4; pub const SQLITE_CONFIG_GETMALLOC: i32 
= 5; pub const SQLITE_CONFIG_SCRATCH: i32 = 6; pub const SQLITE_CONFIG_PAGECACHE: i32 = 7; pub const SQLITE_CONFIG_HEAP: i32 = 8; pub const SQLITE_CONFIG_MEMSTATUS: i32 = 9; pub const SQLITE_CONFIG_MUTEX: i32 = 10; pub const SQLITE_CONFIG_GETMUTEX: i32 = 11; pub const SQLITE_CONFIG_LOOKASIDE: i32 = 13; pub const SQLITE_CONFIG_PCACHE: i32 = 14; pub const SQLITE_CONFIG_GETPCACHE: i32 = 15; pub const SQLITE_CONFIG_LOG: i32 = 16; pub const SQLITE_CONFIG_URI: i32 = 17; pub const SQLITE_CONFIG_PCACHE2: i32 = 18; pub const SQLITE_CONFIG_GETPCACHE2: i32 = 19; pub const SQLITE_CONFIG_COVERING_INDEX_SCAN: i32 = 20; pub const SQLITE_CONFIG_SQLLOG: i32 = 21; pub const SQLITE_CONFIG_MMAP_SIZE: i32 = 22; pub const SQLITE_CONFIG_WIN32_HEAPSIZE: i32 = 23; pub const SQLITE_CONFIG_PCACHE_HDRSZ: i32 = 24; pub const SQLITE_CONFIG_PMASZ: i32 = 25; pub const SQLITE_CONFIG_STMTJRNL_SPILL: i32 = 26; pub const SQLITE_CONFIG_SMALL_MALLOC: i32 = 27; pub const SQLITE_CONFIG_SORTERREF_SIZE: i32 = 28; pub const SQLITE_CONFIG_MEMDB_MAXSIZE: i32 = 29; pub const SQLITE_CONFIG_ROWID_IN_VIEW: i32 = 30; pub const SQLITE_DBCONFIG_MAINDBNAME: i32 = 1000; pub const SQLITE_DBCONFIG_LOOKASIDE: i32 = 1001; pub const SQLITE_DBCONFIG_ENABLE_FKEY: i32 = 1002; pub const SQLITE_DBCONFIG_ENABLE_TRIGGER: i32 = 1003; pub const SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER: i32 = 1004; pub const SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION: i32 = 1005; pub const SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE: i32 = 1006; pub const SQLITE_DBCONFIG_ENABLE_QPSG: i32 = 1007; pub const SQLITE_DBCONFIG_TRIGGER_EQP: i32 = 1008; pub const SQLITE_DBCONFIG_RESET_DATABASE: i32 = 1009; pub const SQLITE_DBCONFIG_DEFENSIVE: i32 = 1010; pub const SQLITE_DBCONFIG_WRITABLE_SCHEMA: i32 = 1011; pub const SQLITE_DBCONFIG_LEGACY_ALTER_TABLE: i32 = 1012; pub const SQLITE_DBCONFIG_DQS_DML: i32 = 1013; pub const SQLITE_DBCONFIG_DQS_DDL: i32 = 1014; pub const SQLITE_DBCONFIG_ENABLE_VIEW: i32 = 1015; pub const SQLITE_DBCONFIG_LEGACY_FILE_FORMAT: i32 = 1016; pub const 
SQLITE_DBCONFIG_TRUSTED_SCHEMA: i32 = 1017; pub const SQLITE_DBCONFIG_STMT_SCANSTATUS: i32 = 1018; pub const SQLITE_DBCONFIG_REVERSE_SCANORDER: i32 = 1019; pub const SQLITE_DBCONFIG_MAX: i32 = 1019; pub const SQLITE_DENY: i32 = 1; pub const SQLITE_IGNORE: i32 = 2; pub const SQLITE_CREATE_INDEX: i32 = 1; pub const SQLITE_CREATE_TABLE: i32 = 2; pub const SQLITE_CREATE_TEMP_INDEX: i32 = 3; pub const SQLITE_CREATE_TEMP_TABLE: i32 = 4; pub const SQLITE_CREATE_TEMP_TRIGGER: i32 = 5; pub const SQLITE_CREATE_TEMP_VIEW: i32 = 6; pub const SQLITE_CREATE_TRIGGER: i32 = 7; pub const SQLITE_CREATE_VIEW: i32 = 8; pub const SQLITE_DELETE: i32 = 9; pub const SQLITE_DROP_INDEX: i32 = 10; pub const SQLITE_DROP_TABLE: i32 = 11; pub const SQLITE_DROP_TEMP_INDEX: i32 = 12; pub const SQLITE_DROP_TEMP_TABLE: i32 = 13; pub const SQLITE_DROP_TEMP_TRIGGER: i32 = 14; pub const SQLITE_DROP_TEMP_VIEW: i32 = 15; pub const SQLITE_DROP_TRIGGER: i32 = 16; pub const SQLITE_DROP_VIEW: i32 = 17; pub const SQLITE_INSERT: i32 = 18; pub const SQLITE_PRAGMA: i32 = 19; pub const SQLITE_READ: i32 = 20; pub const SQLITE_SELECT: i32 = 21; pub const SQLITE_TRANSACTION: i32 = 22; pub const SQLITE_UPDATE: i32 = 23; pub const SQLITE_ATTACH: i32 = 24; pub const SQLITE_DETACH: i32 = 25; pub const SQLITE_ALTER_TABLE: i32 = 26; pub const SQLITE_REINDEX: i32 = 27; pub const SQLITE_ANALYZE: i32 = 28; pub const SQLITE_CREATE_VTABLE: i32 = 29; pub const SQLITE_DROP_VTABLE: i32 = 30; pub const SQLITE_FUNCTION: i32 = 31; pub const SQLITE_SAVEPOINT: i32 = 32; pub const SQLITE_COPY: i32 = 0; pub const SQLITE_RECURSIVE: i32 = 33; pub const SQLITE_TRACE_STMT: i32 = 1; pub const SQLITE_TRACE_PROFILE: i32 = 2; pub const SQLITE_TRACE_ROW: i32 = 4; pub const SQLITE_TRACE_CLOSE: i32 = 8; pub const SQLITE_LIMIT_LENGTH: i32 = 0; pub const SQLITE_LIMIT_SQL_LENGTH: i32 = 1; pub const SQLITE_LIMIT_COLUMN: i32 = 2; pub const SQLITE_LIMIT_EXPR_DEPTH: i32 = 3; pub const SQLITE_LIMIT_COMPOUND_SELECT: i32 = 4; pub const 
SQLITE_LIMIT_VDBE_OP: i32 = 5; pub const SQLITE_LIMIT_FUNCTION_ARG: i32 = 6; pub const SQLITE_LIMIT_ATTACHED: i32 = 7; pub const SQLITE_LIMIT_LIKE_PATTERN_LENGTH: i32 = 8; pub const SQLITE_LIMIT_VARIABLE_NUMBER: i32 = 9; pub const SQLITE_LIMIT_TRIGGER_DEPTH: i32 = 10; pub const SQLITE_LIMIT_WORKER_THREADS: i32 = 11; pub const SQLITE_PREPARE_PERSISTENT: ::std::os::raw::c_uint = 1; pub const SQLITE_PREPARE_NORMALIZE: ::std::os::raw::c_uint = 2; pub const SQLITE_PREPARE_NO_VTAB: ::std::os::raw::c_uint = 4; pub const SQLITE_INTEGER: i32 = 1; pub const SQLITE_FLOAT: i32 = 2; pub const SQLITE_BLOB: i32 = 4; pub const SQLITE_NULL: i32 = 5; pub const SQLITE_TEXT: i32 = 3; pub const SQLITE3_TEXT: i32 = 3; pub const SQLITE_UTF8: i32 = 1; pub const SQLITE_UTF16LE: i32 = 2; pub const SQLITE_UTF16BE: i32 = 3; pub const SQLITE_UTF16: i32 = 4; pub const SQLITE_ANY: i32 = 5; pub const SQLITE_UTF16_ALIGNED: i32 = 8; pub const SQLITE_DETERMINISTIC: i32 = 2048; pub const SQLITE_DIRECTONLY: i32 = 524288; pub const SQLITE_SUBTYPE: i32 = 1048576; pub const SQLITE_INNOCUOUS: i32 = 2097152; pub const SQLITE_RESULT_SUBTYPE: i32 = 16777216; pub const SQLITE_WIN32_DATA_DIRECTORY_TYPE: i32 = 1; pub const SQLITE_WIN32_TEMP_DIRECTORY_TYPE: i32 = 2; pub const SQLITE_TXN_NONE: i32 = 0; pub const SQLITE_TXN_READ: i32 = 1; pub const SQLITE_TXN_WRITE: i32 = 2; pub const SQLITE_INDEX_SCAN_UNIQUE: i32 = 1; pub const SQLITE_INDEX_CONSTRAINT_EQ: i32 = 2; pub const SQLITE_INDEX_CONSTRAINT_GT: i32 = 4; pub const SQLITE_INDEX_CONSTRAINT_LE: i32 = 8; pub const SQLITE_INDEX_CONSTRAINT_LT: i32 = 16; pub const SQLITE_INDEX_CONSTRAINT_GE: i32 = 32; pub const SQLITE_INDEX_CONSTRAINT_MATCH: i32 = 64; pub const SQLITE_INDEX_CONSTRAINT_LIKE: i32 = 65; pub const SQLITE_INDEX_CONSTRAINT_GLOB: i32 = 66; pub const SQLITE_INDEX_CONSTRAINT_REGEXP: i32 = 67; pub const SQLITE_INDEX_CONSTRAINT_NE: i32 = 68; pub const SQLITE_INDEX_CONSTRAINT_ISNOT: i32 = 69; pub const SQLITE_INDEX_CONSTRAINT_ISNOTNULL: i32 = 70; pub const 
SQLITE_INDEX_CONSTRAINT_ISNULL: i32 = 71; pub const SQLITE_INDEX_CONSTRAINT_IS: i32 = 72; pub const SQLITE_INDEX_CONSTRAINT_LIMIT: i32 = 73; pub const SQLITE_INDEX_CONSTRAINT_OFFSET: i32 = 74; pub const SQLITE_INDEX_CONSTRAINT_FUNCTION: i32 = 150; pub const SQLITE_MUTEX_FAST: i32 = 0; pub const SQLITE_MUTEX_RECURSIVE: i32 = 1; pub const SQLITE_MUTEX_STATIC_MAIN: i32 = 2; pub const SQLITE_MUTEX_STATIC_MEM: i32 = 3; pub const SQLITE_MUTEX_STATIC_MEM2: i32 = 4; pub const SQLITE_MUTEX_STATIC_OPEN: i32 = 4; pub const SQLITE_MUTEX_STATIC_PRNG: i32 = 5; pub const SQLITE_MUTEX_STATIC_LRU: i32 = 6; pub const SQLITE_MUTEX_STATIC_LRU2: i32 = 7; pub const SQLITE_MUTEX_STATIC_PMEM: i32 = 7; pub const SQLITE_MUTEX_STATIC_APP1: i32 = 8; pub const SQLITE_MUTEX_STATIC_APP2: i32 = 9; pub const SQLITE_MUTEX_STATIC_APP3: i32 = 10; pub const SQLITE_MUTEX_STATIC_VFS1: i32 = 11; pub const SQLITE_MUTEX_STATIC_VFS2: i32 = 12; pub const SQLITE_MUTEX_STATIC_VFS3: i32 = 13; pub const SQLITE_MUTEX_STATIC_MASTER: i32 = 2; pub const SQLITE_TESTCTRL_FIRST: i32 = 5; pub const SQLITE_TESTCTRL_PRNG_SAVE: i32 = 5; pub const SQLITE_TESTCTRL_PRNG_RESTORE: i32 = 6; pub const SQLITE_TESTCTRL_PRNG_RESET: i32 = 7; pub const SQLITE_TESTCTRL_FK_NO_ACTION: i32 = 7; pub const SQLITE_TESTCTRL_BITVEC_TEST: i32 = 8; pub const SQLITE_TESTCTRL_FAULT_INSTALL: i32 = 9; pub const SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS: i32 = 10; pub const SQLITE_TESTCTRL_PENDING_BYTE: i32 = 11; pub const SQLITE_TESTCTRL_ASSERT: i32 = 12; pub const SQLITE_TESTCTRL_ALWAYS: i32 = 13; pub const SQLITE_TESTCTRL_RESERVE: i32 = 14; pub const SQLITE_TESTCTRL_JSON_SELFCHECK: i32 = 14; pub const SQLITE_TESTCTRL_OPTIMIZATIONS: i32 = 15; pub const SQLITE_TESTCTRL_ISKEYWORD: i32 = 16; pub const SQLITE_TESTCTRL_SCRATCHMALLOC: i32 = 17; pub const SQLITE_TESTCTRL_INTERNAL_FUNCTIONS: i32 = 17; pub const SQLITE_TESTCTRL_LOCALTIME_FAULT: i32 = 18; pub const SQLITE_TESTCTRL_EXPLAIN_STMT: i32 = 19; pub const SQLITE_TESTCTRL_ONCE_RESET_THRESHOLD: i32 = 19; 
pub const SQLITE_TESTCTRL_NEVER_CORRUPT: i32 = 20; pub const SQLITE_TESTCTRL_VDBE_COVERAGE: i32 = 21; pub const SQLITE_TESTCTRL_BYTEORDER: i32 = 22; pub const SQLITE_TESTCTRL_ISINIT: i32 = 23; pub const SQLITE_TESTCTRL_SORTER_MMAP: i32 = 24; pub const SQLITE_TESTCTRL_IMPOSTER: i32 = 25; pub const SQLITE_TESTCTRL_PARSER_COVERAGE: i32 = 26; pub const SQLITE_TESTCTRL_RESULT_INTREAL: i32 = 27; pub const SQLITE_TESTCTRL_PRNG_SEED: i32 = 28; pub const SQLITE_TESTCTRL_EXTRA_SCHEMA_CHECKS: i32 = 29; pub const SQLITE_TESTCTRL_SEEK_COUNT: i32 = 30; pub const SQLITE_TESTCTRL_TRACEFLAGS: i32 = 31; pub const SQLITE_TESTCTRL_TUNE: i32 = 32; pub const SQLITE_TESTCTRL_LOGEST: i32 = 33; pub const SQLITE_TESTCTRL_USELONGDOUBLE: i32 = 34; pub const SQLITE_TESTCTRL_LAST: i32 = 34; pub const SQLITE_STATUS_MEMORY_USED: i32 = 0; pub const SQLITE_STATUS_PAGECACHE_USED: i32 = 1; pub const SQLITE_STATUS_PAGECACHE_OVERFLOW: i32 = 2; pub const SQLITE_STATUS_SCRATCH_USED: i32 = 3; pub const SQLITE_STATUS_SCRATCH_OVERFLOW: i32 = 4; pub const SQLITE_STATUS_MALLOC_SIZE: i32 = 5; pub const SQLITE_STATUS_PARSER_STACK: i32 = 6; pub const SQLITE_STATUS_PAGECACHE_SIZE: i32 = 7; pub const SQLITE_STATUS_SCRATCH_SIZE: i32 = 8; pub const SQLITE_STATUS_MALLOC_COUNT: i32 = 9; pub const SQLITE_DBSTATUS_LOOKASIDE_USED: i32 = 0; pub const SQLITE_DBSTATUS_CACHE_USED: i32 = 1; pub const SQLITE_DBSTATUS_SCHEMA_USED: i32 = 2; pub const SQLITE_DBSTATUS_STMT_USED: i32 = 3; pub const SQLITE_DBSTATUS_LOOKASIDE_HIT: i32 = 4; pub const SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE: i32 = 5; pub const SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL: i32 = 6; pub const SQLITE_DBSTATUS_CACHE_HIT: i32 = 7; pub const SQLITE_DBSTATUS_CACHE_MISS: i32 = 8; pub const SQLITE_DBSTATUS_CACHE_WRITE: i32 = 9; pub const SQLITE_DBSTATUS_DEFERRED_FKS: i32 = 10; pub const SQLITE_DBSTATUS_CACHE_USED_SHARED: i32 = 11; pub const SQLITE_DBSTATUS_CACHE_SPILL: i32 = 12; pub const SQLITE_DBSTATUS_MAX: i32 = 12; pub const SQLITE_STMTSTATUS_FULLSCAN_STEP: i32 = 1; pub 
const SQLITE_STMTSTATUS_SORT: i32 = 2; pub const SQLITE_STMTSTATUS_AUTOINDEX: i32 = 3; pub const SQLITE_STMTSTATUS_VM_STEP: i32 = 4; pub const SQLITE_STMTSTATUS_REPREPARE: i32 = 5; pub const SQLITE_STMTSTATUS_RUN: i32 = 6; pub const SQLITE_STMTSTATUS_FILTER_MISS: i32 = 7; pub const SQLITE_STMTSTATUS_FILTER_HIT: i32 = 8; pub const SQLITE_STMTSTATUS_MEMUSED: i32 = 99; pub const SQLITE_CHECKPOINT_PASSIVE: i32 = 0; pub const SQLITE_CHECKPOINT_FULL: i32 = 1; pub const SQLITE_CHECKPOINT_RESTART: i32 = 2; pub const SQLITE_CHECKPOINT_TRUNCATE: i32 = 3; pub const SQLITE_VTAB_CONSTRAINT_SUPPORT: i32 = 1; pub const SQLITE_VTAB_INNOCUOUS: i32 = 2; pub const SQLITE_VTAB_DIRECTONLY: i32 = 3; pub const SQLITE_VTAB_USES_ALL_SCHEMAS: i32 = 4; pub const SQLITE_ROLLBACK: i32 = 1; pub const SQLITE_FAIL: i32 = 3; pub const SQLITE_REPLACE: i32 = 5; pub const SQLITE_SCANSTAT_NLOOP: i32 = 0; pub const SQLITE_SCANSTAT_NVISIT: i32 = 1; pub const SQLITE_SCANSTAT_EST: i32 = 2; pub const SQLITE_SCANSTAT_NAME: i32 = 3; pub const SQLITE_SCANSTAT_EXPLAIN: i32 = 4; pub const SQLITE_SCANSTAT_SELECTID: i32 = 5; pub const SQLITE_SCANSTAT_PARENTID: i32 = 6; pub const SQLITE_SCANSTAT_NCYCLE: i32 = 7; pub const SQLITE_SCANSTAT_COMPLEX: i32 = 1; pub const SQLITE_SERIALIZE_NOCOPY: ::std::os::raw::c_uint = 1; pub const SQLITE_DESERIALIZE_FREEONCLOSE: ::std::os::raw::c_uint = 1; pub const SQLITE_DESERIALIZE_RESIZEABLE: ::std::os::raw::c_uint = 2; pub const SQLITE_DESERIALIZE_READONLY: ::std::os::raw::c_uint = 4; pub const NOT_WITHIN: i32 = 0; pub const PARTLY_WITHIN: i32 = 1; pub const FULLY_WITHIN: i32 = 2; pub const SQLITE_SESSION_OBJCONFIG_SIZE: i32 = 1; pub const SQLITE_SESSION_OBJCONFIG_ROWID: i32 = 2; pub const SQLITE_CHANGESETSTART_INVERT: i32 = 2; pub const SQLITE_CHANGESETAPPLY_NOSAVEPOINT: i32 = 1; pub const SQLITE_CHANGESETAPPLY_INVERT: i32 = 2; pub const SQLITE_CHANGESETAPPLY_IGNORENOOP: i32 = 4; pub const SQLITE_CHANGESETAPPLY_FKNOACTION: i32 = 8; pub const SQLITE_CHANGESET_DATA: i32 = 1; pub 
const SQLITE_CHANGESET_NOTFOUND: i32 = 2; pub const SQLITE_CHANGESET_CONFLICT: i32 = 3; pub const SQLITE_CHANGESET_CONSTRAINT: i32 = 4; pub const SQLITE_CHANGESET_FOREIGN_KEY: i32 = 5; pub const SQLITE_CHANGESET_OMIT: i32 = 0; pub const SQLITE_CHANGESET_REPLACE: i32 = 1; pub const SQLITE_CHANGESET_ABORT: i32 = 2; pub const SQLITE_SESSION_CONFIG_STRMSIZE: i32 = 1; pub const FTS5_TOKENIZE_QUERY: i32 = 1; pub const FTS5_TOKENIZE_PREFIX: i32 = 2; pub const FTS5_TOKENIZE_DOCUMENT: i32 = 4; pub const FTS5_TOKENIZE_AUX: i32 = 8; pub const FTS5_TOKEN_COLOCATED: i32 = 1; extern "C" { pub static sqlite3_version: [::std::os::raw::c_char; 0usize]; } extern "C" { pub fn sqlite3_libversion() -> *const ::std::os::raw::c_char; } extern "C" { pub fn sqlite3_sourceid() -> *const ::std::os::raw::c_char; } extern "C" { pub fn sqlite3_libversion_number() -> ::std::os::raw::c_int; } extern "C" { pub fn sqlite3_compileoption_used( zOptName: *const ::std::os::raw::c_char, ) -> ::std::os::raw::c_int; } extern "C" { pub fn sqlite3_compileoption_get(N: ::std::os::raw::c_int) -> *const ::std::os::raw::c_char; } extern "C" { pub fn sqlite3_threadsafe() -> ::std::os::raw::c_int; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3 { _unused: [u8; 0], } pub type sqlite_int64 = ::std::os::raw::c_longlong; pub type sqlite_uint64 = ::std::os::raw::c_ulonglong; pub type sqlite3_int64 = sqlite_int64; pub type sqlite3_uint64 = sqlite_uint64; extern "C" { pub fn sqlite3_close(arg1: *mut sqlite3) -> ::std::os::raw::c_int; } pub type sqlite3_callback = ::std::option::Option< unsafe extern "C" fn( arg1: *mut ::std::os::raw::c_void, arg2: ::std::os::raw::c_int, arg3: *mut *mut ::std::os::raw::c_char, arg4: *mut *mut ::std::os::raw::c_char, ) -> ::std::os::raw::c_int, >; extern "C" { pub fn sqlite3_exec( arg1: *mut sqlite3, sql: *const ::std::os::raw::c_char, callback: ::std::option::Option< unsafe extern "C" fn( arg1: *mut ::std::os::raw::c_void, arg2: ::std::os::raw::c_int, arg3: *mut *mut 
::std::os::raw::c_char, arg4: *mut *mut ::std::os::raw::c_char, ) -> ::std::os::raw::c_int, >, arg2: *mut ::std::os::raw::c_void, errmsg: *mut *mut ::std::os::raw::c_char, ) -> ::std::os::raw::c_int; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_file { pub pMethods: *const sqlite3_io_methods, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_io_methods { pub iVersion: ::std::os::raw::c_int, pub xClose: ::std::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_file) -> ::std::os::raw::c_int, >, pub xRead: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, arg2: *mut ::std::os::raw::c_void, iAmt: ::std::os::raw::c_int, iOfst: sqlite3_int64, ) -> ::std::os::raw::c_int, >, pub xWrite: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, arg2: *const ::std::os::raw::c_void, iAmt: ::std::os::raw::c_int, iOfst: sqlite3_int64, ) -> ::std::os::raw::c_int, >, pub xTruncate: ::std::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_file, size: sqlite3_int64) -> ::std::os::raw::c_int, >, pub xSync: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, flags: ::std::os::raw::c_int, ) -> ::std::os::raw::c_int, >, pub xFileSize: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, pSize: *mut sqlite3_int64, ) -> ::std::os::raw::c_int, >, pub xLock: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, arg2: ::std::os::raw::c_int, ) -> ::std::os::raw::c_int, >, pub xUnlock: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, arg2: ::std::os::raw::c_int, ) -> ::std::os::raw::c_int, >, pub xCheckReservedLock: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, pResOut: *mut ::std::os::raw::c_int, ) -> ::std::os::raw::c_int, >, pub xFileControl: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, op: ::std::os::raw::c_int, pArg: *mut ::std::os::raw::c_void, ) -> ::std::os::raw::c_int, >, pub xSectorSize: 
::std::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_file) -> ::std::os::raw::c_int, >, pub xDeviceCharacteristics: ::std::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_file) -> ::std::os::raw::c_int, >, pub xShmMap: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, iPg: ::std::os::raw::c_int, pgsz: ::std::os::raw::c_int, arg2: ::std::os::raw::c_int, arg3: *mut *mut ::std::os::raw::c_void, ) -> ::std::os::raw::c_int, >, pub xShmLock: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, offset: ::std::os::raw::c_int, n: ::std::os::raw::c_int, flags: ::std::os::raw::c_int, ) -> ::std::os::raw::c_int, >, pub xShmBarrier: ::std::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_file)>, pub xShmUnmap: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, deleteFlag: ::std::os::raw::c_int, ) -> ::std::os::raw::c_int, >, pub xFetch: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, iOfst: sqlite3_int64, iAmt: ::std::os::raw::c_int, pp: *mut *mut ::std::os::raw::c_void, ) -> ::std::os::raw::c_int, >, pub xUnfetch: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, iOfst: sqlite3_int64, p: *mut ::std::os::raw::c_void, ) -> ::std::os::raw::c_int, >, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_mutex { _unused: [u8; 0], } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_api_routines { _unused: [u8; 0], } pub type sqlite3_filename = *const ::std::os::raw::c_char; pub type sqlite3_syscall_ptr = ::std::option::Option<unsafe extern "C" fn()>; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_vfs { pub iVersion: ::std::os::raw::c_int, pub szOsFile: ::std::os::raw::c_int, pub mxPathname: ::std::os::raw::c_int, pub pNext: *mut sqlite3_vfs, pub zName: *const ::std::os::raw::c_char, pub pAppData: *mut ::std::os::raw::c_void, pub xOpen: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, zName: sqlite3_filename, 
arg2: *mut sqlite3_file, flags: ::std::os::raw::c_int, pOutFlags: *mut ::std::os::raw::c_int, ) -> ::std::os::raw::c_int, >, pub xDelete: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, zName: *const ::std::os::raw::c_char, syncDir: ::std::os::raw::c_int, ) -> ::std::os::raw::c_int, >, pub xAccess: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, zName: *const ::std::os::raw::c_char, flags: ::std::os::raw::c_int, pResOut: *mut ::std::os::raw::c_int, ) -> ::std::os::raw::c_int, >, pub xFullPathname: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, zName: *const ::std::os::raw::c_char, nOut: ::std::os::raw::c_int, zOut: *mut ::std::os::raw::c_char, ) -> ::std::os::raw::c_int, >, pub xDlOpen: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, zFilename: *const ::std::os::raw::c_char, ) -> *mut ::std::os::raw::c_void, >, pub xDlError: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, nByte: ::std::os::raw::c_int, zErrMsg: *mut ::std::os::raw::c_char, ), >, pub xDlSym: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, arg2: *mut ::std::os::raw::c_void, zSymbol: *const ::std::os::raw::c_char, ) -> ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, arg2: *mut ::std::os::raw::c_void, zSymbol: *const ::std::os::raw::c_char, ), >, >, pub xDlClose: ::std::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_vfs, arg2: *mut ::std::os::raw::c_void), >, pub xRandomness: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, nByte: ::std::os::raw::c_int, zOut: *mut ::std::os::raw::c_char, ) -> ::std::os::raw::c_int, >, pub xSleep: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, microseconds: ::std::os::raw::c_int, ) -> ::std::os::raw::c_int, >, pub xCurrentTime: ::std::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_vfs, arg2: *mut f64) -> ::std::os::raw::c_int, >, pub xGetLastError: 
::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, arg2: ::std::os::raw::c_int, arg3: *mut ::std::os::raw::c_char, ) -> ::std::os::raw::c_int, >, pub xCurrentTimeInt64: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, arg2: *mut sqlite3_int64, ) -> ::std::os::raw::c_int, >, pub xSetSystemCall: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, zName: *const ::std::os::raw::c_char, arg2: sqlite3_syscall_ptr, ) -> ::std::os::raw::c_int, >, pub xGetSystemCall: ::std::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, zName: *const ::std::os::raw::c_char, ) -> sqlite3_syscall_ptr,
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
true
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/shim/third-party/rust/top/main.rs
shim/third-party/rust/top/main.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. #![allow(unused_crate_dependencies)] fn main() {}
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/macro_test_util/macro_test_util.rs
macro_test_util/macro_test_util.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use std::fmt::Display; use proc_macro2::TokenStream; use proc_macro2::TokenTree; fn mismatch(ta: Option<TokenTree>, tb: Option<TokenTree>, ax: &TokenStream, bx: &TokenStream) -> ! { panic!( "Mismatch!\nLeft: {}\nRight: {}\nwhen comparing\nLeft: {}\nRight: {}\n", ta.map_or("None".into(), |t| t.to_string()), tb.map_or("None".into(), |t| t.to_string()), ax, bx ); } pub fn assert_pat_eq<E: Display>(a: Result<TokenStream, E>, b: TokenStream) { let a = match a { Err(err) => { panic!("Unexpected error '{}'", err); } Ok(ok) => ok, }; fn inner_cmp(a: TokenStream, b: TokenStream, ax: &TokenStream, bx: &TokenStream) { let mut ia = a.into_iter(); let mut ib = b.into_iter(); loop { let t_a = ia.next(); let t_b = ib.next(); if t_a.is_none() && t_b.is_none() { break; } if t_a.is_none() || t_b.is_none() { mismatch(t_a, t_b, ax, bx); } let t_a = t_a.unwrap(); let t_b = t_b.unwrap(); match (&t_a, &t_b) { (TokenTree::Ident(a), TokenTree::Ident(b)) if a == b => {} (TokenTree::Literal(a), TokenTree::Literal(b)) if a.to_string() == b.to_string() => {} (TokenTree::Punct(a), TokenTree::Punct(b)) if a.to_string() == b.to_string() => {} (TokenTree::Group(ga), TokenTree::Group(gb)) if ga.delimiter() == gb.delimiter() => { inner_cmp(ga.stream(), gb.stream(), ax, bx); } (TokenTree::Group(_), _) | (TokenTree::Ident(_), _) | (TokenTree::Punct(_), _) | (TokenTree::Literal(_), _) => mismatch(Some(t_a), Some(t_b), ax, bx), } } } inner_cmp(a.clone(), b.clone(), &a, &b); } pub fn assert_error<E: Display>(a: Result<TokenStream, E>, b: &str) { match a { Ok(a) => panic!("Expected error but got:\n{}", a), Err(e) => { let a = format!("{}", e); assert_eq!(a, b, "Incorrect error") } } }
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false
facebook/ocamlrep
https://github.com/facebook/ocamlrep/blob/3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6/ocamlrep_ocamlpool/lib.rs
ocamlrep_ocamlpool/lib.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use std::ffi::CString; use std::panic::UnwindSafe; pub use bumpalo::Bump; use ocamlrep::Allocator; use ocamlrep::BlockBuilder; pub use ocamlrep::FromOcamlRep; pub use ocamlrep::FromOcamlRepIn; use ocamlrep::MemoizationCache; use ocamlrep::ToOcamlRep; pub use ocamlrep::Value; unsafe extern "C" { fn ocamlpool_enter(); fn ocamlpool_leave(); fn ocamlpool_reserve_block(tag: u8, size: usize) -> usize; fn caml_failwith(msg: *const i8); fn caml_initialize(addr: *mut usize, value: usize); static ocamlpool_generation: usize; pub fn caml_named_value(name: *const std::ffi::c_char) -> *mut usize; pub fn caml_callbackN_exn(closure: usize, n: std::ffi::c_int, args: *const usize) -> usize; } pub struct Pool { cache: MemoizationCache, } impl Pool { /// Prepare the ocamlpool library to allocate values directly on the OCaml /// runtime's garbage-collected heap. /// /// # Safety /// /// The OCaml runtime is not thread-safe, and this function will interact /// with it. If any other thread interacts with the OCaml runtime or /// ocamlpool library during the lifetime of the `Pool`, undefined behavior /// will result. 
#[inline(always)] pub unsafe fn new() -> Self { unsafe { ocamlpool_enter(); } Self { cache: MemoizationCache::new(), } } #[inline(always)] pub fn add<'a, T: ToOcamlRep + ?Sized>(&'a self, value: &'a T) -> Value<'a> { value.to_ocamlrep(self) } } impl Drop for Pool { #[inline(always)] fn drop(&mut self) { unsafe { ocamlpool_leave() }; } } impl Allocator for Pool { #[inline(always)] fn generation(&self) -> usize { unsafe { ocamlpool_generation } } #[inline(always)] fn block_with_size_and_tag(&self, size: usize, tag: u8) -> BlockBuilder<'_> { let ptr = unsafe { ocamlpool_reserve_block(tag, size) as *mut Value<'_> }; BlockBuilder::new(unsafe { std::slice::from_raw_parts_mut(ptr, size) }) } #[inline(always)] fn set_field<'a>(&self, block: &mut BlockBuilder<'a>, index: usize, value: Value<'a>) { assert!(index < block.size()); unsafe { caml_initialize( (self.block_ptr_mut(block) as *mut usize).add(index), value.to_bits(), ) }; } unsafe fn block_ptr_mut<'a>(&self, block: &mut BlockBuilder<'a>) -> *mut Value<'a> { block.address() as *mut _ } fn memoized<'a>( &'a self, ptr: usize, size: usize, f: impl FnOnce(&'a Self) -> Value<'a>, ) -> Value<'a> { let bits = self.cache.memoized(ptr, size, || f(self).to_bits()); // SAFETY: The only memoized values in the cache are those computed in // the closure on the previous line. Since f returns Value<'a>, any // cached bits must represent a valid Value<'a>, unsafe { Value::from_bits(bits) } } fn add_root<'a, T: ToOcamlRep + ?Sized>(&'a self, value: &'a T) -> Value<'a> { self.cache.with_cache(|| value.to_ocamlrep(self)) } } /// Convert the given value to an OCaml value on the OCaml runtime's /// garbage-collected heap. /// /// # Safety /// /// The OCaml runtime is not thread-safe, and this function will interact with /// it. If any other thread interacts with the OCaml runtime or ocamlpool /// library during the execution of `to_ocaml`, undefined behavior will result. /// /// # Panics /// /// Panics upon attempts to re-enter `to_ocaml`. 
#[inline(always)] pub unsafe fn to_ocaml<T: ToOcamlRep + ?Sized>(value: &T) -> usize { let pool = unsafe { Pool::new() }; let result = pool.add_root(value); result.to_bits() } /// Catches panics in `f` and raises a OCaml exception of type Failure /// with the panic message (if the panic was raised with a `&str` or `String`). pub fn catch_unwind(f: impl FnOnce() -> usize + UnwindSafe) -> usize { catch_unwind_with_handler(f, |msg: &str| -> Result<usize, String> { Err(msg.into()) }) } /// Catches panics in `f` and raises a OCaml exception of type Failure /// with the panic message (if the panic was raised with a `&str` or `String`). /// `h` handles panic msg, it may re-raise by returning Err. pub fn catch_unwind_with_handler( f: impl FnOnce() -> usize + UnwindSafe, h: impl FnOnce(&str) -> Result<usize, String>, ) -> usize { let err = match std::panic::catch_unwind(f) { Ok(value) => return value, Err(err) => err, }; let msg: &str = if let Some(s) = err.downcast_ref::<&str>() { s } else if let Some(s) = err.downcast_ref::<String>() { s.as_str() } else { // TODO: Build a smarter message in this case (using panic::set_hook?) "Panicked with non-string object" }; match h(msg) { Ok(value) => return value, Err(err) => unsafe { let msg = CString::new(err).unwrap(); caml_failwith(msg.as_ptr().cast()); }, } unreachable!(); } /// Assume that some Pool exists in some parent scope. Since ocamlpool is /// implemented with statics, we don't need a reference to that pool to write to /// it. /// /// Does not preserve sharing of values referred to by multiple references or /// Rcs (but sharing is preserved for `ocamlrep::rc::RcOc`). /// /// # Safety /// /// The OCaml runtime is not thread-safe, and this function will interact with /// it. If any other thread interacts with the OCaml runtime or ocamlpool /// library during the execution of this function, undefined behavior will /// result. 
// Convert `value` to its OCaml representation using a `Pool` constructed
// field-by-field (bypassing the usual constructor), then leak that `Pool`
// with `mem::forget` so its `Drop` impl never runs.
// NOTE(review): presumably `Pool::new`/`Drop` perform ocamlpool enter/leave
// bookkeeping that the caller of this function has already done — confirm
// against the `Pool` definition elsewhere in this file.
#[inline(always)]
pub unsafe fn add_to_ambient_pool<T: ToOcamlRep>(value: &T) -> usize {
    // Only the memoization cache is initialized; no enter/leave is performed.
    let fake_pool = Pool {
        cache: MemoizationCache::new(),
    };
    // Convert and keep the raw OCaml value bits.
    let result = value.to_ocamlrep(&fake_pool).to_bits();
    // Skip `fake_pool`'s Drop — see NOTE above.
    std::mem::forget(fake_pool);
    result
}

/// Check if an OCaml value is an exception.
///
/// For internal purposes.
pub fn is_exception_result(v: usize) -> bool {
    // Tag test on the low two bits of the raw value.
    v & 3 == 2
}

// Implementation detail of `ocaml_ffi!` (see that macro's documentation).
// Each rule generates a `#[no_mangle] extern "C"` entry point that:
//   1. converts every OCaml argument to its Rust type via `FromOcamlRep`,
//   2. runs the user-supplied body,
//   3. converts the result back to an OCaml value with `to_ocaml`,
// all inside `$crate::catch_unwind` so Rust panics surface as OCaml
// exceptions rather than unwinding across the FFI boundary.
#[macro_export]
macro_rules! ocaml_ffi_fn {
    // One or more parameters, explicit return type.
    (fn $name:ident($($param:ident: $ty:ty),+ $(,)?) -> $ret:ty $code:block) => {
        #[unsafe(no_mangle)]
        pub unsafe extern "C" fn $name ($($param: usize,)*) -> usize {
            $crate::catch_unwind(|| {
                fn inner($($param: $ty,)*) -> $ret { $code }
                use $crate::FromOcamlRep;
                // Convert each raw OCaml word to its declared Rust type.
                $(let $param = <$ty>::from_ocaml($param).unwrap();)*
                let result = inner($($param,)*);
                $crate::to_ocaml(&result)
            })
        }
    };

    // Zero parameters: OCaml still passes a unit argument, which is ignored.
    (fn $name:ident() -> $ret:ty $code:block) => {
        #[unsafe(no_mangle)]
        pub unsafe extern "C" fn $name (_unit: usize) -> usize {
            $crate::catch_unwind(|| {
                fn inner() -> $ret { $code }
                let result = inner();
                $crate::to_ocaml(&result)
            })
        }
    };

    // No return type written: default it to `()` and re-dispatch.
    (fn $name:ident($($param:ident: $ty:ty),* $(,)?) $code:block) => {
        $crate::ocaml_ffi_fn! {
            fn $name($($param: $ty),*) -> () $code
        }
    };
}

/// Convenience macro for declaring OCaml FFI wrappers.
///
/// Each parameter will be converted from OCaml using `ocamlrep` and the result
/// will be converted to OCaml using `ocamlrep` and allocated on the OCaml GC
/// heap using `ocamlpool`.
///
/// Panics in the function body will be caught and converted to an OCaml
/// exception of type Failure.
#[macro_export]
macro_rules! ocaml_ffi {
    ($(fn $name:ident($($param:ident: $ty:ty),* $(,)?) $(-> $ret:ty)? $code:block)*) => {
        $($crate::ocaml_ffi_fn! {
            fn $name($($param: $ty),*) $(-> $ret)* $code
        })*
    };
}

// Implementation detail of `ocaml_ffi_with_arena!` (see that macro's
// documentation). Unlike `ocaml_ffi_fn!`, arguments are deserialized with
// `FromOcamlRepIn` into a per-call `Bump` arena, so converted values may
// borrow from the arena for the duration of the call.
#[macro_export]
macro_rules! ocaml_ffi_with_arena_fn {
    // Arena parameter plus one or more value parameters.
    (fn $name:ident<$lifetime:lifetime>($arena:ident: $arena_ty:ty, $($param:ident: $ty:ty),+ $(,)?) -> $ret:ty $code:block) => {
        #[unsafe(no_mangle)]
        pub unsafe extern "C" fn $name ($($param: usize,)*) -> usize {
            $crate::catch_unwind(|| {
                use $crate::FromOcamlRepIn;
                // The arena lives only for this one FFI call.
                let arena = &$crate::Bump::new();
                // Raw words are passed through and converted inside `inner`,
                // where the arena lifetime parameter is in scope.
                fn inner<$lifetime>($arena: $arena_ty, $($param: usize,)*) -> $ret {
                    $(let $param = unsafe { <$ty>::from_ocamlrep_in($crate::Value::from_bits($param), $arena).unwrap() };)*
                    $code
                }
                let result = inner(arena, $($param,)*);
                $crate::to_ocaml(&result)
            })
        }
    };

    // Arena parameter only: OCaml still passes a unit argument, ignored.
    (fn $name:ident<$lifetime:lifetime>($arena:ident: $arena_ty:ty $(,)?) -> $ret:ty $code:block) => {
        #[unsafe(no_mangle)]
        pub unsafe extern "C" fn $name (_unit: usize) -> usize {
            $crate::catch_unwind(|| {
                fn inner<$lifetime>($arena: $arena_ty) -> $ret { $code }
                let arena = &$crate::Bump::new();
                let result = inner(arena);
                $crate::to_ocaml(&result)
            })
        }
    };

    // No return type written: default it to `()` and re-dispatch.
    (fn $name:ident<$lifetime:lifetime>($($param:ident: $ty:ty),* $(,)?) $code:block) => {
        $crate::ocaml_ffi_with_arena_fn! {
            fn $name<$lifetime>($($param: $ty),*) -> () $code
        }
    };
}

/// Convenience macro for declaring OCaml FFI wrappers which use an arena to
/// allocate the arguments and return value.
///
/// FFI functions declared with this macro must declare exactly one lifetime
/// parameter. The function's first value parameter must be a reference to a
/// `bumpalo::Bump` arena with that lifetime:
///
/// ```
/// ocaml_ffi_with_arena! {
///     fn swap_str_pair<'a>(arena: &'a Bump, pair: (&'a str, &'a str)) -> (&'a str, &'a str) {
///         (pair.1, pair.0)
///     }
/// }
/// ```
///
/// An OCaml extern declaration for this function would look like this:
///
/// ```
/// external swap_str_pair : string * string -> string * string = "swap_str_pair"
/// ```
///
/// Note that no parameter for the arena appears on the OCaml side--it is
/// constructed on the Rust side and lives only for the duration of one FFI
/// call.
///
/// Each (non-arena) parameter will be converted from OCaml using
/// `ocamlrep::FromOcamlRepIn`, and allocated in the given arena (if its
/// `FromOcamlRepIn` implementation makes use of the arena).
///
/// The return value (which may be allocated in the given arena, if convenient)
/// will be converted to OCaml using `ocamlrep::ToOcamlRep`. The converted OCaml
/// value will be allocated on the OCaml heap using `ocamlpool`.
///
/// Panics in the function body will be caught and converted to an OCaml
/// exception of type `Failure`.
#[macro_export]
macro_rules! ocaml_ffi_with_arena {
    ($(fn $name:ident<$lifetime:lifetime>($($param:ident: $ty:ty),* $(,)?) $(-> $ret:ty)? $code:block)*) => {
        $($crate::ocaml_ffi_with_arena_fn! {
            fn $name<$lifetime>($($param: $ty),*) $(-> $ret)* $code
        })*
    };
}

// Implementation detail of `ocaml_ffi_arena_result!` (see that macro's
// documentation). Arguments are converted to owned Rust values with
// `FromOcamlRep` (no arena); only the return value may borrow from the
// per-call `Bump` arena.
#[macro_export]
macro_rules! ocaml_ffi_arena_result_fn {
    (fn $name:ident<$lifetime:lifetime>($arena:ident: $arena_ty:ty, $($param:ident: $ty:ty),+ $(,)?) -> $ret:ty $code:block) => {
        #[unsafe(no_mangle)]
        pub unsafe extern "C" fn $name ($($param: usize,)*) -> usize {
            $crate::catch_unwind(|| {
                fn inner<$lifetime>($arena: $arena_ty, $($param: $ty,)*) -> $ret { $code }
                use $crate::FromOcamlRep;
                // Owned conversions first; the arena is only for the result.
                $(let $param = <$ty>::from_ocaml($param).unwrap();)*
                let arena = &$crate::Bump::new();
                let result = inner(arena, $($param,)*);
                $crate::to_ocaml(&result)
            })
        }
    };
}

/// Convenience macro for declaring OCaml FFI wrappers which use an arena for
/// just the return value.
///
/// FFI functions declared with this macro must declare exactly one lifetime
/// parameter. The function's first value parameter must be a reference to a
/// `bumpalo::Bump` arena with that lifetime:
///
/// ```
/// ocaml_ffi_arena_result! {
///     fn parse_example<'a>(arena: &'a Bump, text: String) -> (&'a str, &'a str) {
///         /* copy parts of text into arena while parsing */
///         (pair.1, pair.0)
///     }
/// }
/// ```
///
/// An OCaml extern declaration for this function would look like this:
///
/// ```
/// external parse_example : string -> string * string = "parse_example"
/// ```
///
/// Note that no parameter for the arena appears on the OCaml side--it is
/// constructed on the Rust side and lives only for the duration of one FFI
/// call.
///
/// Each parameter after the arena parameter will be converted from OCaml using
/// `ocamlrep` and passed in as an owned value.
///
/// The return value (which may be allocated in the given arena, if convenient)
/// will be converted to OCaml using `ocamlrep::ToOcamlRep`. The converted OCaml
/// value will be allocated on the OCaml heap using `ocamlpool`.
///
/// Panics in the function body will be caught and converted to an OCaml
/// exception of type `Failure`.
#[macro_export]
macro_rules! ocaml_ffi_arena_result {
    ($(fn $name:ident<$lifetime:lifetime>($($param:ident: $ty:ty),* $(,)?) $(-> $ret:ty)? $code:block)*) => {
        $($crate::ocaml_ffi_arena_result_fn! {
            fn $name<$lifetime>($($param: $ty),*) $(-> $ret)* $code
        })*
    };
}

// Implementation detail of `ocaml_registered_function!` (see that macro's
// documentation). Generates a Rust function that looks up an OCaml closure
// registered via `Callback.register` (cached in a `OnceLock` after the first
// lookup) and invokes it through `caml_callbackN_exn`.
#[macro_export]
macro_rules! ocaml_registered_function_fn {
    // This needs to be first, as macro matching is linear.
    //
    // caml_callback_exn works as it directly calls into the implemented OCaml functions.
    //
    // caml_callback{2,3,N}_exn don't work, as they go through caml_apply2,
    // caml_apply3 etc. which for some reason crashes!
    //
    // TODO: Figure out how to make caml_apply2 and friends not crash, and remove the below rule.
    ($ocaml_name:expr, fn $name:ident($param1:ident: $ty1:ty, $($params:ident: $ty:ty),+ $(,)?) -> $ret:ty) => {
        compile_error!("We don't support functions with more than one parameter.");
    };

    // Exactly one parameter (multi-parameter forms are rejected above).
    ($ocaml_name:expr, fn $name:ident($($param:ident: $ty:ty),+ $(,)?) -> $ret:ty) => {
        #[unsafe(no_mangle)]
        pub unsafe fn $name ($($param: $ty,)*) -> $ret {
            use std::sync::OnceLock;
            // Cache the resolved OCaml closure after the first lookup.
            static FN: OnceLock<usize> = OnceLock::new();
            let the_function_to_call = *FN.get_or_init(|| {
                let the_function_to_call_name = std::ffi::CString::new($ocaml_name).expect("string contained null byte");
                let the_function_to_call = $crate::caml_named_value(the_function_to_call_name.as_ptr());
                if the_function_to_call.is_null() {
                    panic!("Could not find function. Use Callback.register");
                }
                *the_function_to_call
            });
            // Convert each Rust argument to an OCaml value before the call.
            let args_to_function: Vec<usize> = vec![$($crate::to_ocaml(&$param),)*];
            let args_to_function_ptr: *const usize = args_to_function.as_ptr();
            let result = $crate::caml_callbackN_exn(the_function_to_call, args_to_function.len().try_into().unwrap(), args_to_function_ptr);
            if $crate::is_exception_result(result) {
                panic!("OCaml function threw an unknown exception");
            }
            // NOTE(review): unlike `ocaml_ffi_fn!`, this rule has no
            // `use $crate::FromOcamlRep;` — it appears to rely on the trait
            // being in scope at the expansion site; confirm with callers.
            let result = <$ret>::from_ocaml(result).unwrap();
            result
        }
    };

    // Zero parameters: wrap the unit-argument form and apply it to `()`.
    ($ocaml_name:expr, fn $name:ident() -> $ret:ty) => {
        unsafe fn $name() -> $ret {
            $crate::ocaml_registered_function_fn!(
                $ocaml_name,
                fn inner(_unit: ()) -> $ret
            );
            inner(())
        }
    };

    // No return type written: default it to `()` and re-dispatch.
    ($ocaml_name:expr, fn $name:ident($($param:ident: $ty:ty),* $(,)?)) => {
        $crate::ocaml_registered_function_fn!(
            $ocaml_name,
            fn $name($($param: $ty),*) -> ()
        );
    };
}

/// Convenience macro for declaring Rust FFI wrappers around OCaml-defined functions.
///
/// Each parameter will be converted to OCaml using `ocamlrep` and allocated on
/// the OCaml GC heap using `ocamlpool`. The result will be converted from OCaml
/// using `ocamlrep`.
///
/// Exceptions in OCaml will be caught and converted to a Rust panic. The panic
/// will not contain useful information due to the limitations of deserializing
/// arbitrary OCaml exceptions.
#[macro_export]
macro_rules! ocaml_registered_function {
    ($(fn $name:ident($($param:ident: $ty:ty),* $(,)?) $(-> $ret:ty)?;)*) => {
        $($crate::ocaml_registered_function_fn!(
            stringify!($name),
            fn $name($($param: $ty),*) $(-> $ret)*
        );)*
    };
}
rust
MIT
3ed6e41c17c6d05e19121b59beb8efbb1a7ce3b6
2026-01-04T20:16:50.959951Z
false